| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int64 3-1.05M |
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.utils import six
from djblets.webapi.errors import PERMISSION_DENIED
from reviewboard.reviews.models import Comment
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
review_diff_comment_item_mimetype,
review_diff_comment_list_mimetype)
from reviewboard.webapi.tests.mixins import (
BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_comment import (
CommentItemMixin,
CommentListMixin)
from reviewboard.webapi.tests.urls import (
get_review_diff_comment_item_url,
get_review_diff_comment_list_url)
class BaseResourceTestCase(BaseWebAPITestCase):
def _common_post_interdiff_comments(self, comment_text):
review_request, filediff = self._create_diff_review_request()
diffset = filediff.diffset
# Post the second diff.
interdiffset = self.create_diffset(review_request)
interfilediff = self.create_filediff(interdiffset)
review = self.create_review(review_request, user=self.user)
comment = self.create_diff_comment(review, filediff, interfilediff,
text=comment_text)
return comment, review_request, review, interdiffset.revision
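# Editor's note (hypothetical IDs): the interdiffset revision returned here is
# what the GET tests below pass as the ?interdiff-revision= query argument,
# e.g. GET /api/review-requests/1/reviews/1/diff-comments/?interdiff-revision=2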
def _create_diff_review_with_issue(self, publish=False, comment_text=None,
expected_status=201):
"""Sets up a review for a diff that includes a comment with an issue.
If `publish` is True, the review is published. The review request is
always published.
Returns the comment, the review object, and the review request object.
"""
if not comment_text:
comment_text = 'Test diff comment with an opened issue'
review_request, filediff = self._create_diff_review_request()
review = self.create_review(review_request, user=self.user,
publish=publish)
comment = self.create_diff_comment(review, filediff, text=comment_text,
issue_opened=True)
return comment, review, review_request
def _create_diff_review_request(self, with_local_site=False):
review_request = self.create_review_request(
create_repository=True,
submitter=self.user,
with_local_site=with_local_site,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
return review_request, filediff
def _create_diff_review(self):
review_request, filediff = self._create_diff_review_request()
review = self.create_review(review_request, publish=True)
self.create_diff_comment(review, filediff)
return review
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(CommentListMixin, ReviewRequestChildListMixin,
BaseResourceTestCase):
"""Testing the ReviewDiffCommentResource list APIs."""
fixtures = ['test_users', 'test_scmtools']
sample_api_url = 'review-requests/<id>/reviews/<id>/diff-comments/'
resource = resources.review_diff_comment
def setup_review_request_child_test(self, review_request):
if not review_request.repository_id:
# The group tests don't create a repository by default.
review_request.repository = self.create_repository()
review_request.save()
diffset = self.create_diffset(review_request)
self.create_filediff(diffset)
review = self.create_review(review_request, publish=True)
return (get_review_diff_comment_list_url(review),
review_diff_comment_list_mimetype)
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
self.assertEqual(item_rsp['issue_opened'], comment.issue_opened)
self.assertEqual(item_rsp['first_line'], comment.first_line)
self.assertEqual(item_rsp['num_lines'], comment.num_lines)
self.assertEqual(item_rsp['extra_data'], comment.extra_data)
if comment.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
review_request = self.create_review_request(
create_repository=True,
with_local_site=with_local_site,
submitter=user,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request, publish=True)
if populate_items:
items = [self.create_diff_comment(review, filediff)]
else:
items = []
return (get_review_diff_comment_list_url(review, local_site_name),
review_diff_comment_list_mimetype,
items)
def test_get_with_counts_only(self):
"""Testing the
GET review-requests/<id>/reviews/<id>/diff-comments/?counts-only=1 API
"""
review = self._create_diff_review()
rsp = self.api_get(get_review_diff_comment_list_url(review), {
'counts-only': 1,
}, expected_mimetype=review_diff_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['count'], review.comments.count())
def test_get_with_interdiff(self):
"""Testing the GET review-requests/<id>/reviews/<id>/diff-comments/ API
with interdiff
"""
comment_text = "Test diff comment"
comment, review_request, review, interdiff_revision = \
self._common_post_interdiff_comments(comment_text)
rsp = self.api_get(get_review_diff_comment_list_url(review), {
'interdiff-revision': interdiff_revision,
}, expected_mimetype=review_diff_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn('diff_comments', rsp)
self.assertEqual(len(rsp['diff_comments']), 1)
self.assertEqual(rsp['diff_comments'][0]['text'], comment_text)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
review_request = self.create_review_request(
create_repository=True,
with_local_site=with_local_site,
submitter=user,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request, user=user)
return (get_review_diff_comment_list_url(review, local_site_name),
review_diff_comment_item_mimetype,
{
'filediff_id': filediff.pk,
'text': 'My new text',
'first_line': 1,
'num_lines': 2,
},
[review])
def check_post_result(self, user, rsp, review):
comment_rsp = rsp['diff_comment']
self.assertEqual(comment_rsp['text'], 'My new text')
self.assertEqual(comment_rsp['text_type'], 'plain')
comment = Comment.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
def test_post_with_issue(self):
"""Testing the
POST review-requests/<id>/reviews/<id>/diff-comments/ API
with an issue
"""
diff_comment_text = 'Test diff comment with an opened issue'
review_request, filediff = self._create_diff_review_request()
review = self.create_review(review_request, user=self.user)
rsp = self.api_post(
get_review_diff_comment_list_url(review),
{
'filediff_id': filediff.pk,
'issue_opened': True,
'first_line': 1,
'num_lines': 5,
'text': diff_comment_text,
},
expected_mimetype=review_diff_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn('diff_comment', rsp)
self.assertEqual(rsp['diff_comment']['text'], diff_comment_text)
self.assertTrue(rsp['diff_comment']['issue_opened'])
def test_post_with_interdiff(self):
"""Testing the
POST review-requests/<id>/reviews/<id>/diff-comments/ API
with interdiff
"""
comment_text = "Test diff comment"
review_request, filediff = self._create_diff_review_request()
# Post the second diff.
interdiffset = self.create_diffset(review_request)
interfilediff = self.create_filediff(interdiffset)
review = self.create_review(review_request, user=self.user)
rsp = self.api_post(
get_review_diff_comment_list_url(review),
{
'filediff_id': filediff.pk,
'interfilediff_id': interfilediff.pk,
'issue_opened': True,
'first_line': 1,
'num_lines': 5,
'text': comment_text,
},
expected_mimetype=review_diff_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn('diff_comment', rsp)
self.assertEqual(rsp['diff_comment']['text'], comment_text)
comment = Comment.objects.get(pk=rsp['diff_comment']['id'])
self.assertEqual(comment.filediff_id, filediff.pk)
self.assertEqual(comment.interfilediff_id, interfilediff.pk)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(CommentItemMixin, ReviewRequestChildItemMixin,
BaseResourceTestCase):
"""Testing the ReviewDiffCommentResource item APIs."""
fixtures = ['test_users', 'test_scmtools']
sample_api_url = 'review-requests/<id>/reviews/<id>/diff-comments/<id>/'
resource = resources.review_diff_comment
def setup_review_request_child_test(self, review_request):
if not review_request.repository_id:
# The group tests don't create a repository by default.
review_request.repository = self.create_repository()
review_request.save()
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request, publish=True)
comment = self.create_diff_comment(review, filediff)
return (get_review_diff_comment_item_url(review, comment.pk),
review_diff_comment_item_mimetype)
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
self.assertEqual(item_rsp['issue_opened'], comment.issue_opened)
self.assertEqual(item_rsp['first_line'], comment.first_line)
self.assertEqual(item_rsp['num_lines'], comment.num_lines)
self.assertEqual(item_rsp['extra_data'], comment.extra_data)
if comment.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
create_repository=True,
with_local_site=with_local_site,
submitter=user,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request, user=user)
comment = self.create_diff_comment(review, filediff)
return (get_review_diff_comment_item_url(review, comment.pk,
local_site_name),
[comment, review])
def check_delete_result(self, user, comment, review):
self.assertNotIn(comment, review.comments.all())
def test_delete_with_interdiff(self):
"""Testing the
DELETE review-requests/<id>/reviews/<id>/diff-comments/<id>/ API
"""
comment_text = "This is a test comment."
comment, review_request, review, interdiff_revision = \
self._common_post_interdiff_comments(comment_text)
self.api_delete(get_review_diff_comment_item_url(review, comment.pk))
rsp = self.api_get(get_review_diff_comment_list_url(review),
expected_mimetype=review_diff_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn('diff_comments', rsp)
self.assertEqual(len(rsp['diff_comments']), 0)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
create_repository=True,
with_local_site=with_local_site,
submitter=user,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request, user=user)
comment = self.create_diff_comment(review, filediff)
return (get_review_diff_comment_item_url(review, comment.pk,
local_site_name),
review_diff_comment_item_mimetype,
comment)
def test_get_not_modified(self):
"""Testing the
GET review-requests/<id>/reviews/<id>/diff-comments/<id>/ API
with Not Modified response
"""
review = self._create_diff_review()
comment = Comment.objects.all()[0]
self._testHttpCaching(
get_review_diff_comment_item_url(review, comment.id),
check_last_modified=True)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
review_request = self.create_review_request(
create_repository=True,
with_local_site=with_local_site,
submitter=user,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request, user=user)
comment = self.create_diff_comment(review, filediff)
return (get_review_diff_comment_item_url(review, comment.pk,
local_site_name),
review_diff_comment_item_mimetype,
{
'text': 'My new text',
},
comment,
[])
def check_put_result(self, user, item_rsp, comment, *args):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], 'My new text')
self.assertEqual(item_rsp['text_type'], 'plain')
self.compare_item(item_rsp, Comment.objects.get(pk=comment.pk))
def test_put_with_issue(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/diff-comments/<id>/ API,
removing issue_opened
"""
comment, review, review_request = self._create_diff_review_with_issue()
rsp = self.api_put(
get_review_diff_comment_item_url(review, comment.id),
{'issue_opened': False},
expected_mimetype=review_diff_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertFalse(rsp['diff_comment']['issue_opened'])
def test_put_issue_status_before_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/diff-comments/<id>/ API
with an issue, before review is published
"""
comment, review, review_request = self._create_diff_review_with_issue()
# The issue_status should not be able to be changed while the review is
# unpublished.
rsp = self.api_put(
get_review_diff_comment_item_url(review, comment.id),
{'issue_status': 'resolved'},
expected_mimetype=review_diff_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
# The issue_status should still be "open"
self.assertEqual(rsp['diff_comment']['issue_status'], 'open')
def test_put_issue_status_after_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/diff-comments/<id>/ API
with an issue, after review is published
"""
comment, review, review_request = self._create_diff_review_with_issue(
publish=True)
rsp = self.api_put(
get_review_diff_comment_item_url(review, comment.id),
{'issue_status': 'resolved'},
expected_mimetype=review_diff_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['diff_comment']['issue_status'], 'resolved')
def test_put_issue_status_by_issue_creator(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/diff-comments/<id>/ API
permissions for issue creator
"""
comment, review, review_request = self._create_diff_review_with_issue(
publish=True)
# Change the owner of the review request so that it's not owned by
# self.user.
review_request.submitter = User.objects.get(username='doc')
review_request.save()
# The review/comment (and therefore issue) is still owned by self.user,
# so we should be able to change the issue status.
rsp = self.api_put(
get_review_diff_comment_item_url(review, comment.id),
{'issue_status': 'dropped'},
expected_mimetype=review_diff_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['diff_comment']['issue_status'], 'dropped')
def test_put_issue_status_by_uninvolved_user(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/diff-comments/<id>/ API
permissions for an uninvolved user
"""
comment, review, review_request = self._create_diff_review_with_issue(
publish=True)
# Change the owner of the review request and review so that they're
# not owned by self.user.
new_owner = User.objects.get(username='doc')
review_request.submitter = new_owner
review_request.save()
review.user = new_owner
review.save()
rsp = self.api_put(
get_review_diff_comment_item_url(review, comment.id),
{'issue_status': 'dropped'},
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
def test_put_with_remove_issue_opened(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/diff-comments/<id>/ API,
removing the issue_opened state
"""
comment, review, review_request = self._create_diff_review_with_issue()
rsp = self.api_put(
get_review_diff_comment_item_url(review, comment.id),
{'issue_opened': False},
expected_mimetype=review_diff_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['diff_comment']['issue_status'], '')
| 1tush/reviewboard | reviewboard/webapi/tests/test_review_comment.py | Python | mit | 19,814 |
from scapy.layers.l2 import Dot1Q
import abc
import six
from vpp_pg_interface import VppPGInterface
from vpp_interface import VppInterface
from vpp_papi import VppEnum
class L2_VTR_OP:
L2_DISABLED = 0
L2_PUSH_1 = 1
L2_PUSH_2 = 2
L2_POP_1 = 3
L2_POP_2 = 4
L2_TRANSLATE_1_1 = 5
L2_TRANSLATE_1_2 = 6
L2_TRANSLATE_2_1 = 7
L2_TRANSLATE_2_2 = 8
@six.add_metaclass(abc.ABCMeta)
class VppSubInterface(VppPGInterface):
@property
def parent(self):
"""Parent interface for this sub-interface"""
return self._parent
@property
def sub_id(self):
"""Sub-interface ID"""
return self._sub_id
@property
def tag1(self):
return self._tag1
@property
def tag2(self):
return self._tag2
@property
def vtr(self):
return self._vtr
def __init__(self, test, parent, sub_id):
VppInterface.__init__(self, test)
self._parent = parent
self._parent.add_sub_if(self)
self._sub_id = sub_id
self.DOT1AD_TYPE = 0x88A8
self.DOT1Q_TYPE = 0x8100
def set_sw_if_index(self, sw_if_index):
super(VppSubInterface, self).set_sw_if_index(sw_if_index)
self.set_vtr(L2_VTR_OP.L2_DISABLED)
@abc.abstractmethod
def create_arp_req(self):
pass
@abc.abstractmethod
def create_ndp_req(self):
pass
def resolve_arp(self):
super(VppSubInterface, self).resolve_arp(self.parent)
def resolve_ndp(self):
super(VppSubInterface, self).resolve_ndp(self.parent)
@abc.abstractmethod
def add_dot1_layer(self, pkt):
pass
def remove_vpp_config(self):
self.test.vapi.delete_subif(self.sw_if_index)
def _add_tag(self, packet, vlan, tag_type):
payload = packet.payload
inner_type = packet.type
packet.remove_payload()
packet.add_payload(Dot1Q(vlan=vlan) / payload)
packet.payload.type = inner_type
packet.payload.vlan = vlan
packet.type = tag_type
return packet
def _remove_tag(self, packet, vlan=None, tag_type=None):
if tag_type:
self.test.instance().assertEqual(packet.type, tag_type)
payload = packet.payload
if vlan:
self.test.instance().assertEqual(payload.vlan, vlan)
inner_type = payload.type
payload = payload.payload
packet.remove_payload()
packet.add_payload(payload)
packet.type = inner_type
return packet
def add_dot1q_layer(self, packet, vlan):
return self._add_tag(packet, vlan, self.DOT1Q_TYPE)
def add_dot1ad_layer(self, packet, outer, inner):
p = self._add_tag(packet, inner, self.DOT1Q_TYPE)
return self._add_tag(p, outer, self.DOT1AD_TYPE)
def remove_dot1q_layer(self, packet, vlan=None):
return self._remove_tag(packet, vlan, self.DOT1Q_TYPE)
def remove_dot1ad_layer(self, packet, outer=None, inner=None):
p = self._remove_tag(packet, outer, self.DOT1AD_TYPE)
return self._remove_tag(p, inner, self.DOT1Q_TYPE)
def set_vtr(self, vtr, push1q=0, tag=None, inner=None, outer=None):
self._tag1 = 0
self._tag2 = 0
self._push1q = 0
if (vtr == L2_VTR_OP.L2_PUSH_1 or
vtr == L2_VTR_OP.L2_TRANSLATE_1_1 or
vtr == L2_VTR_OP.L2_TRANSLATE_2_1):
self._tag1 = tag
self._push1q = push1q
if (vtr == L2_VTR_OP.L2_PUSH_2 or
vtr == L2_VTR_OP.L2_TRANSLATE_1_2 or
vtr == L2_VTR_OP.L2_TRANSLATE_2_2):
self._tag1 = outer
self._tag2 = inner
self._push1q = push1q
self.test.vapi.l2_interface_vlan_tag_rewrite(
sw_if_index=self.sw_if_index, vtr_op=vtr, push_dot1q=self._push1q,
tag1=self._tag1, tag2=self._tag2)
self._vtr = vtr
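# Editor's usage sketch (hypothetical `test` and `parent` objects, not part of
# the original module): push a single 802.1Q tag on a sub-interface, then
# rewrite it 1:1 to a different VLAN.
#
#   sub_if = VppDot1QSubint(test, parent, sub_id=100)
#   sub_if.set_vtr(L2_VTR_OP.L2_PUSH_1, tag=100, push1q=1)
#   sub_if.set_vtr(L2_VTR_OP.L2_TRANSLATE_1_1, tag=200, push1q=1)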
class VppDot1QSubint(VppSubInterface):
@property
def vlan(self):
"""VLAN tag"""
return self._vlan
def __init__(self, test, parent, sub_id, vlan=None):
super(VppDot1QSubint, self).__init__(test, parent, sub_id)
if vlan is None:
vlan = sub_id
self._vlan = vlan
r = test.vapi.create_vlan_subif(parent.sw_if_index, vlan)
self.set_sw_if_index(r.sw_if_index)
def create_arp_req(self):
packet = VppPGInterface.create_arp_req(self)
return self.add_dot1_layer(packet)
def create_ndp_req(self):
packet = VppPGInterface.create_ndp_req(self)
return self.add_dot1_layer(packet)
# called before sending packet
def add_dot1_layer(self, packet):
return self.add_dot1q_layer(packet, self.vlan)
# called on received packet to "reverse" the add call
def remove_dot1_layer(self, packet):
return self.remove_dot1q_layer(packet, self.vlan)
class VppDot1ADSubint(VppSubInterface):
@property
def outer_vlan(self):
"""Outer VLAN tag"""
return self._outer_vlan
@property
def inner_vlan(self):
"""Inner VLAN tag"""
return self._inner_vlan
def __init__(self, test, parent, sub_id, outer_vlan, inner_vlan):
super(VppDot1ADSubint, self).__init__(test, parent, sub_id)
flags = (VppEnum.vl_api_sub_if_flags_t.SUB_IF_API_FLAG_DOT1AD |
VppEnum.vl_api_sub_if_flags_t.SUB_IF_API_FLAG_TWO_TAGS |
VppEnum.vl_api_sub_if_flags_t.SUB_IF_API_FLAG_EXACT_MATCH)
r = test.vapi.create_subif(sw_if_index=parent.sw_if_index,
sub_id=sub_id, outer_vlan_id=outer_vlan,
inner_vlan_id=inner_vlan,
sub_if_flags=flags)
self.set_sw_if_index(r.sw_if_index)
self._outer_vlan = outer_vlan
self._inner_vlan = inner_vlan
def create_arp_req(self):
packet = VppPGInterface.create_arp_req(self)
return self.add_dot1_layer(packet)
def create_ndp_req(self):
packet = VppPGInterface.create_ndp_req(self)
return self.add_dot1_layer(packet)
def add_dot1_layer(self, packet):
return self.add_dot1ad_layer(packet, self.outer_vlan, self.inner_vlan)
def remove_dot1_layer(self, packet):
return self.remove_dot1ad_layer(packet, self.outer_vlan,
self.inner_vlan)
class VppP2PSubint(VppSubInterface):
def __init__(self, test, parent, sub_id, remote_mac):
super(VppP2PSubint, self).__init__(test, parent, sub_id)
r = test.vapi.p2p_ethernet_add(parent.sw_if_index,
remote_mac, sub_id)
self.set_sw_if_index(r.sw_if_index)
self.parent_sw_if_index = parent.sw_if_index
self.p2p_remote_mac = remote_mac
def add_dot1_layer(self, packet):
return packet
def remove_dot1_layer(self, packet):
return packet
def create_arp_req(self):
packet = VppPGInterface.create_arp_req(self)
return packet
def create_ndp_req(self):
packet = VppPGInterface.create_ndp_req(self)
return packet
| vpp-dev/vpp | test/vpp_sub_interface.py | Python | apache-2.0 | 7,153 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
from core import barf
plugin_dir = os.path.abspath(os.path.dirname(__file__)) + "/"
sys.path.append(plugin_dir)
library_list = []
for module in os.listdir(os.path.dirname(plugin_dir)):
if os.path.isfile(plugin_dir + module) and module != 'Plugin.py' and module != 'PluginManager.py':
module_name, ext = os.path.splitext(module)
if ext == '.py' and module_name != '__init__':  # Important: ignore .pyc and other files.
try:
module = __import__(module_name)
barf.Barf('PLG', 'Imported plugin: \033[1m%s' % module_name)
library_list.append(module)
except ImportError as e:
barf.Barf('ERR', "Failed to load plugin ( IE ): \033[1m%s" % module_name)
barf.Barf('TAB', e)
except NameError as e:
barf.Barf('ERR', "Failed to load plugin ( NE ): \033[1m%s" % module_name)
barf.Barf('TAB', e)
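# Editor's note: any importable .py file in this directory other than
# Plugin.py, PluginManager.py and __init__.py is treated as a plugin; a
# hypothetical plugins/hello.py containing ordinary module-level Python would
# be imported (and logged via barf) when this package is first imported.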
| RobertTheMagnificent/scrib | plugins/__init__.py | Python | gpl-2.0 | 941 |
# -*- coding: utf-8 -*-
""" This module returns stats about the DynamoDB table """
import math
from datetime import datetime, timedelta
from boto.exception import JSONResponseError, BotoServerError
from retrying import retry
from dynamic_dynamodb.aws import dynamodb
from dynamic_dynamodb.log_handler import LOGGER as logger
from dynamic_dynamodb.aws.cloudwatch import (
__get_connection_cloudwatch as cloudwatch_connection)
def get_consumed_read_units_percent(
table_name, gsi_name, lookback_window_start=15):
""" Returns the number of consumed read units in percent
:type table_name: str
:param table_name: Name of the DynamoDB table
:type gsi_name: str
:param gsi_name: Name of the GSI
:type lookback_window_start: int
:param lookback_window_start: How many minutes to look back
:returns: int -- Consumed read units in percent
"""
try:
metrics = __get_aws_metric(
table_name,
gsi_name,
lookback_window_start,
'ConsumedReadCapacityUnits')
except BotoServerError:
raise
if metrics:
consumed_read_units = int(
math.ceil(float(metrics[0]['Sum'])/float(300)))
else:
consumed_read_units = 0
try:
consumed_read_units_percent = int(
math.ceil(
float(consumed_read_units) /
float(dynamodb.get_provisioned_gsi_read_units(
table_name, gsi_name)) * 100))
except JSONResponseError:
raise
logger.info('{0} - GSI: {1} - Consumed read units: {2:d}%'.format(
table_name, gsi_name, consumed_read_units_percent))
return consumed_read_units_percent
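# Editor's worked example (hypothetical numbers): a 5-minute CloudWatch Sum of
# 60,000 ConsumedReadCapacityUnits averages ceil(60000 / 300) = 200 units/s;
# against 400 provisioned GSI read units that is ceil(200 / 400 * 100) = 50%.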
def get_throttled_read_event_count(
table_name, gsi_name, lookback_window_start=15):
""" Returns the number of throttled read events during a given time frame
:type table_name: str
:param table_name: Name of the DynamoDB table
:type gsi_name: str
:param gsi_name: Name of the GSI
:type lookback_window_start: int
:param lookback_window_start: How many minutes to look back
:returns: int -- Number of throttled read events
"""
try:
metrics = __get_aws_metric(
table_name, gsi_name, lookback_window_start, 'ReadThrottleEvents')
except BotoServerError:
raise
if metrics:
throttled_read_events = int(metrics[0]['Sum'])
else:
throttled_read_events = 0
logger.info('{0} - GSI: {1} - Read throttle count: {2:d}'.format(
table_name, gsi_name, throttled_read_events))
return throttled_read_events
def get_consumed_write_units_percent(
table_name, gsi_name, lookback_window_start=15):
""" Returns the number of consumed write units in percent
:type table_name: str
:param table_name: Name of the DynamoDB table
:type gsi_name: str
:param gsi_name: Name of the GSI
:type lookback_window_start: int
:param lookback_window_start: How many minutes to look back
:returns: int -- Consumed write units in percent
"""
try:
metrics = __get_aws_metric(
table_name,
gsi_name,
lookback_window_start,
'ConsumedWriteCapacityUnits')
except BotoServerError:
raise
if metrics:
consumed_write_units = int(
math.ceil(float(metrics[0]['Sum'])/float(300)))
else:
consumed_write_units = 0
try:
consumed_write_units_percent = int(
math.ceil(
float(consumed_write_units) /
float(dynamodb.get_provisioned_gsi_write_units(
table_name, gsi_name)) * 100))
except JSONResponseError:
raise
logger.info('{0} - GSI: {1} - Consumed write units: {2:d}%'.format(
table_name, gsi_name, consumed_write_units_percent))
return consumed_write_units_percent
def get_throttled_write_event_count(
table_name, gsi_name, lookback_window_start=15):
""" Returns the number of throttled write events during a given time frame
:type table_name: str
:param table_name: Name of the DynamoDB table
:type gsi_name: str
:param gsi_name: Name of the GSI
:type lookback_window_start: int
:param lookback_window_start: How many minutes to look back
:returns: int -- Number of throttled write events
"""
try:
metrics = __get_aws_metric(
table_name, gsi_name, lookback_window_start, 'WriteThrottleEvents')
except BotoServerError:
raise
if metrics:
throttled_write_events = int(metrics[0]['Sum'])
else:
throttled_write_events = 0
logger.info('{0} - GSI: {1} - Write throttle count: {2:d}'.format(
table_name, gsi_name, throttled_write_events))
return throttled_write_events
@retry(
wait='exponential_sleep',
wait_exponential_multiplier=1000,
wait_exponential_max=10000,
stop_max_attempt_number=10)
def __get_aws_metric(table_name, gsi_name, lookback_window_start, metric_name):
""" Returns a metric list from the AWS CloudWatch service, may return
None if no metric exists
:type table_name: str
:param table_name: Name of the DynamoDB table
:type gsi_name: str
:param gsi_name: Name of a GSI on the given DynamoDB table
:type lookback_window_start: int
:param lookback_window_start: How many minutes to look at
:type metric_name: str
:param metric_name: Name of the metric to retrieve from CloudWatch
:returns: list --
A list of time series data for the given metric, may be None if
there was no data
"""
try:
now = datetime.utcnow()
start_time = now-timedelta(minutes=lookback_window_start)
end_time = now-timedelta(minutes=lookback_window_start-5)
return cloudwatch_connection().get_metric_statistics(
period=300, # Always look at 5 minutes windows
start_time=start_time,
end_time=end_time,
metric_name=metric_name,
namespace='AWS/DynamoDB',
statistics=['Sum'],
dimensions={
'TableName': table_name,
'GlobalSecondaryIndexName': gsi_name
},
unit='Count')
except BotoServerError as error:
logger.error(
'Unknown boto error. Status: "{0}". '
'Reason: "{1}". Message: {2}'.format(
error.status,
error.reason,
error.message))
raise
| tellybug/dynamic-dynamodb | dynamic_dynamodb/statistics/gsi.py | Python | apache-2.0 | 6,518 |
from django.conf.urls import url
from .views import dashboard
urlpatterns = [
url(r'^$', dashboard, name='dashboard'),
]
| Darkduke68/MatchMaker | dashboards/urls.py | Python | mit | 125 |
# -*- coding: utf-8 -*-
"""
flask.ext.admin._compat
~~~~~~~~~~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
PY2 = sys.version_info[0] == 2
VER = sys.version_info
if not PY2:
text_type = str
string_types = (str,)
integer_types = (int, )
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
def as_unicode(s):
if isinstance(s, bytes):
return s.decode('utf-8')
return str(s)
# Various tools
from functools import reduce
from urllib.parse import urljoin, urlparse
else:
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
def as_unicode(s):
if isinstance(s, str):
return s.decode('utf-8')
return unicode(s)
# Helpers
reduce = __builtins__['reduce'] if isinstance(__builtins__, dict) else __builtins__.reduce
from urlparse import urljoin, urlparse
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
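# Editor's usage sketch (hypothetical Meta/Base classes): with_metaclass builds
# the class through `meta` without leaving the temporary class in the MRO.
#
#   class Meta(type):
#       pass
#
#   class Base(object):
#       pass
#
#   class MyClass(with_metaclass(Meta, Base)):
#       pass
#
#   assert type(MyClass) is Meta
#   assert MyClass.__mro__ == (MyClass, Base, object)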
try:
from collections import OrderedDict
except ImportError:
# Bare-bones OrderedDict implementation for Python2.6 compatibility
class OrderedDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.ordered_keys = []
def __setitem__(self, key, value):
self.ordered_keys.append(key)
dict.__setitem__(self, key, value)
def __iter__(self):
return (k for k in self.ordered_keys)
def iteritems(self):
return ((k, self[k]) for k in self.ordered_keys)
def items(self):
return list(self.iteritems())
| Jumpscale/web | pythonlib/flask_admin/_compat.py | Python | apache-2.0 | 2,865 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Load and save functions for zipped svg files."""
import faint.svg.parse_svg as parse_svg
import faint.svg.write_svg as write_svg
def load(filename, imageprops):
"""Load image from the zipped svg file."""
parse_svg.parse_svgz_file(filename, imageprops, "en")
def save(filename, canvas):
"""Save the image to the specified file as zipped svg."""
write_svg.write_svgz(filename, canvas)
| lukas-ke/faint-graphics-editor | py/faint/formatsvgz.py | Python | apache-2.0 | 1,026 |
from setuptools import setup, find_packages
setup(
name='temproject',
version='0.1.0',
packages=find_packages(),
package_data={
'': ['*.html'],
},
install_requires=['tornado'],
)
| WebWorkshopers/TemperaturaProject | setup.py | Python | mit | 215 |
import _plotly_utils.basevalidators
class MetasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="metasrc", parent_name="splom", **kwargs):
super(MetasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/splom/_metasrc.py | Python | mit | 436 |
#! /usr/bin/python2.7
"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 1521 Base64 encodings
'encode', 'decode', 'encodestring', 'decodestring',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
_translation = [chr(_x) for _x in range(256)]
EMPTYSTRING = ''
def _translate(s, altchars):
translation = _translation[:]
for k, v in altchars.items():
translation[ord(k)] = v
return s.translate(''.join(translation))
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
"""Encode a string using Base64.
s is the string to encode. Optional altchars must be a string of at least
length 2 (additional characters are ignored) which specifies an
alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
The encoded string is returned.
"""
# Strip off the trailing newline
encoded = binascii.b2a_base64(s)[:-1]
if altchars is not None:
return _translate(encoded, {'+': altchars[0], '/': altchars[1]})
return encoded
def b64decode(s, altchars=None):
"""Decode a Base64 encoded string.
s is the string to decode. Optional altchars must be a string of at least
length 2 (additional characters are ignored) which specifies the
alternative alphabet used instead of the '+' and '/' characters.
The decoded string is returned. A TypeError is raised if s were
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
if altchars is not None:
s = _translate(s, {altchars[0]: '+', altchars[1]: '/'})
try:
return binascii.a2b_base64(s)
except binascii.Error, msg:
# Transform this exception for consistency
raise TypeError(msg)
def standard_b64encode(s):
"""Encode a string using the standard Base64 alphabet.
s is the string to encode. The encoded string is returned.
"""
return b64encode(s)
def standard_b64decode(s):
"""Decode a string encoded with the standard Base64 alphabet.
s is the string to decode. The decoded string is returned. A TypeError
is raised if the string is incorrectly padded or if there are non-alphabet
characters present in the string.
"""
return b64decode(s)
def urlsafe_b64encode(s):
"""Encode a string using a url-safe Base64 alphabet.
s is the string to encode. The encoded string is returned. The alphabet
uses '-' instead of '+' and '_' instead of '/'.
"""
return b64encode(s, '-_')
def urlsafe_b64decode(s):
"""Decode a string encoded with the standard Base64 alphabet.
s is the string to decode. The decoded string is returned. A TypeError
is raised if the string is incorrectly padded or if there are non-alphabet
characters present in the string.
The alphabet uses '-' instead of '+' and '_' instead of '/'.
"""
return b64decode(s, '-_')
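# Editor's example (Python 2 byte-string semantics, as in this module):
#
#   >>> urlsafe_b64encode('\xfb\xff')
#   '-_8='
#   >>> urlsafe_b64decode('-_8=')
#   '\xfb\xff'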
# Base32 encoding/decoding must be done in Python
_b32alphabet = {
0: 'A', 9: 'J', 18: 'S', 27: '3',
1: 'B', 10: 'K', 19: 'T', 28: '4',
2: 'C', 11: 'L', 20: 'U', 29: '5',
3: 'D', 12: 'M', 21: 'V', 30: '6',
4: 'E', 13: 'N', 22: 'W', 31: '7',
5: 'F', 14: 'O', 23: 'X',
6: 'G', 15: 'P', 24: 'Y',
7: 'H', 16: 'Q', 25: 'Z',
8: 'I', 17: 'R', 26: '2',
}
_b32tab = _b32alphabet.items()
_b32tab.sort()
_b32tab = [v for k, v in _b32tab]
_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()])
def b32encode(s):
"""Encode a string using Base32.
s is the string to encode. The encoded string is returned.
"""
parts = []
quanta, leftover = divmod(len(s), 5)
# Pad the last quantum with zero bits if necessary
if leftover:
s += ('\0' * (5 - leftover))
quanta += 1
for i in range(quanta):
# c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this
# code is to process the 40 bits in units of 5 bits. So we take the 1
# leftover bit of c1 and tack it onto c2. Then we take the 2 leftover
# bits of c2 and tack them onto c3. The shifts and masks are intended
# to give us values of exactly 5 bits in width.
c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
c2 += (c1 & 1) << 16 # 17 bits wide
c3 += (c2 & 3) << 8 # 10 bits wide
parts.extend([_b32tab[c1 >> 11], # bits 1 - 5
_b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
_b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
_b32tab[c2 >> 12], # bits 16 - 20 (1 - 5)
_b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
_b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
_b32tab[c3 >> 5], # bits 31 - 35 (1 - 5)
_b32tab[c3 & 0x1f], # bits 36 - 40 (1 - 5)
])
encoded = EMPTYSTRING.join(parts)
# Adjust for any leftover partial quanta
if leftover == 1:
return encoded[:-6] + '======'
elif leftover == 2:
return encoded[:-4] + '===='
elif leftover == 3:
return encoded[:-3] + '==='
elif leftover == 4:
return encoded[:-1] + '='
return encoded
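# Editor's worked example: b32encode('a') pads the single byte to one 40-bit
# quantum (0x61 00 00 00 00), yielding 'MEAAAAAA', then the leftover == 1 case
# replaces the last six characters with '=', giving 'ME======'.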
def b32decode(s, casefold=False, map01=None):
"""Decode a Base32 encoded string.
s is the string to decode. Optional casefold is a flag specifying whether
a lowercase alphabet is acceptable as input. For security purposes, the
default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
(oh), and for optional mapping of the digit 1 (one) to either the letter I
(eye) or letter L (el). The optional argument map01 when not None,
specifies which letter the digit 1 should be mapped to (when map01 is not
None, the digit 0 is always mapped to the letter O). For security
purposes the default is None, so that 0 and 1 are not allowed in the
input.
The decoded string is returned. A TypeError is raised if s were
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
quanta, leftover = divmod(len(s), 8)
if leftover:
raise TypeError('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01:
s = _translate(s, {'0': 'O', '1': map01})
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
padchars = 0
mo = re.search('(?P<pad>[=]*)$', s)
if mo:
padchars = len(mo.group('pad'))
if padchars > 0:
s = s[:-padchars]
# Now decode the full quanta
parts = []
acc = 0
shift = 35
for c in s:
val = _b32rev.get(c)
if val is None:
raise TypeError('Non-base32 digit found')
acc += _b32rev[c] << shift
shift -= 5
if shift < 0:
parts.append(binascii.unhexlify('%010x' % acc))
acc = 0
shift = 35
# Process the last, partial quanta
last = binascii.unhexlify('%010x' % acc)
if padchars == 0:
last = '' # No characters
elif padchars == 1:
last = last[:-1]
elif padchars == 3:
last = last[:-2]
elif padchars == 4:
last = last[:-3]
elif padchars == 6:
last = last[:-4]
else:
raise TypeError('Incorrect padding')
parts.append(last)
return EMPTYSTRING.join(parts)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
"""Encode a string using Base16.
s is the string to encode. The encoded string is returned.
"""
return binascii.hexlify(s).upper()
def b16decode(s, casefold=False):
"""Decode a Base16 encoded string.
s is the string to decode. Optional casefold is a flag specifying whether
a lowercase alphabet is acceptable as input. For security purposes, the
default is False.
The decoded string is returned. A TypeError is raised if s were
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
if casefold:
s = s.upper()
if re.search('[^0-9A-F]', s):
raise TypeError('Non-base16 digit found')
return binascii.unhexlify(s)
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3
def encode(input, output):
"""Encode a file."""
while True:
s = input.read(MAXBINSIZE)
if not s:
break
while len(s) < MAXBINSIZE:
ns = input.read(MAXBINSIZE-len(s))
if not ns:
break
s += ns
line = binascii.b2a_base64(s)
output.write(line)
def decode(input, output):
"""Decode a file."""
while True:
line = input.readline()
if not line:
break
s = binascii.a2b_base64(line)
output.write(s)
def encodestring(s):
"""Encode a string into multiple lines of base-64 data."""
pieces = []
for i in range(0, len(s), MAXBINSIZE):
chunk = s[i : i + MAXBINSIZE]
pieces.append(binascii.b2a_base64(chunk))
return "".join(pieces)
def decodestring(s):
"""Decode a string."""
return binascii.a2b_base64(s)
# Useable as a script...
def test():
"""Small test program"""
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'deut')
except getopt.error, msg:
sys.stdout = sys.stderr
print msg
print """usage: %s [-d|-e|-u|-t] [file|-]
-d, -u: decode
-e: encode (default)
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
sys.exit(2)
func = encode
for o, a in opts:
if o == '-e': func = encode
if o == '-d': func = decode
if o == '-u': func = decode
if o == '-t': test1(); return
if args and args[0] != '-':
with open(args[0], 'rb') as f:
func(f, sys.stdout)
else:
func(sys.stdin, sys.stdout)
def test1():
s0 = "Aladdin:open sesame"
s1 = encodestring(s0)
s2 = decodestring(s1)
print s0, repr(s1), s2
if __name__ == '__main__':
test()
| ruibarreira/linuxtrail | usr/lib/python2.7/base64.py | Python | gpl-3.0 | 11,356 |
from __future__ import unicode_literals
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions
from django.contrib.auth import get_user_model
from . import models
User = get_user_model()
class UserForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(Field("name"))
class Meta:
model = User
fields = ["name"]
class ProfileForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
Field("picture"),
Field("bio"),
Submit("update", "Update", css_class="btn-success"),
)
class Meta:
model = models.Profile
fields = ["picture", "bio"]
| arocks/edge | src/profiles/forms.py | Python | mit | 1,105 |
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from xml.sax.saxutils import quoteattr, escape
from urllib import quote
from datetime import datetime,timedelta
from MaKaC.webinterface.pages.conferences import WPConferenceBase, WPConferenceModifBase
from MaKaC.webinterface.pages.conferences import WContribParticipantList
from MaKaC.webinterface import urlHandlers
from MaKaC.webinterface import wcomponents
from MaKaC import review
from indico.core.config import Config
from MaKaC.common import filters
from MaKaC.webinterface.common.contribStatusWrapper import ContribStatusList
from MaKaC.i18n import _
from indico.util.i18n import i18nformat
from MaKaC.common.fossilize import fossilize
from MaKaC.fossils.conference import ILocalFileAbstractMaterialFossil
from MaKaC.webinterface.pages.abstracts import WAbstractManagmentAccept, WAbstractManagmentReject
from MaKaC.common.TemplateExec import render
class WPTrackModifBase(WPConferenceModifBase):
sidemenu_option = 'program'
def __init__(self, rh, track, subTrack=None):
WPConferenceModifBase.__init__(self, rh, track.getConference())
self._track = track
self._subTrack = subTrack
def _getNavigationDrawer(self):
if self._subTrack:
target = self._subTrack
else:
target = self._track
pars = {"target": target, "isModif": True}
return wcomponents.WNavigationDrawer( pars, bgColor="white" )
def _createTabCtrl( self ):
self._tabCtrl = wcomponents.TabControl()
self._tabMain = self._tabCtrl.newTab( "main", _("Main"), \
urlHandlers.UHTrackModification.getURL( self._track ) )
self._tabSubTrack = None
self._tabCoordination= self._tabCtrl.newTab( "cc", \
_("Coordination control"), \
urlHandlers.UHTrackModifCoordination.getURL( self._track ) )
self._tabAbstracts = self._tabCtrl.newTab( "abstracts", _("Abstracts"), \
urlHandlers.UHTrackModifAbstracts.getURL( self._track ) )
if self._conf.getAbstractMgr().isActive() and self._conf.hasEnabledSection("cfa"):
self._tabAbstracts.enable()
self._tabCoordination.enable()
else:
self._tabAbstracts.disable()
self._tabCoordination.disable()
self._tabContribs=self._tabCtrl.newTab( "contribs", _("Contributions"), \
urlHandlers.UHTrackModContribList.getURL(self._track))
self._setActiveTab()
def _setActiveTab( self ):
pass
def _getPageContent( self, params ):
self._createTabCtrl()
banner = wcomponents.WTrackBannerModif(self._track, isManager=self._tabMain.isEnabled()).getHTML()
html = wcomponents.WTabControl( self._tabCtrl, self._getAW() ).getHTML( self._getTabContent( params ) )
return banner+html
def _getTabContent( self, params ):
return _("nothing")
def _getHeadContent(self):
return WPConferenceModifBase._getHeadContent(self) + render('js/mathjax.config.js.tpl') + \
'\n'.join(['<script src="{0}" type="text/javascript"></script>'.format(url)
for url in self._asset_env['mathjax_js'].urls()])
class WTrackModifMain(wcomponents.WTemplated):
def __init__(self,track):
self._track = track
def getVars(self):
vars=wcomponents.WTemplated.getVars(self)
vars["title"]=self.htmlText(self._track.getTitle())
vars["description"]=self.htmlText(self._track.getDescription())
vars["code"]=self.htmlText(self._track.getCode())
vars["dataModificationURL"]=quoteattr(str(urlHandlers.UHTrackDataModif.getURL(self._track)))
return vars
class WPTrackModification( WPTrackModifBase ):
def _getTabContent( self, params ):
comp=WTrackModifMain(self._track)
return comp.getHTML()
class WTrackDataModification(wcomponents.WTemplated):
def __init__(self,track):
self._track=track
self._conf=track.getConference()
def getVars(self):
vars=wcomponents.WTemplated.getVars(self)
vars["code"]=quoteattr(str(self._track.getCode()))
vars["title"]=quoteattr(str(self._track.getTitle()))
vars["description"]=self.htmlText(self._track.getDescription())
vars["postURL"]=quoteattr(str(urlHandlers.UHTrackPerformDataModification.getURL(self._track)))
return vars
class WPTrackDataModification( WPTrackModification ):
def _getTabContent( self, params ):
p=WTrackDataModification(self._track)
return p.getHTML()
class _AbstractStatusTrackView:
_label = ""
_color = ""
_id = ""
_icon = ""
def __init__( self, track, abstract ):
self._track = track
self._abstract = abstract
def getLabel( cls ):
return _(cls._label)
# don't forget to call the translation function
getLabel=classmethod(getLabel)
def getComment( self ):
return ""
def getResponsible( self ):
return None
def getDate( self ):
return None
def getColor( cls ):
return cls._color
getColor = classmethod( getColor )
def getId( cls ):
return cls._id
getId = classmethod( getId )
def getIconURL( cls ):
return Config.getInstance().getSystemIconURL( cls._icon )
getIconURL = classmethod( getIconURL )
class _ASTrackViewSubmitted( _AbstractStatusTrackView ):
_color = "white"
_id = "submitted"
_icon = "ats_submitted"
#don't modify it _("submitted")
_label = "submitted"
class _ASTrackViewAccepted( _AbstractStatusTrackView ):
_color = "white"
_id = "accepted"
_icon = "ats_accepted"
#don't modify it _("accepted")
_label = "accepted"
def getComment( self ):
return self._abstract.getCurrentStatus().getComments()
def getResponsible( self ):
return self._abstract.getCurrentStatus().getResponsible()
def getDate( self ):
return self._abstract.getCurrentStatus().getDate()
def getContribType(self):
return self._abstract.getCurrentStatus().getType()
class _ASTrackViewAcceptedForOther( _AbstractStatusTrackView ):
_color = "white"
_id = "accepted_other"
_icon = "ats_accepted_other"
#don't modify it _("accepted for other track")
_label = "accepted for another track"
def getComment( self ):
return self._abstract.getCurrentStatus().getComments()
def getResponsible( self ):
return self._abstract.getCurrentStatus().getResponsible()
def getDate( self ):
return self._abstract.getCurrentStatus().getDate()
def getTrack( self ):
return self._abstract.getCurrentStatus().getTrack()
class _ASTrackViewRejected( _AbstractStatusTrackView ):
_color = "white"
_id = "rejected"
_icon = "ats_rejected"
#don't modify it _("rejected")
_label = "rejected"
def getComment( self ):
return self._abstract.getCurrentStatus().getComments()
def getResponsible( self ):
return self._abstract.getCurrentStatus().getResponsible()
def getDate( self ):
return self._abstract.getCurrentStatus().getDate()
class _ASTrackViewPA( _AbstractStatusTrackView ):
_color = "white"
_id = "pa"
_icon = "ats_prop_accept"
#next comment is for translation extraction
# _("proposed to be accepted")
_label = "proposed to be accepted"
def getComment( self ):
jud = self._abstract.getTrackJudgement( self._track )
return jud.getComment()
def getResponsible( self ):
jud = self._abstract.getTrackJudgement( self._track )
return jud.getResponsible()
def getDate( self ):
jud = self._abstract.getTrackJudgement( self._track )
return jud.getDate()
def getContribType( self ):
jud = self._abstract.getTrackJudgement( self._track )
return jud.getContribType()
def getConflicts( self ):
# Even if the abstract is not in "in conflict" status, we want to
# show the current conflicts
acc = self._abstract.getTrackAcceptanceList()
#If there is only 1 track judgement accepting the abstract and it's
# ours, then no conflict is reported
if len( acc ) == 1 and acc[0].getTrack() == self._track:
return []
return acc
class _ASTrackViewPR( _AbstractStatusTrackView ):
_color = "white"
_id = "pr"
_icon = "ats_prop_reject"
#don't modify it _("proposed to be rejected")
_label = "proposed to be rejected"
def getComment( self ):
jud = self._abstract.getTrackJudgement( self._track )
return jud.getComment()
def getResponsible( self ):
jud = self._abstract.getTrackJudgement( self._track )
return jud.getResponsible()
def getDate( self ):
jud = self._abstract.getTrackJudgement( self._track )
return jud.getDate()
class _ASTrackViewIC( _AbstractStatusTrackView ):
_color = "white"
_id = "c"
_icon = "as_conflict"
_label = "in conflict"
class _ASTrackViewPFOT( _AbstractStatusTrackView ):
_color = "white"
_id = "pfot"
_icon = "ats_prop_other_track"
#don't modify it _("proposed for other tracks")
_label = "proposed for other tracks"
def getComment( self ):
jud = self._abstract.getTrackJudgement( self._track )
return jud.getComment()
def getResponsible( self ):
jud = self._abstract.getTrackJudgement( self._track )
return jud.getResponsible()
def getDate( self ):
jud = self._abstract.getTrackJudgement( self._track )
return jud.getDate()
def getProposedTrackList( self ):
jud = self._abstract.getTrackJudgement( self._track )
return jud.getProposedTrackList()
class _ASTrackViewWithdrawn( _AbstractStatusTrackView ):
_color = "white"
_id = "withdrawn"
_icon = "ats_withdrawn"
#don't modify it _("withdrawn")
_label = "withdrawn"
def getComment( self ):
return self._abstract.getCurrentStatus().getComments()
def getDate( self ):
return self._abstract.getCurrentStatus().getDate()
class _ASTrackViewDuplicated( _AbstractStatusTrackView ):
_color = "white"
_id = "duplicated"
_icon = "ats_withdrawn"
#don't modify it _("duplicated")
_label = "duplicated"
def getComment( self ):
return self._abstract.getCurrentStatus().getComments()
def getResponsible( self ):
return self._abstract.getCurrentStatus().getResponsible()
def getDate( self ):
return self._abstract.getCurrentStatus().getDate()
def getOriginal(self):
return self._abstract.getCurrentStatus().getOriginal()
class _ASTrackViewMerged(_AbstractStatusTrackView):
_color = "white"
_id = "merged"
_icon = "ats_withdrawn"
#don't modify it _("merged")
_label = "merged"
def getComment( self ):
return self._abstract.getCurrentStatus().getComments()
def getResponsible( self ):
return self._abstract.getCurrentStatus().getResponsible()
def getDate( self ):
return self._abstract.getCurrentStatus().getDate()
def getTarget(self):
return self._abstract.getCurrentStatus().getTargetAbstract()
class AbstractStatusTrackViewFactory:
def __init__(self):
self._status = {
_ASTrackViewSubmitted.getId(): _ASTrackViewSubmitted, \
_ASTrackViewAccepted.getId(): _ASTrackViewAccepted, \
_ASTrackViewAcceptedForOther.getId(): _ASTrackViewAcceptedForOther, \
_ASTrackViewRejected.getId(): _ASTrackViewRejected, \
_ASTrackViewPA.getId(): _ASTrackViewPA, \
_ASTrackViewPR.getId(): _ASTrackViewPR, \
_ASTrackViewIC.getId(): _ASTrackViewIC, \
_ASTrackViewPFOT.getId(): _ASTrackViewPFOT, \
_ASTrackViewWithdrawn.getId(): _ASTrackViewWithdrawn,\
_ASTrackViewDuplicated.getId(): _ASTrackViewDuplicated, \
_ASTrackViewMerged.getId(): _ASTrackViewMerged }
def getStatus( track, abstract ):
d = { \
review.AbstractStatusSubmitted: _ASTrackViewSubmitted, \
review.AbstractStatusRejected: _ASTrackViewRejected, \
review.AbstractStatusWithdrawn: _ASTrackViewWithdrawn, \
review.AbstractStatusDuplicated: _ASTrackViewDuplicated, \
review.AbstractStatusMerged: _ASTrackViewMerged \
}
status = abstract.getCurrentStatus()
if d.has_key( status.__class__ ):
return d[ status.__class__ ](track, abstract)
# return d[ status.__class__ ](track, abstract)
#For the accepted status, we need to know if it has been accepted for
# the current track
if status.__class__ == review.AbstractStatusAccepted:
if status.getTrack() != track:
return _ASTrackViewAcceptedForOther( track, abstract )
return _ASTrackViewAccepted( track, abstract )
#If it is not in one of the "common" status we must see if a judgement
# for the current track has already been done
jud = abstract.getTrackJudgement( track )
if jud:
if jud.__class__ == review.AbstractAcceptance:
return _ASTrackViewPA( track, abstract )
elif jud.__class__ == review.AbstractRejection:
return _ASTrackViewPR( track, abstract )
elif jud.__class__ == review.AbstractReallocation:
return _ASTrackViewPFOT( track, abstract )
elif jud.__class__ == review.AbstractInConflict:
return _ASTrackViewIC( track, abstract )
#If no judgement exists for the current track the abstract is in
# SUBMITTED status for the track
return _ASTrackViewSubmitted( track, abstract )
getStatus = staticmethod( getStatus )
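# Editor's sketch (hypothetical track/abstract objects): resolving the view
# class for an abstract that a track coordinator proposed for acceptance.
#
#   view = AbstractStatusTrackViewFactory.getStatus(track, abstract)
#   view.getLabel()    # "proposed to be accepted" (_ASTrackViewPA)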
def getStatusList( self ):
return self._status.values()
def getStatusById( self, id ):
return self._status[id]
class WTrackModifAbstracts( wcomponents.WTemplated ):
def __init__( self, track, filterCrit, sortingCrit, order, filterUsed=False, canModify=False ):
self._track = track
self._conf = self._track.getConference()
self._filterCrit = filterCrit
self._sortingCrit = sortingCrit
self._order = order
self._filterUsed = filterUsed
self._canModify = canModify
def _getAbstractHTML( self, abstract ):
aStatus = AbstractStatusTrackViewFactory().getStatus( self._track, abstract )
url = urlHandlers.UHTrackAbstractModif.getURL( self._track, abstract )
label = aStatus.getLabel()
icon = """<img src=%s border="0" alt="">"""%quoteattr( str( aStatus.getIconURL() ) )
accType=""
if isinstance(aStatus,_ASTrackViewPA):
if aStatus.getContribType() is not None and aStatus.getContribType()!="":
accType=aStatus.getContribType().getName()
if aStatus.getConflicts():
label = i18nformat("""%s<br><font color="red">[ _("conflicts") ]</font>""")%label
elif isinstance(aStatus,_ASTrackViewAccepted):
if aStatus.getContribType() is not None and aStatus.getContribType()!="":
accType=aStatus.getContribType().getName()
contribType=abstract.getContribType()
contribTypeName = i18nformat("""--_("not specified")--""")
if contribType is not None:
contribTypeName=contribType.getName()
comments = ""
if abstract.getComments():
comments = i18nformat(""" <img src=%s alt="_("The submitter filled some comments")">""")%(quoteattr(Config.getInstance().getSystemIconURL("comments")))
html = """
<tr id="abstracts%s" class="abstract">
<td align="right" width="3%%" valign="top"><input type="checkbox" name="abstracts" value="%s"%s></td>
<td nowrap class="CRLabstractDataCell">%s%s</td>
<td width="100%%" align="left" valign="top" class="CRLabstractDataCell">
<a href="%s">%s</a></td>
<td valign="top" class="CRLabstractDataCell">%s</td>
<td nowrap valign="top" class="CRLabstractDataCell">%s %s</td>
<td valign="top" class="CRLabstractDataCell">%s</td>
<td nowrap valign="top" class="CRLabstractDataCell">%s</td>
</tr>
""" % (abstract.getId(), \
abstract.getId(),self._checked, \
self.htmlText(abstract.getId()),comments,\
str(url),self.htmlText(abstract.getTitle()),\
self.htmlText(contribTypeName),icon, \
label,self.htmlText(accType),\
abstract.getSubmissionDate().strftime("%d %B %Y"))
return html
def _getURL( self ):
#builds the URL to the track management abstract list page
# preserving the current filter and sorting status
url = urlHandlers.UHTrackModifAbstracts.getURL( self._track )
if self._filterCrit.getField( "type" ):
l=[]
for t in self._filterCrit.getField( "type" ).getValues():
if t:
l.append(t.getId())
url.addParam( "selTypes", l )
if self._filterCrit.getField( "type" ).getShowNoValue():
url.addParam( "typeShowNoValue", "1" )
if self._filterCrit.getField( "status" ):
url.addParam( "selStatus", self._filterCrit.getField( "status" ).getValues() )
if self._filterCrit.getField( "acc_type" ):
l=[]
for t in self._filterCrit.getField( "acc_type" ).getValues():
if t:
l.append(t.getId())
url.addParam("selAccTypes",l)
if self._filterCrit.getField( "acc_type" ).getShowNoValue():
url.addParam( "accTypeShowNoValue", "1" )
if self._filterCrit.getField( "multiple_tracks" ):
url.addParam( "selMultipleTracks", "1" )
if self._filterCrit.getField( "comment" ):
url.addParam( "selOnlyComment", "1" )
if self._sortingCrit.getField():
url.addParam( "sortBy", self._sortingCrit.getField().getId() )
url.setSegment("abstracts")
return url
def _getTypeFilterItemList( self ):
checked = ""
if self._filterCrit.getField("type").getShowNoValue():
checked = " checked"
l = [ i18nformat("""<input type="checkbox" name="typeShowNoValue"%s> --_("not specified")--""")%checked]
for type in self._conf.getContribTypeList():
checked = ""
if type in self._filterCrit.getField("type").getValues():
checked = " checked"
l.append( """<input type="checkbox" name="selTypes" value=%s%s> %s"""%(quoteattr(type.getId()), checked, self.htmlText(type.getName())) )
return l
def _getAccTypeFilterItemList(self):
checked=""
if self._filterCrit.getField("acc_type").getShowNoValue():
checked = " checked"
l = [ i18nformat("""<input type="checkbox" name="accTypeShowNoValue"%s> --_("not specified")--""")%checked]
for type in self._conf.getContribTypeList():
checked = ""
if type in self._filterCrit.getField("acc_type").getValues():
checked=" checked"
l.append("""<input type="checkbox" name="selAccTypes" value=%s%s> %s"""%(quoteattr(type.getId()),checked,self.htmlText(type.getName())))
return l
def _getStatusFilterItemList( self ):
l = []
for statusKlass in AbstractStatusTrackViewFactory().getStatusList():
checked = ""
statusId = statusKlass.getId()
statusCaption = statusKlass.getLabel()
if statusId in self._filterCrit.getField("status").getValues():
checked = " checked"
iconHTML = """<img src=%s border="0" alt="">"""%quoteattr( str( statusKlass.getIconURL() ) )
l.append( """<input type="checkbox" name="selStatus" value=%s%s> %s %s"""%(quoteattr(statusId), checked, iconHTML, self.htmlText( statusCaption )) )
return l
def _getOthersFilterItemList( self ):
checkedMulTracks, checkedOnlyComment = "", ""
if self._filterCrit.getField("multiple_tracks"):
checkedMulTracks = " checked"
if self._filterCrit.getField("comment"):
checkedOnlyComment = " checked"
l = [ i18nformat("""<input type="checkbox" name="selMultipleTracks"%s> _("only multiple tracks")""")%checkedMulTracks, \
i18nformat("""<input type="checkbox" name="selOnlyComment"%s> _("only with comments")""")%checkedOnlyComment]
return l
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
vars["types"] = "<br>".join( self._getTypeFilterItemList() )
vars["status"] = "<br>".join( self._getStatusFilterItemList() )
vars["others"] = "<br>".join( self._getOthersFilterItemList() )
vars["accTypes"] = "<br>".join( self._getAccTypeFilterItemList() )
f = filters.SimpleFilter(self._filterCrit,self._sortingCrit)
al = []
abstractsToPrint = []
self._checked = ""
if vars["selectAll"]:
self._checked = " checked"
abstractList = f.apply( self._track.getAbstractList() )
for abstract in abstractList:
al.append( self._getAbstractHTML( abstract ) )
abstractsToPrint.append("""<input type="hidden" name="abstracts" value="%s">"""%abstract.getId())
vars["filteredNumberAbstracts"] = str(len(abstractList))
vars["totalNumberAbstracts"] = str(len(self._track.getAbstractList()))
if self._order == "up":
al.reverse()
vars["abstracts"] = "".join( al )
vars["abstractsToPrint"] = "\n".join(abstractsToPrint)
sortingField = self._sortingCrit.getField()
vars["currentSorting"] = ""
for crit in ["type", "status", "number", "date"]:
url = self._getURL()
vars["%sImg" % crit] = ""
url.addParam("sortBy", crit)
if sortingField.getId() == crit:
vars["currentSorting"] = '<input type="hidden" name="sortBy" value="%s">' % crit
if self._order == "down":
vars["%sImg" % crit] = """<img src="%s" alt="">"""%(Config.getInstance().getSystemIconURL("downArrow"))
url.addParam("order","up")
elif self._order == "up":
vars["%sImg" % crit] = """<img src="%s" alt="">"""%(Config.getInstance().getSystemIconURL("upArrow"))
url.addParam("order","down")
vars["%sSortingURL" % crit] = str(url)
url = urlHandlers.UHTrackModifAbstracts.getURL( self._track )
url.addParam("order", self._order)
url.addParam("OK", "1")
url.setSegment( "abstracts" )
vars["filterPostURL"]=quoteattr(str(url))
vars["accessAbstract"] = quoteattr(str(urlHandlers.UHTrackAbstractDirectAccess.getURL(self._track)))
vars["allAbstractsURL"] = str(urlHandlers.UHConfAbstractManagment.getURL(self._conf))
l = []
for tpl in self._conf.getAbstractMgr().getNotificationTplList():
l.append("""<option value="%s">%s</option>"""%(tpl.getId(), tpl.getName()))
vars["notifTpls"] = "\n".join(l)
vars["actionURL"]=quoteattr(str(urlHandlers.UHAbstractsTrackManagerAction.getURL(self._track)))
vars["selectURL"]=quoteattr(str(urlHandlers.UHTrackModifAbstracts.getURL(self._track)))
vars["filterUsed"] = self._filterUsed
vars["resetFiltersURL"] = str(urlHandlers.UHTrackModifAbstracts.getURL(self._track))
vars["pdfIconURL"] = quoteattr(str(Config.getInstance().getSystemIconURL("pdf")))
vars["canModify"] = self._canModify
return vars
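# Note on the sorting loop in getVars above: each column header link is
# rebuilt via _getURL so the active filters survive, and the order param
# is flipped (down -> up, up -> down) so clicking an already-sorted
# column reverses the ordering.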
class WPTrackModifAbstracts( WPTrackModifBase ):
def __init__(self, rh, track, msg, filterUsed, order):
self._msg = msg
self._filterUsed = filterUsed
self._order = order
WPTrackModifBase.__init__(self, rh, track)
def _setActiveTab( self ):
conf = self._track.getConference()
if not conf.canModify( self._getAW() ):
self._tabMain.disable()
self._tabCoordination.disable()
self._tabAbstracts.setActive()
def _getTabContent( self, params ):
canModify = self._track.getConference().canModify(self._getAW())
wc = WTrackModifAbstracts( self._track, \
params["filterCrit"], \
params["sortingCrit"], \
self._order, \
self._filterUsed, canModify )
pars = { "selectAll": params.get("selectAll", None), \
"directAbstractMsg": escape(self._msg) }
return wc.getHTML(pars)
class WPTrackAbstractModifBase(WPConferenceModifBase):
sidemenu_option = 'program'
def __init__(self, rh, track, abstract):
self._abstract = abstract
self._track = track
WPConferenceModifBase.__init__(self, rh, self._track.getConference())
def _getNavigationDrawer(self):
pars = {"target": self._abstract, "isModif": True, "track": self._track}
return wcomponents.WNavigationDrawer( pars, bgColor="white" )
def _getPageContent( self, params ):
self._createTabCtrl()
banner = wcomponents.WTrackBannerModif(self._track, self._abstract, isManager=self._abstract.getConference().canModify( self._getAW() )).getHTML()
body = wcomponents.WTabControl( self._tabCtrl, self._getAW() ).getHTML( self._getTabContent( params ) )
return banner + body
def _createTabCtrl( self ):
self._tabCtrl = wcomponents.TabControl()
self._tabMain = self._tabCtrl.newTab( "main", _("Main"), \
urlHandlers.UHTrackAbstractModif.getURL(self._track,self._abstract))
nComments=""
if len(self._abstract.getIntCommentList()) > 0:
nComments = " (%s)"%len(self._abstract.getIntCommentList())
self._tabComments=self._tabCtrl.newTab( "comments", _("Internal comments%s")%nComments,\
urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract))
self._setActiveTab()
def _setActiveTab( self ):
pass
def _getTabContent( self, params ):
return _("nothing")
class WTrackAbstractModification( wcomponents.WTemplated ):
def __init__( self, track, abstract ):
self._track = track
self._abstract = abstract
def _getAuthorHTML( self, auth ):
tmp = "%s (%s)"%(auth.getFullName(), auth.getAffiliation())
tmp = self.htmlText( tmp )
if auth.getEmail() != "":
mailtoSubject = i18nformat("""[%s] _("Abstract") %s: %s""")%( self._track.getConference().getTitle(), self._abstract.getId(), self._abstract.getTitle() )
mailtoURL = "mailto:%s?subject=%s"%( auth.getEmail(), quote( mailtoSubject ) )
href = quoteattr( mailtoURL )
tmp = """<a href=%s>%s</a>"""%(href, tmp)
return tmp
def _getStatusDetailsHTML( self, status ):
res = "%s"%self.htmlText( status.getLabel().upper() )
if isinstance(status, _ASTrackViewPFOT):
l = []
for track in status.getProposedTrackList():
l.append( self.htmlText( track.getTitle() ) )
res = """%s: <br><font size="-1">%s</font>"""%(res, ", ".join(l))
elif isinstance(status, _ASTrackViewPA):
ct=""
if status.getContribType() is not None:
ct=" (%s)"%self.htmlText(status.getContribType().getName())
# Append the proposed contribution type; ct was computed but never
# used in the original code.
res = "%s%s"%(res, ct)
elif isinstance(status, _ASTrackViewIC):
res = self.htmlText(status.getLabel().upper())
elif isinstance(status, _ASTrackViewDuplicated):
orig=status.getOriginal()
url=urlHandlers.UHAbstractManagment.getURL(orig)
originalHTML="""<a href=%s target="_blank">%s-%s</a>"""%(\
quoteattr(str(url)), \
self.htmlText(orig.getId()),\
self.htmlText(orig.getTitle()))
if self._track.hasAbstract(orig):
url=urlHandlers.UHTrackAbstractModif.getURL(self._track,orig)
originalHTML="<a href=%s>%s-%s</a>"%( quoteattr(str(url)), \
self.htmlText(orig.getId()),\
self.htmlText(orig.getTitle()))
res = """%s (%s)"""%(self.htmlText( status.getLabel().upper()), \
originalHTML)
elif isinstance(status, _ASTrackViewMerged):
target=status.getTarget()
url=urlHandlers.UHAbstractManagment.getURL(target)
originalHTML="""<a href=%s target="_blank">%s-%s</a>"""%(\
quoteattr(str(url)), \
self.htmlText(target.getId()),\
self.htmlText(target.getTitle()))
if self._track.hasAbstract(target):
url=urlHandlers.UHTrackAbstractModif.getURL(self._track,target)
originalHTML="<a href=%s>%s-%s</a>"%( quoteattr(str(url)), \
self.htmlText(target.getId()),\
self.htmlText(target.getTitle()))
res = """%s (%s)"""%(self.htmlText( status.getLabel().upper()), \
originalHTML)
elif isinstance(status,_ASTrackViewAccepted):
if status.getContribType() is not None and \
status.getContribType()!="":
res = "%s as %s"%(self.htmlText(status.getLabel().upper()), \
self.htmlText(status.getContribType().getName()))
return res
def _getLastJudgement(self):
jud = self._abstract.getLastJudgementPerReviewer(self._rh.getAW().getUser(), self._track)
if isinstance(jud, review.AbstractAcceptance):
return "Proposed to be accepted"
elif isinstance(jud, review.AbstractRejection):
return "Proposed to be rejected"
elif isinstance(jud, review.AbstractReallocation):
return "Proposed for other tracks"
elif isinstance(jud, review.AbstractMarkedAsDuplicated):
return "Marked as duplicated"
elif isinstance(jud, review.AbstractUnMarkedAsDuplicated):
return "Unmarked as duplicated"
return None
def _getLastJudgementComment(self):
jud = self._abstract.getLastJudgementPerReviewer(self._rh.getAW().getUser(), self._track)
return self.htmlText(jud.getComment()) if jud else None
def _getStatusCommentsHTML( self, status ):
comment = ""
if status.getId() in ["accepted", "accepted_other", "rejected",
"withdrawn", "duplicated"]:
comment = self.htmlText( status.getComment() )
elif status.getId() == 'pa':
conflicts = status.getConflicts()
if conflicts:
if comment != "":
comment = "%s<br><br>"%comment
l = []
for jud in conflicts:
if jud.getTrack() != self._track:
l.append( "%s ( %s )"%( jud.getTrack().getTitle(), \
self._getAuthorHTML( jud.getResponsible() ) ) )
comment = i18nformat("""%s<font color="red">_("In conflict with"): <br> %s</font>""")%(comment, "<br>".join(l) )
rl = self._abstract.getReallocationTargetedList( self._track )
if rl:
comment = i18nformat("""%s<br><br><font color="green">_("Proposed by") <i>%s</i>(%s): <br>%s</font>""") % (
comment,
self.htmlText(rl[0].getTrack().getTitle()),
self._getAuthorHTML(rl[0].getResponsible()) if rl[0].getResponsible() else '',
self.htmlText(rl[0].getComment())
)
return comment
def _getContribHTML(self):
res = ""
contrib = self._abstract.getContribution()
if contrib is not None:
url=urlHandlers.UHContributionModification.getURL(contrib)
title=self.htmlText(contrib.getTitle())
id=self.htmlText(contrib.getId())
res = """<a href=%s>%s - %s</a>"""%(quoteattr(str(url)),id,title)
return res
def _getAdditionalFields(self):
fields = []
afm = self._abstract.getConference().getAbstractMgr().getAbstractFieldsMgr()
for f in afm.getActiveFields():
fid = f.getId()
caption = f.getCaption()
fields.append((self.htmlText(caption),
str(self._abstract.getField(fid))))
return fields
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
vars["title"] = self.htmlText( self._abstract.getTitle() )
vars["abstractPDF"] = urlHandlers.UHAbstractTrackManagerDisplayPDF.getURL( self._track, self._abstract )
vars["printIconURL"] = Config.getInstance().getSystemIconURL( "pdf" )
vars["additionalFields"] = self._getAdditionalFields()
primary = []
for author in self._abstract.getPrimaryAuthorList():
primary.append(self._getAuthorHTML(author))
vars["primary_authors"] = "<br>".join( primary )
secondary = []
for author in self._abstract.getCoAuthorList():
secondary.append(self._getAuthorHTML(author))
vars["co_authors"] = "<br>".join( secondary )
speakers = []
for author in self._abstract.getSpeakerList():
speakers.append(self._getAuthorHTML(author))
vars["speakers"] = "<br>".join( speakers )
vars["type"] = i18nformat("""--_("not specified")--""")
if self._abstract.getContribType() is not None:
vars["type"] = self.htmlText( self._abstract.getContribType().getName() )
tracks = []
for track in self._abstract.getTrackListSorted():
tracks.append( """%s"""%track.getTitle() )
vars["tracks"] = "<br>".join( tracks )
vars["submitter"] = self._getAuthorHTML( self._abstract.getSubmitter() )
vars["submissionDate"] = self._abstract.getSubmissionDate().strftime( "%d %B %Y %H:%M" )
vars["modificationDate"] = self._abstract.getModificationDate().strftime( "%d %B %Y %H:%M" )
aStatus = AbstractStatusTrackViewFactory().getStatus( self._track, self._abstract )
vars["statusDetails"] = self._getStatusDetailsHTML( aStatus )
vars["statusComment"] = self._getStatusCommentsHTML( aStatus )
vars["proposeToAccURL"] = quoteattr(str(urlHandlers.UHTrackAbstractPropToAcc.getURL(self._track,self._abstract)))
vars["proposeToRejURL"] = quoteattr(str(urlHandlers.UHTrackAbstractPropToRej.getURL(self._track,self._abstract)))
vars["proposeForOtherTracksURL"] = quoteattr( str( urlHandlers.UHTrackAbstractPropForOtherTrack.getURL( self._track, self._abstract) ) )
vars["comments"] = self._abstract.getComments()
vars["abstractId"] = self._abstract.getId()
vars["showDuplicated"] = False
if aStatus.getId() in ["pa","pr","submitted","pfot"]:
vars["duplicatedURL"] = quoteattr(str(urlHandlers.UHTrackAbstractModMarkAsDup.getURL(self._track,self._abstract)))
vars["isDuplicated"] = False
vars["showDuplicated"] = True
elif aStatus.getId() == "duplicated":
vars["showDuplicated"] = vars["isDuplicated"] = True
vars["duplicatedURL"] = quoteattr(str(urlHandlers.UHTrackAbstractModUnMarkAsDup.getURL(self._track,self._abstract)))
vars["contribution"] = self._getContribHTML()
vars["buttonsStatus"] = "enabled"
if aStatus.getId() in ["accepted", "rejected", "accepted_other"]:
vars["buttonsStatus"] = "disabled"
rating = self._abstract.getRatingPerReviewer(self._rh.getAW().getUser(), self._track)
if rating is None:
vars["rating"] = ""
else:
vars["rating"] = "%.2f" % rating
vars["lastJudgement"] = self._getLastJudgement()
vars["lastJudgementComment"] = self._getLastJudgementComment()
vars["scaleLower"] = self._abstract.getConference().getConfAbstractReview().getScaleLower()
vars["scaleHigher"] = self._abstract.getConference().getConfAbstractReview().getScaleHigher()
vars["attachments"] = fossilize(self._abstract.getAttachments().values(), ILocalFileAbstractMaterialFossil)
vars["showAcceptButton"] = self._abstract.getConference().getConfAbstractReview().canReviewerAccept()
vars["acceptURL"] = quoteattr(str(urlHandlers.UHTrackAbstractAccept.getURL(self._track, self._abstract)))
vars["rejectURL"] = quoteattr(str(urlHandlers.UHTrackAbstractReject.getURL(self._track, self._abstract)))
return vars
class WPTrackAbstractModif( WPTrackAbstractModifBase ):
def _getTabContent( self, params ):
wc = WTrackAbstractModification( self._track, self._abstract )
return wc.getHTML()
class WPTrackAbstractAccept(WPTrackAbstractModifBase):
def _getTabContent(self, params):
wc = WAbstractManagmentAccept(self._getAW(), self._abstract, self._track)
return wc.getHTML()
class WPTrackAbstractReject(WPTrackAbstractModifBase):
def _getTabContent(self, params):
wc = WAbstractManagmentReject(self._getAW(), self._abstract, self._track)
return wc.getHTML()
class WTrackAbstractPropToAcc( wcomponents.WTemplated ):
def __init__( self, track, abstract ):
self._abstract = abstract
self._track = track
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
vars["abstractTitle"] = self._abstract.getTitle()
vars["trackTitle"] = self._track.getTitle()
vars["postURL"] = urlHandlers.UHTrackAbstractPropToAcc.getURL( self._track, self._abstract )
l = []
conf = self._abstract.getConference()
vars["abstractReview"] = conf.getConfAbstractReview()
for ctype in conf.getContribTypeList():
selected = ""
if "contribType" in vars:
if ctype==vars["contribType"]:
selected = " selected"
elif self._abstract.getContribType()==ctype:
selected = " selected"
l.append( """<option value="%s"%s>%s</option>"""%(ctype.getId(), selected, ctype.getName() ) )
vars["contribTypes"] = ""
if len(l) > 0:
vars["contribTypes"] = i18nformat("""
<tr>
<td nowrap class="titleCellTD"><span class="titleCellFormat">
_("Proposed contribution type"):
</td>
<td>
<select name="contribType">%s</select>
</td>
</tr>
""")%("".join(l))
return vars
class WPTrackAbstractPropToAcc( WPTrackAbstractModifBase ):
def _getTabContent( self, params ):
wc=WTrackAbstractPropToAcc(self._track,self._abstract)
p={"comment":params.get("comment",""),\
"contribType":params.get("contribType",self._abstract.getContribType())}
return wc.getHTML(p)
class WTrackAbstractPropToRej( wcomponents.WTemplated ):
def __init__( self, track, abstract ):
self._abstract = abstract
self._track = track
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
vars["abstractTitle"] = self._abstract.getTitle()
vars["trackTitle"] = self._track.getTitle()
vars["postURL"] = urlHandlers.UHTrackAbstractPropToRej.getURL( self._track, self._abstract )
vars["abstractReview"] = self._abstract.getConference().getConfAbstractReview()
return vars
class WPTrackAbstractPropToRej( WPTrackAbstractModifBase ):
def _getTabContent( self, params ):
wc = WTrackAbstractPropToRej( self._track, self._abstract )
return wc.getHTML()
class WTrackAbstractPropForOtherTrack(wcomponents.WTemplated):
def __init__( self, track, abstract ):
self._abstract = abstract
self._track = track
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
vars["postURL"] = urlHandlers.UHTrackAbstractPropForOtherTrack.getURL( self._track, self._abstract )
vars["abstractTitle"] = self._abstract.getTitle()
vars["trackTitle"] = self._track.getTitle()
l = []
conf = self._abstract.getConference()
for track in conf.getTrackList():
checked, disabled = "", ""
if self._abstract.hasTrack( track ):
checked, disabled = " checked", " disabled"
l.append("""<input type="checkbox" name="selTracks" value="%s"%s%s> %s"""%(track.getId(), checked, disabled, self.htmlText(track.getTitle())))
vars["trackItems"] = "<br>".join( l )
return vars
class WPAbstractPropForOtherTracks( WPTrackAbstractModifBase ):
def _getTabContent( self, params ):
wc = WTrackAbstractPropForOtherTrack( self._track, self._abstract )
return wc.getHTML()
class WPModAbstractMarkAsDup(WPTrackAbstractModifBase):
def _getTabContent( self, params ):
wc = wcomponents.WAbstractModMarkAsDup(self._abstract)
p={"comments":params.get("comments",""),
"id":params.get("originalId",""),
"duplicateURL":urlHandlers.UHTrackAbstractModMarkAsDup.getURL(self._track,self._abstract),
"cancelURL":urlHandlers.UHTrackAbstractModif.getURL(self._track,self._abstract)}
return wc.getHTML(p)
class WPModAbstractUnMarkAsDup(WPTrackAbstractModifBase):
def _getTabContent( self, params ):
wc = wcomponents.WAbstractModUnMarkAsDup(self._abstract)
p={ "comments":params.get("comments",""),
"unduplicateURL":urlHandlers.UHTrackAbstractModUnMarkAsDup.getURL(self._track,self._abstract),
"cancelURL":urlHandlers.UHTrackAbstractModif.getURL(self._track,self._abstract)}
return wc.getHTML(p)
class WTrackModifCoordination( wcomponents.WTemplated ):
def __init__( self, track ):
self._track = track
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
vars["trackId"] = self._track.getId()
vars["confId"] = self._track.getConference().getId()
vars["coordinators"] = fossilize(self._track.getCoordinatorList())
return vars
class WPTrackModifCoordination( WPTrackModifBase ):
def _setActiveTab( self ):
self._tabCoordination.setActive()
def _getTabContent( self, params ):
wc = WTrackModifCoordination( self._track )
return wc.getHTML()
class WPModAbstractIntComments(WPTrackAbstractModifBase):
def _setActiveTab( self ):
self._tabComments.setActive()
def _commentEditURLGen(self,comment):
return urlHandlers.UHTrackAbstractModIntCommentEdit.getURL(self._track,comment)
def _commentRemURLGen(self,comment):
return urlHandlers.UHTrackAbstractModIntCommentRem.getURL(self._track,comment)
def _getTabContent( self, params ):
wc=wcomponents.WAbstractModIntComments(self._getAW(),self._abstract)
p={"newCommentURL":urlHandlers.UHTrackAbstractModIntCommentNew.getURL(self._track,self._abstract),
"commentEditURLGen":self._commentEditURLGen,
"commentRemURLGen":self._commentRemURLGen }
return wc.getHTML(p)
class WPModAbstractIntCommentNew(WPModAbstractIntComments):
def _getTabContent( self, params ):
wc=wcomponents.WAbstractModNewIntComment(self._getAW(),self._abstract)
p={"postURL":urlHandlers.UHTrackAbstractModIntCommentNew.getURL(self._track,self._abstract)}
return wc.getHTML(p)
class WPModAbstractIntCommentEdit(WPModAbstractIntComments):
def __init__(self,rh,track,comment):
self._comment=comment
WPModAbstractIntComments.__init__(self,rh,track,comment.getAbstract())
def _getTabContent( self, params ):
wc=wcomponents.WAbstractModIntCommentEdit(self._comment)
p={"postURL": urlHandlers.UHTrackAbstractModIntCommentEdit.getURL(self._track,self._comment)}
return wc.getHTML(p)
class WTrackModContribList(wcomponents.WTemplated):
def __init__(self,track,filterCrit, sortingCrit, order):
self._track=track
self._conf=self._track.getConference()
self._filterCrit=filterCrit
self._sortingCrit=sortingCrit
self._order = order
self._totaldur =timedelta(0)
def _getURL( self ):
#builds the URL to the contribution list page
# preserving the current filter and sorting status
url = urlHandlers.UHTrackModContribList.getURL(self._track)
if self._filterCrit.getField("type"):
l=[]
for t in self._filterCrit.getField("type").getValues():
if t!="":
l.append(t)
url.addParam("types",l)
if self._filterCrit.getField("type").getShowNoValue():
url.addParam("typeShowNoValue","1")
if self._filterCrit.getField("session"):
url.addParam("sessions",self._filterCrit.getField("session").getValues())
if self._filterCrit.getField("session").getShowNoValue():
url.addParam("sessionShowNoValue","1")
if self._filterCrit.getField("status"):
url.addParam("status",self._filterCrit.getField("status").getValues())
if self._sortingCrit.getField():
url.addParam("sortBy",self._sortingCrit.getField().getId())
url.addParam("order","down")
url.addParam("OK","1")
return url
def _getContribHTML(self,contrib):
sdate = ""
if contrib.isScheduled():
sdate=contrib.getStartDate().strftime("%Y-%b-%d %H:%M" )
title = """<a href=%s>%s</a>"""%(quoteattr(str(urlHandlers.UHContributionModification.getURL(contrib))), self.htmlText(contrib.getTitle()))
strdur = ""
if contrib.getDuration() is not None and contrib.getDuration().seconds != 0:
strdur = (datetime(1900,1,1)+ contrib.getDuration()).strftime("%Hh%M'")
dur = contrib.getDuration()
self._totaldur = self._totaldur + dur
l = []
for spk in contrib.getSpeakerList():
l.append( self.htmlText( spk.getFullName() ) )
speaker = "<br>".join( l )
session = ""
if contrib.getSession() is not None:
session=self.htmlText(contrib.getSession().getCode())
cType=""
if contrib.getType() is not None:
cType=self.htmlText(contrib.getType().getName())
status=contrib.getCurrentStatus()
statusCaption=ContribStatusList().getCode(status.__class__)
html = """
<tr>
<td><input type="checkbox" name="contributions" value=%s></td>
<td valign="top" class="abstractLeftDataCell">%s</td>
<td valign="top" nowrap class="abstractDataCell">%s</td>
<td valign="top" class="abstractDataCell">%s</td>
<td valign="top" class="abstractDataCell">%s</td>
<td valign="top" class="abstractDataCell">%s</td>
<td valign="top" class="abstractDataCell">%s</td>
<td valign="top" class="abstractDataCell">%s</td>
<td valign="top" class="abstractDataCell">%s</td>
</tr>
"""%(contrib.getId(), self.htmlText(contrib.getId()),\
sdate or " ",strdur or " ",cType or " ",title or " ",\
speaker or " ",session or " ",\
statusCaption or " ")
return html
def _getTypeItemsHTML(self):
checked=""
if self._filterCrit.getField("type").getShowNoValue():
checked=" checked"
res=[ i18nformat("""<input type="checkbox" name="typeShowNoValue" value="--none--"%s> --_("not specified")--""")%checked]
for t in self._conf.getContribTypeList():
checked=""
if t.getId() in self._filterCrit.getField("type").getValues():
checked=" checked"
res.append("""<input type="checkbox" name="types" value=%s%s> %s"""%(quoteattr(str(t.getId())),checked,self.htmlText(t.getName())))
return "<br>".join(res)
def _getSessionItemsHTML(self):
checked=""
if self._filterCrit.getField("session").getShowNoValue():
checked=" checked"
res=[ i18nformat("""<input type="checkbox" name="sessionShowNoValue" value="--none--"%s> --_("not specified")--""")%checked]
for s in self._conf.getSessionListSorted():
checked=""
if s.getId() in self._filterCrit.getField("session").getValues():
checked=" checked"
res.append("""<input type="checkbox" name="sessions" value=%s%s> (%s) %s"""%(quoteattr(str(s.getId())),checked,self.htmlText(s.getCode()),self.htmlText(s.getTitle())))
return "<br>".join(res)
def _getStatusItemsHTML(self):
res=[]
for st in ContribStatusList().getList():
id=ContribStatusList().getId(st)
checked=""
if id in self._filterCrit.getField("status").getValues():
checked=" checked"
code=ContribStatusList().getCode(st)
caption=ContribStatusList().getCaption(st)
res.append("""<input type="checkbox" name="status" value=%s%s> (%s) %s"""%(quoteattr(str(id)),checked,self.htmlText(code),self.htmlText(caption)))
return "<br>".join(res)
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
vars["quickAccessURL"]=quoteattr(str(urlHandlers.UHTrackModContribQuickAccess.getURL(self._track)))
vars["filterPostURL"]=quoteattr(str(urlHandlers.UHTrackModContribList.getURL(self._track)))
vars["authSearch"]=""
authField=self._filterCrit.getField("author")
if authField is not None:
vars["authSearch"]=quoteattr(str(authField.getValues()[0]))
vars["types"]=self._getTypeItemsHTML()
vars["sessions"]=self._getSessionItemsHTML()
vars["status"]=self._getStatusItemsHTML()
sortingField = self._sortingCrit.getField()
self._currentSorting=""
if sortingField is not None:
self._currentSorting=sortingField.getId()
vars["currentSorting"]=""
url=self._getURL()
url.addParam("sortBy","number")
vars["numberImg"]=""
if self._currentSorting == "number":
vars["currentSorting"] ="""<input type="hidden" name="sortBy" value="number">"""
if self._order == "down":
vars["numberImg"] = """<img src=%s alt="down">"""%(quoteattr(Config.getInstance().getSystemIconURL("downArrow")))
url.addParam("order","up")
elif self._order == "up":
vars["numberImg"] = """<img src=%s alt="up">"""%(quoteattr(Config.getInstance().getSystemIconURL("upArrow")))
url.addParam("order","down")
vars["numberSortingURL"]=quoteattr(str(url))
url = self._getURL()
url.addParam("sortBy", "date")
vars["dateImg"] = ""
if self._currentSorting == "date":
vars["currentSorting"]="""<input type="hidden" name="sortBy" value="date">"""
if self._order == "down":
vars["dateImg"]="""<img src=%s alt="down">"""%(quoteattr(Config.getInstance().getSystemIconURL("downArrow")))
url.addParam("order","up")
elif self._order == "up":
vars["dateImg"]="""<img src=%s alt="up">"""%(quoteattr(Config.getInstance().getSystemIconURL("upArrow")))
url.addParam("order","down")
vars["dateSortingURL"]=quoteattr(str(url))
url = self._getURL()
url.addParam("sortBy", "name")
vars["titleImg"] = ""
if self._currentSorting == "name":
vars["currentSorting"]="""<input type="hidden" name="sortBy" value="name">"""
if self._order == "down":
vars["titleImg"]="""<img src=%s alt="down">"""%(quoteattr(Config.getInstance().getSystemIconURL("downArrow")))
url.addParam("order","up")
elif self._order == "up":
vars["titleImg"]="""<img src=%s alt="up">"""%(quoteattr(Config.getInstance().getSystemIconURL("upArrow")))
url.addParam("order","down")
vars["titleSortingURL"]=quoteattr(str(url))
url = self._getURL()
url.addParam("sortBy", "speaker")
vars["speakerImg"]=""
if self._currentSorting=="speaker":
vars["currentSorting"] = """<input type="hidden" name="sortBy" value="speaker">"""
if self._order == "down":
vars["speakerImg"] = """<img src=%s alt="down">"""%(quoteattr(Config.getInstance().getSystemIconURL("downArrow")))
url.addParam("order","up")
elif self._order == "up":
vars["speakerImg"] = """<img src=%s alt="up">"""%(quoteattr(Config.getInstance().getSystemIconURL("upArrow")))
url.addParam("order","down")
vars["speakerSortingURL"]=quoteattr( str( url ) )
url = self._getURL()
url.addParam("sortBy","session")
vars["sessionImg"] = ""
if self._currentSorting == "session":
vars["currentSorting"] = """<input type="hidden" name="sortBy" value="session">"""
if self._order == "down":
vars["sessionImg"] = """<img src=%s alt="down">"""%(quoteattr(Config.getInstance().getSystemIconURL("downArrow")))
url.addParam("order","up")
elif self._order == "up":
vars["sessionImg"] = """<img src=%s alt="up">"""%(quoteattr(Config.getInstance().getSystemIconURL("upArrow")))
url.addParam("order","down")
vars["sessionSortingURL"] = quoteattr( str( url ) )
url = self._getURL()
url.addParam("sortBy", "type")
vars["typeImg"] = ""
if self._currentSorting == "type":
vars["currentSorting"]="""<input type="hidden" name="sortBy" value="type">"""
if self._order == "down":
vars["typeImg"]="""<img src=%s alt="down">"""%(quoteattr(Config.getInstance().getSystemIconURL("downArrow")))
url.addParam("order","up")
elif self._order == "up":
vars["typeImg"]="""<img src=%s alt="up">"""%(quoteattr(Config.getInstance().getSystemIconURL("upArrow")))
url.addParam("order","down")
vars["typeSortingURL"] = quoteattr( str( url ) )
f=filters.SimpleFilter(self._filterCrit,self._sortingCrit)
numContribs=0
l=[]
contribsToPrint = []
for contrib in f.apply(self._track.getContributionList()):
l.append(self._getContribHTML(contrib))
numContribs+=1
contribsToPrint.append("""<input type="hidden" name="contributions" value="%s">"""%contrib.getId())
if self._order =="up":
l.reverse()
vars["contributions"] = "".join(l)
vars["contribsToPrint"] = "".join(contribsToPrint)
vars["numContribs"]=str(numContribs)
vars["contributionActionURL"]=quoteattr(str(urlHandlers.UHTrackModContributionAction.getURL(self._track)))
vars["contributionsPDFURL"]=quoteattr(str(urlHandlers.UHTrackModToPDF.getURL(self._track)))
vars["participantListURL"]=quoteattr(str(urlHandlers.UHTrackModParticipantList.getURL(self._track)))
totaldur = self._totaldur
days = totaldur.days
hours = (totaldur.seconds)/3600
dayhours = (days * 24)+hours
mins = ((totaldur.seconds)/60)-(hours*60)
vars["totaldur" ]="""%sh%sm"""%(dayhours,mins)
return vars
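# Worked example of the total-duration formatting in getVars above: for
# totaldur = timedelta(days=1, seconds=5400) (i.e. 1 day 1h30m),
#   hours = 5400/3600 = 1, dayhours = 1*24 + 1 = 25, mins = 90 - 60 = 30,
# so vars["totaldur"] renders as "25h30m".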
class WPModContribList(WPTrackModifBase):
def _setActiveTab( self ):
conf = self._track.getConference()
if not conf.canModify( self._getAW() ):
self._tabMain.disable()
self._tabCoordination.disable()
self._hidingTrackTabs = True
self._tabContribs.setActive()
def _getTabContent( self, params ):
filterCrit=params.get("filterCrit",None)
sortingCrit=params.get("sortingCrit",None)
order = params.get("order","down")
wc=WTrackModContribList(self._track,filterCrit, sortingCrit, order)
return wc.getHTML()
class WPModParticipantList( WPTrackModifBase ):
def __init__(self, rh, track, emailList, displayedGroups, contribs):
WPTrackModifBase.__init__(self, rh, track)
self._emailList = emailList
self._displayedGroups = displayedGroups
self._contribs = contribs
def _getBody( self, params ):
WPTrackModifBase._getBody(self, params)
wc = WContribParticipantList(self._conf, self._emailList, self._displayedGroups, self._contribs)
params = {"urlDisplayGroup":urlHandlers.UHTrackModParticipantList.getURL(self._track)}
return wc.getHTML(params)
|
XeCycle/indico
|
indico/MaKaC/webinterface/pages/tracks.py
|
Python
|
gpl-3.0
| 58,388
|
import sys
import argparse
import logging
from datetime import datetime
import numpy
def findStartOfLUMO(mo):
# The Gaussian log reports "N alpha electrons ..."; that electron count
# is the 0-based column index of the first virtual (LUMO) orbital.
with open(mo, "r") as moFile:
for line in moFile:
if "alpha electrons" in line:
return int(line.split(None, 1)[0])
def findLUMOEnergy(MO):
# 'logFile' avoids shadowing the built-in 'file'.
with open(MO, 'r') as logFile:
for line in logFile:
# Extract the first LUMO orbital energy
if "Alpha virt. eigenvalues --" in line:
return line.split('Alpha virt. eigenvalues --')[1].split()[0]
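# Example of the Gaussian log line parsed above (format assumed from the
# parsing code): " Alpha virt. eigenvalues --   -0.01234   0.05678 ..."
# findLUMOEnergy returns the first value after the marker, here "-0.01234".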
# Process command line arguments
parser = argparse.ArgumentParser(description='Compute the electronic coupling J between two molecules from a dimer Fock matrix and monomer MO coefficients.')
parser.add_argument('-f', '--fock', dest = 'fock', type = str, required = True, help = 'fock matrix')
parser.add_argument('-mo1', '--MO1', dest = 'MO1', type = str, required = True, help = 'molecular orbitals for molecule 1')
parser.add_argument('-mo2', '--MO2', dest = 'MO2', type = str, required = True, help = 'molecular orbitals for molecule 2')
parser.add_argument('-l1', '--LUMO1', dest = 'l1', type = int, choices=set((0,1)), required = True, help = '0 - extract LUMO orbital, 1 - extract HOMO orbital; for molecule 1')
parser.add_argument('-l2', '--LUMO2', dest = 'l2', type = int, choices=set((0,1)), required = True, help = '0 - extract LUMO orbital, 1 - extract HOMO orbital; for molecule 2')
args = parser.parse_args()
# Load matrices
fock_matrix = numpy.loadtxt(args.fock)
molecule1_matrix = numpy.loadtxt(args.MO1)
molecule2_matrix = numpy.loadtxt(args.MO2)
# Half the dimension of the dimer Fock matrix, i.e. the number of basis
# functions per monomer ('//' keeps this an int under Python 2 and 3).
k = fock_matrix.shape[0] // 2
# Get the start of the lumo orbitals
mo1_cols = findStartOfLUMO(args.MO1[:-3] + "com.log")
mo2_cols = findStartOfLUMO(args.MO2[:-3] + "com.log")
# Choose between HOMO and LUMO orbitals for each molecule
if args.l1 == 1:
mo1_cols -= 1
if args.l2 == 1:
mo2_cols -= 1
LUMO1 = molecule1_matrix[:,mo1_cols]
LUMO2 = molecule2_matrix[:,mo2_cols]
# Transfer integral: J = sum_i sum_j c1_i * c2_j * F[i, j + n1], where the
# second molecule's basis functions occupy the second block of the Fock matrix.
result = 0
for i in range(len(LUMO1)):
for j in range(len(LUMO2)):
result += LUMO1[i]*LUMO2[j]*fock_matrix[i][j+len(LUMO1)]
print "J: ", result*627.51, "kcal/mol"
LUMO1Energy = float(findLUMOEnergy(args.MO1[:-3] + "com.log"))
LUMO2Energy = float(findLUMOEnergy(args.MO2[:-3] + "com.log"))
dG = LUMO2Energy - LUMO1Energy
print "Molecule 1 energy: ", LUMO1Energy, "hartrees"
print "Molecule 2 energy: ", LUMO2Energy, "hartrees"
print "dG: ", dG*627.51, "kcal/mol"
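# Usage sketch (assumed file layout: matrices are plain-text numpy dumps,
# with a Gaussian log "<prefix>com.log" alongside each MO file):
#   python calcJ.py -f fock.txt -mo1 mol1.MO -mo2 mol2.MO -l1 0 -l2 0
# This prints the coupling J and the site-energy gap dG, both converted
# from hartrees to kcal/mol via the 627.51 factor.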
|
NikTRSK/Electron-Charge-Transfer-Calculations
|
calcJ.py
|
Python
|
gpl-3.0
| 2,288
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_transform.test_case."""
import re
from tensorflow_transform import test_case
import unittest
class TftUnitTest(test_case.TransformTestCase):
def testCrossNamedParameters(self):
test_cases_1 = [
{'testcase_name': 'a_1_b_1', 'a': 1, 'b': 1},
{'testcase_name': 'a_3_b_3', 'a': 3, 'b': 3},
]
test_cases_2 = [
{'testcase_name': 'c_2', 'c': 2},
{'testcase_name': 'c_4', 'c': 4},
]
expected_cross = [
{'testcase_name': 'a_1_b_1_c_2', 'a': 1, 'b': 1, 'c': 2},
{'testcase_name': 'a_1_b_1_c_4', 'a': 1, 'b': 1, 'c': 4},
{'testcase_name': 'a_3_b_3_c_2', 'a': 3, 'b': 3, 'c': 2},
{'testcase_name': 'a_3_b_3_c_4', 'a': 3, 'b': 3, 'c': 4},
]
self.assertEqual(
test_case.cross_named_parameters(test_cases_1, test_cases_2),
expected_cross)
def testCrossParameters(self):
test_cases_1 = [('a', 1), ('b', 2)]
test_cases_2 = [(True,), (False,)]
expected_cross = [
('a', 1, True), ('b', 2, True),
('a', 1, False), ('b', 2, False),
]
self.assertCountEqual(
test_case.cross_parameters(test_cases_1, test_cases_2), expected_cross)
def testAssertDataCloseOrEqual(self):
self.assertDataCloseOrEqual([{'a': 'first',
'b': 1.0,
'c': 5,
'd': ('second', 2.0)},
{'e': 2,
'f': 3}],
[{'a': 'first',
'b': 1.0000001,
'c': 5,
'd': ('second', 2.0000001)},
{'e': 2,
'f': 3}])
with self.assertRaisesRegexp(AssertionError, r'len\(.*\) != len\(\[\]\)'):
self.assertDataCloseOrEqual([{'a': 1}], [])
with self.assertRaisesRegexp(
AssertionError,
re.compile('Element counts were not equal.*: Row 0', re.DOTALL)):
self.assertDataCloseOrEqual([{'a': 1}], [{'b': 1}])
with self.assertRaisesRegexp(
AssertionError,
re.compile('Not equal to tolerance.*: Row 0, key a', re.DOTALL)):
self.assertDataCloseOrEqual([{'a': 1}], [{'a': 2}])
@test_case.parameters((1, 'a'), (2, 'b'))
def testSampleParametrizedTestMethod(self, my_arg, my_other_arg):
self.assertIn((my_arg, my_other_arg), {(1, 'a'), (2, 'b')})
if __name__ == '__main__':
unittest.main()
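# cross_named_parameters, as exercised above, forms the Cartesian product
# of two named-parameter lists and joins their testcase_name fields with
# '_' (e.g. 'a_1_b_1' x 'c_2' -> 'a_1_b_1_c_2').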
|
tensorflow/transform
|
tensorflow_transform/test_case_test.py
|
Python
|
apache-2.0
| 3,131
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
from buildbot.clients import base
from twisted.cred import credentials
from twisted.cred import error
from twisted.internet import reactor
from twisted.spread import pb
class TextClient:
def __init__(self, master, events="steps", username="statusClient", passwd="clientpw"):
"""
@type master: string
@param master: a host:port string to masters L{buildbot.status.client.PBListener}
@type username: string
@param username:
@type passwd: string
@param passwd:
@type events: string, one of builders, builds, steps, logs, full
@param events: specify what level of detail should be reported.
- 'builders': only announce new/removed Builders
- 'builds': also announce builderChangedState, buildStarted, and
buildFinished
- 'steps': also announce buildETAUpdate, stepStarted, stepFinished
- 'logs': also announce stepETAUpdate, logStarted, logFinished
- 'full': also announce log contents
"""
self.master = master
self.username = username
self.passwd = passwd
self.listener = base.StatusClient(events)
def run(self):
"""Start the TextClient."""
self.startConnecting()
reactor.run()
def startConnecting(self):
try:
host, port = re.search(r'(.+):(\d+)', self.master).groups()
port = int(port)
except AttributeError:
# re.search returned None: the master string did not match host:port.
print "unparseable master location '%s'" % self.master
print " expecting something more like localhost:8007"
raise
cf = pb.PBClientFactory()
creds = credentials.UsernamePassword(self.username, self.passwd)
d = cf.login(creds)
reactor.connectTCP(host, port, cf)
d.addCallbacks(self.connected, self.not_connected)
return d
def connected(self, ref):
ref.notifyOnDisconnect(self.disconnected)
self.listener.connected(ref)
def not_connected(self, why):
if why.check(error.UnauthorizedLogin):
print """
Unable to login.. are you sure we are connecting to a
buildbot.status.client.PBListener port and not to the slaveport?
"""
reactor.stop()
return why
def disconnected(self, ref):
print "lost connection"
# we can get here in one of two ways: the buildmaster has
# disconnected us (probably because it shut itself down), or because
# we've been SIGINT'ed. In the latter case, our reactor is already
# shut down, but we have no easy way of detecting that. So protect
# our attempt to shut down the reactor.
try:
reactor.stop()
except RuntimeError:
pass
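# Usage sketch, following the constructor docstring above:
#   client = TextClient("localhost:8007", events="steps")
#   client.run()   # logs in to the master's PBListener and blocks in the reactor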
|
mitya57/debian-buildbot
|
buildbot/clients/text.py
|
Python
|
gpl-2.0
| 3,451
|
#!/usr/bin/env python
from setuptools import setup, find_packages
VERSION = __import__('cache_panel').__version__
try:
long_description = open('README.rst', 'rt').read()
except IOError:
long_description = ''
setup(
name='django-cache-panel',
version=VERSION,
description='A more detailed cache panel for the Django Debug Toolbar',
long_description=long_description,
author='Brandon Konkle',
author_email='brandon@lincolnloop.com',
url='http://github.com/lincolnloop/django-cache-panel',
packages=find_packages(),
provides=['cache_panel'],
requires=['Django', 'debug_toolbar'],
include_package_data=True,
zip_safe=False,
)
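# Typical install (hypothetical commands; names as published on PyPI):
#   pip install django-cache-panel
# then add the panel's dotted path to DEBUG_TOOLBAR_PANELS in your Django
# settings (see the project's README for the exact path; not shown here).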
|
lincolnloop/django-cache-panel
|
setup.py
|
Python
|
bsd-3-clause
| 682
|
"""
Copyright (c) 2017, Battelle Memorial Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an agency of the
United States Government. Neither the United States Government nor the United
States Department of Energy, nor Battelle, nor any of their employees, nor any
jurisdiction or organization that has cooperated in the development of these
materials, makes any warranty, express or implied, or assumes any legal
liability or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed, or
represents that its use would not infringe privately owned rights.
Reference herein to any specific commercial product, process, or service by
trade name, trademark, manufacturer, or otherwise does not necessarily
constitute or imply its endorsement, recommendation, or favoring by the
United States Government or any agency thereof, or Battelle Memorial Institute.
The views and opinions of authors expressed herein do not necessarily state or
reflect those of the United States Government or any agency thereof.
PACIFIC NORTHWEST NATIONAL LABORATORY
operated by
BATTELLE
for the
UNITED STATES DEPARTMENT OF ENERGY
under Contract DE-AC05-76RL01830
"""
import math
from .common import check_date, create_table_key, pre_conditions, check_run_status, setpoint_control_check
from volttron.platform.agent.math_utils import mean
INCONSISTENT_DATE = -89.2
INSUFFICIENT_DATA = -79.2
DUCT_STC_RCX = "Duct Static Pressure Set Point Control Loop Dx"
DUCT_STC_RCX1 = "Low Duct Static Pressure Dx"
DUCT_STC_RCX2 = "High Duct Static Pressure Dx"
DX = "/diagnostic message"
DX_LIST = [DUCT_STC_RCX, DUCT_STC_RCX1, DUCT_STC_RCX2]
class DuctStaticAIRCx(object):
"""Air-side HVAC Self-Correcting Diagnostic: Detect and correct
duct static pressure problems.
"""
def __init__(self, no_req_data, data_window, auto_correct_flag, stpt_deviation_thr,
max_stcpr_stpt, stcpr_retuning, zn_high_dmpr_thr,
zn_low_dmpr_thr, hdzn_dmpr_thr, min_stcpr_stpt,
analysis, stcpr_stpt_cname):
# Initialize data arrays
self.table_key = None
self.stcpr_stpt_array = []
self.stcpr_array = []
self.timestamp_array = []
# Initialize configurable thresholds
self.analysis = analysis
self.stcpr_stpt_cname = stcpr_stpt_cname
self.no_req_data = no_req_data
self.stpt_deviation_thr = stpt_deviation_thr
self.max_stcpr_stpt = max_stcpr_stpt
self.stcpr_retuning = stcpr_retuning
self.zn_high_dmpr_thr = zn_high_dmpr_thr
self.zn_low_dmpr_thr = zn_low_dmpr_thr
self.data_window = data_window
self.auto_correct_flag = auto_correct_flag
self.min_stcpr_stpt = float(min_stcpr_stpt)
self.hdzn_dmpr_thr = hdzn_dmpr_thr
self.ls_dmpr_low_avg = []
self.ls_dmpr_high_avg = []
self.hs_dmpr_high_avg = []
self.low_sf_condition = []
self.high_sf_condition = []
self.dx_offset = 0.0
def reinitialize(self):
"""
Reinitialize data arrays.
:return:
"""
self.table_key = None
self.stcpr_stpt_array = []
self.stcpr_array = []
self.timestamp_array = []
self.ls_dmpr_low_avg = []
self.ls_dmpr_high_avg = []
self.hs_dmpr_high_avg = []
self.low_sf_condition = []
self.high_sf_condition = []
def stcpr_aircx(self, current_time, stcpr_stpt_data, stcpr_data,
zn_dmpr_data, low_sf_cond, high_sf_cond, dx_result):
"""
Check duct static pressure AIRCx pre-requisites and manage analysis data set.
:param current_time:
:param stcpr_stpt_data:
:param stcpr_data:
:param zn_dmpr_data:
:param low_sf_cond:
:param high_sf_cond:
:param dx_result:
:return:
"""
if check_date(current_time, self.timestamp_array):
dx_result = pre_conditions(INCONSISTENT_DATE, DX_LIST, self.analysis, current_time, dx_result)
self.reinitialize()
run_status = check_run_status(self.timestamp_array, current_time, self.no_req_data, self.data_window)
if run_status is None:
dx_result.log("{} - Insufficient data to produce a valid diagnostic result.".format(current_time))
dx_result = pre_conditions(INSUFFICIENT_DATA, DX_LIST, self.analysis, current_time, dx_result)
self.reinitialize()
if run_status:
self.table_key = create_table_key(self.analysis, self.timestamp_array[-1])
avg_stcpr_stpt, dx_table, dx_result = setpoint_control_check(self.stcpr_stpt_array,
self.stcpr_array,
self.stpt_deviation_thr,
DUCT_STC_RCX,
self.dx_offset,
dx_result)
dx_result.insert_table_row(self.table_key, dx_table)
dx_result = self.low_stcpr_aircx(dx_result, avg_stcpr_stpt)
dx_result = self.high_stcpr_aircx(dx_result, avg_stcpr_stpt)
self.reinitialize()
self.stcpr_array.append(mean(stcpr_data))
if stcpr_stpt_data:
self.stcpr_stpt_array.append(mean(stcpr_stpt_data))
zn_dmpr_data.sort(reverse=False)
# Track the lower and upper halves of the sorted zone damper positions
# (the midpoint element lands in both halves; a single zone is in both).
self.ls_dmpr_low_avg.extend(zn_dmpr_data[:int(math.ceil(len(zn_dmpr_data) * 0.5)) if len(zn_dmpr_data) != 1 else 1])
self.ls_dmpr_high_avg.extend(zn_dmpr_data[int(math.ceil(len(zn_dmpr_data) * 0.5)) - 1 if len(zn_dmpr_data) != 1 else 0:])
zn_dmpr_data.sort(reverse=True)
# Highest-position half, used by the high duct static pressure diagnostic.
self.hs_dmpr_high_avg.extend(zn_dmpr_data[:int(math.ceil(len(zn_dmpr_data) * 0.5)) if len(zn_dmpr_data) != 1 else 1])
self.low_sf_condition.append(low_sf_cond if low_sf_cond is not None else 0)
self.high_sf_condition.append(high_sf_cond if high_sf_cond is not None else 0)
self.timestamp_array.append(current_time)
return dx_result
def low_stcpr_aircx(self, dx_result, avg_stcpr_stpt):
"""
AIRCx to identify and correct low duct static pressure.
:param dx_result:
:param avg_stcpr_stpt:
:return:
"""
dmpr_low_avg = mean(self.ls_dmpr_low_avg)
dmpr_high_avg = mean(self.ls_dmpr_high_avg)
# Majority vote over the collected supply-fan-condition samples; float()
# guards against integer division truncating the ratio under Python 2.
low_sf_condition = float(sum(self.low_sf_condition)) / len(self.low_sf_condition) > 0.5
thresholds = zip(self.zn_high_dmpr_thr.items(), self.zn_low_dmpr_thr.items())
diagnostic_msg = {}
for (key, zn_high_dmpr_thr), (key2, zn_low_dmpr_thr) in thresholds:
if dmpr_high_avg > zn_high_dmpr_thr and dmpr_low_avg > zn_low_dmpr_thr:
if low_sf_condition:
msg = "{} - duct static pressure too low. Supply fan at maximum.".format(key)
result = 15.1
elif avg_stcpr_stpt is None:
# Create diagnostic message for fault
# when duct static pressure set point
# is not available.
msg = "{} - duct static pressure is too low but set point data is not available.".format(key)
result = 14.1
elif self.auto_correct_flag and self.auto_correct_flag == key:
aircx_stcpr_stpt = avg_stcpr_stpt + self.stcpr_retuning
if aircx_stcpr_stpt <= self.max_stcpr_stpt:
dx_result.command(self.stcpr_stpt_cname, aircx_stcpr_stpt)
stcpr_stpt = "%s" % float("%.2g" % aircx_stcpr_stpt)
stcpr_stpt = stcpr_stpt + " in. w.g."
msg = "{} - duct static pressure too low. Set point increased to: {}".format(key,
stcpr_stpt)
result = 11.1
else:
dx_result.command(self.stcpr_stpt_cname, self.max_stcpr_stpt)
stcpr_stpt = "%s" % float("%.2g" % self.max_stcpr_stpt)
stcpr_stpt = stcpr_stpt + " in. w.g."
msg = "{} - duct static pressure too low. Set point increased to max {}.".format(key,
stcpr_stpt)
result = 12.1
else:
msg = "{} - duct static pressure is too low but auto-correction is not enabled.".format(key)
result = 13.1
else:
msg = "{} - no retuning opportunities detected for Low duct static pressure diagnostic.".format(key)
result = 10.0
diagnostic_msg.update({key: result})
dx_result.log(msg)
dx_result.insert_table_row(self.table_key, {DUCT_STC_RCX1 + DX: diagnostic_msg})
return dx_result
def high_stcpr_aircx(self, dx_result, avg_stcpr_stpt):
"""
AIRCx to identify and correct high duct static pressure.
:param dx_result:
:param avg_stcpr_stpt:
:return:
"""
# Majority vote; float() guards against Python 2 integer division.
high_sf_condition = float(sum(self.high_sf_condition)) / len(self.high_sf_condition) > 0.5
dmpr_high_avg = mean(self.hs_dmpr_high_avg)
diagnostic_msg = {}
for key, hdzn_dmpr_thr in self.hdzn_dmpr_thr.items():
if dmpr_high_avg <= hdzn_dmpr_thr:
if high_sf_condition:
msg = "{} - duct static pressure too high. Supply fan at minimum.".format(key)
result = 25.1
elif avg_stcpr_stpt is None:
# Create diagnostic message for fault
# when duct static pressure set point
# is not available.
msg = "{} - duct static pressure is too high but set point data is not available.".format(key)
result = 24.1
elif self.auto_correct_flag and self.auto_correct_flag == key:
aircx_stcpr_stpt = avg_stcpr_stpt - self.stcpr_retuning
if aircx_stcpr_stpt >= self.min_stcpr_stpt:
dx_result.command(self.stcpr_stpt_cname, aircx_stcpr_stpt)
stcpr_stpt = "%s" % float("%.2g" % aircx_stcpr_stpt)
stcpr_stpt = stcpr_stpt + " in. w.g."
msg = "{} - duct static pressure too high. Set point decreased to: {}".format(key,
stcpr_stpt)
result = 21.1
else:
dx_result.command(self.stcpr_stpt_cname, self.min_stcpr_stpt)
stcpr_stpt = "%s" % float("%.2g" % self.min_stcpr_stpt)
stcpr_stpt = stcpr_stpt + " in. w.g."
msg = "{} - duct static pressure too high. Set point decreased to min {}.".format(key,
stcpr_stpt)
result = 22.1
else:
msg = "{} - duct static pressure is too high but auto-correction is not enabled.".format(key)
result = 23.1
else:
msg = "{} - No retuning opportunities detected for high duct static pressure diagnostic.".format(key)
result = 20.0
diagnostic_msg.update({key: result})
dx_result.log(msg)
dx_result.insert_table_row(self.table_key, {DUCT_STC_RCX2 + DX: diagnostic_msg})
return dx_result
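# Worked example of the damper-half bookkeeping in stcpr_aircx above
# (illustrative values): zn_dmpr_data = [10, 40, 60, 90] -> ceil(4*0.5) = 2,
# so the low half is [10, 40] and the high half is [40, 60, 90] (the
# midpoint element lands in both). The diagnostics then compare the means
# of these halves against the zn_low_dmpr_thr / zn_high_dmpr_thr thresholds.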
|
rlutes/volttron-applications
|
pnnl/AirsideRCxAgent/airside/diagnostics/stcpr_aircx.py
|
Python
|
bsd-3-clause
| 13,674
|
from six import text_type as unicode
import unittest
import os
from robot.utils.argumentparser import ArgumentParser
from robot.utils.asserts import (assert_equals, assert_raises,
assert_raises_with_msg, assert_true)
from robot.errors import Information, DataError, FrameworkError
from robot.version import get_full_version
USAGE = """Example Tool -- Stuff before hyphens is considered name
Usage: robot.py [options] datafile
Version: <VERSION>
Options:
-d --reportdir dir Explanation
-r --reportfile file This explanation continues ............... 78
........... to multiple lines.
Next line is totally empty.
-E --escape what:with * Line below has nothing after '*'. Next line has
nothing after value and next nothing after option name
-v --variable name:value *
-N --name name
-t -T --toggle Something
-h -? --help
--version Explanation
-z No long option so not an option line.
--z No long option here either
this line doesn't start with a '-' so not an --optionline
-\\-option escaped 1
-o -\\-option escaped 2
--ignored options cannot be this far
--ignored
* denotes options that can be set multiple times
"""
USAGE2 = """Just Name Here
usage: robot.py [options] arg1 arg2
options:
-v --variable name=value
-x --var-able name=v1,v2 Explanation
-3 --42
--help
"""
class TestArgumentParserInit(unittest.TestCase):
def setUp(self):
self.ap = ArgumentParser(USAGE)
def assert_long_opts(self, expected, ap=None):
expected += ['no' + e for e in expected if not e.endswith('=')]
long_opts = (ap or self.ap)._long_opts
assert_equals(sorted(long_opts), sorted(expected))
def assert_short_opts(self, expected, ap=None):
assert_equals((ap or self.ap)._short_opts, expected)
def assert_multi_opts(self, expected, ap=None):
assert_equals((ap or self.ap)._multi_opts, expected)
def assert_flag_opts(self, expected, ap=None):
assert_equals((ap or self.ap)._flag_opts, expected)
def test_short_options(self):
self.assert_short_opts('d:r:E:v:N:tTh?')
def test_long_options(self):
self.assert_long_opts(['reportdir=', 'reportfile=', 'escape=',
'variable=', 'name=', 'toggle', 'help',
'version'])
def test_multi_options(self):
self.assert_multi_opts(['escape', 'variable'])
def test_flag_options(self):
self.assert_flag_opts(['toggle', 'help', 'version'])
def test_options_must_be_indented_by_1_to_four_spaces(self):
ap = ArgumentParser('''Name
1234567890
--notin this option is not indented at all and thus ignored
--opt1
--opt2 This option is 4 spaces from left -> included
-o --opt3 argument It doesn't matter how far the option gets.
--notopt This option is 5 spaces from left -> not included
-i --ignored
--not-in-either
--included back in four space indentation''')
self.assert_long_opts(['opt1', 'opt2', 'opt3=', 'included'], ap)
def test_case_insensitive_long_options(self):
ap = ArgumentParser(' -f --foo\n -B --BAR\n')
self.assert_short_opts('fB', ap)
self.assert_long_opts(['foo', 'bar'], ap)
def test_same_option_multiple_times(self):
for usage in [' --foo\n --foo\n',
' --foo\n -f --Foo\n',
' -x --foo xxx\n -y --Foo yyy\n',
' -f --foo\n -f --bar\n']:
assert_raises(FrameworkError, ArgumentParser, usage)
ap = ArgumentParser(' -f --foo\n -F --bar\n')
self.assert_short_opts('fF', ap)
self.assert_long_opts(['foo', 'bar'], ap)
def test_same_option_multiple_times_with_no_prefix(self):
for usage in [' --foo\n --nofoo\n',
                      ' --nofoo\n --foo\n',
' --nose size\n --se\n']:
assert_raises(FrameworkError, ArgumentParser, usage)
ap = ArgumentParser(' --foo value\n --nofoo value\n')
self.assert_long_opts(['foo=', 'nofoo='], ap)
class TestArgumentParserParseArgs(unittest.TestCase):
def setUp(self):
self.ap = ArgumentParser(USAGE)
def test_missing_argument_file_throws_data_error(self):
inargs = '--argumentfile missing_argument_file_that_really_is_not_there.txt'.split()
self.assertRaises(DataError, self.ap.parse_args, inargs)
def test_single_options(self):
inargs = '-d reports --reportfile reps.html -T arg'.split()
opts, args = self.ap.parse_args(inargs)
assert_equals(opts, {'reportdir': 'reports', 'reportfile': 'reps.html',
'variable': [], 'name': None, 'toggle': True})
def test_multi_options(self):
inargs = '-v a:1 -v b:2 --name my_name --variable c:3 arg'.split()
opts, args = self.ap.parse_args(inargs)
assert_equals(opts, {'variable': ['a:1','b:2','c:3'], 'name':'my_name',
'reportdir': None, 'reportfile': None,
'toggle': None})
assert_equals(args, ['arg'])
def test_flag_options(self):
for inargs, exp in [('', None),
('--name whatever', None),
('--toggle', True),
('-T', True),
('--toggle --name whatever -t', True),
('-t -T --toggle', True),
('--notoggle', False),
('--notoggle --name xxx --notoggle', False),
('--toggle --notoggle', False),
('-t -t -T -T --toggle -T --notoggle', False),
('--notoggle --toggle --notoggle', False),
('--notoggle --toggle', True),
('--notoggle --notoggle -T', True)]:
opts, args = self.ap.parse_args(inargs.split() + ['arg'])
assert_equals(opts['toggle'], exp, inargs)
assert_equals(args, ['arg'])
def test_flag_option_with_no_prefix(self):
ap = ArgumentParser(' -S --nostatusrc\n --name name')
for inargs, exp in [('', None),
('--name whatever', None),
('--nostatusrc', False),
('-S', False),
('--nostatusrc -S --nostatusrc -S -S', False),
('--statusrc', True),
('--statusrc --statusrc -S', False),
('--nostatusrc --nostatusrc -S --statusrc', True)]:
opts, args = ap.parse_args(inargs.split() + ['arg'])
assert_equals(opts['statusrc'], exp, inargs)
assert_equals(args, ['arg'])
def test_single_option_multiple_times(self):
for inargs in ['--name Foo -N Bar arg',
'-N Zap --name Foo --name Bar arg',
'-N 1 -N 2 -N 3 -t --variable foo -N 4 --name Bar arg']:
opts, args = self.ap.parse_args(inargs.split())
assert_equals(opts['name'], 'Bar')
assert_equals(args, ['arg'])
def test_case_insensitive_long_options(self):
opts, args = self.ap.parse_args('--VarIable X:y --TOGGLE arg'.split())
assert_equals(opts['variable'], ['X:y'])
assert_equals(opts['toggle'], True)
assert_equals(args, ['arg'])
def test_case_insensitive_long_options_with_equal_sign(self):
opts, args = self.ap.parse_args('--VariAble=X:y --VARIABLE=ZzZ'.split())
assert_equals(opts['variable'], ['X:y', 'ZzZ'])
assert_equals(args, [])
def test_unescape_options(self):
cli = '--escape quot:Q -E space:SP -E lt:LT -E gt:GT ' \
+ '-N QQQLTmySPfineSPnameGTQQQ sourceSPwithSPspaces'
opts, args = self.ap.parse_args(cli.split())
assert_equals(opts['name'], '"""<my fine name>"""')
assert_equals(args, ['source with spaces'])
assert_true('escape' not in opts)
def test_split_pythonpath(self):
ap = ArgumentParser('ignored')
data = [(['path'], ['path']),
(['path1','path2'], ['path1','path2']),
(['path1:path2'], ['path1','path2']),
(['p1:p2:p3','p4','.'], ['p1','p2','p3','p4','.'])]
if os.sep == '\\':
data += [(['c:\\path'], ['c:\\path']),
(['c:\\path','d:\\path'], ['c:\\path','d:\\path']),
(['c:\\path:d:\\path'], ['c:\\path','d:\\path']),
(['c:/path:x:yy:d:\\path','c','.','x:/xxx'],
['c:\\path', 'x', 'yy', 'd:\\path', 'c', '.', 'x:\\xxx'])]
for inp, exp in data:
assert_equals(ap._split_pythonpath(inp), exp)
def test_get_pythonpath(self):
ap = ArgumentParser('ignored')
p1 = os.path.abspath('.')
p2 = os.path.abspath('..')
assert_equals(ap._get_pythonpath(p1), [p1])
assert_equals(ap._get_pythonpath([p1,p2]), [p1,p2])
assert_equals(ap._get_pythonpath([p1 + ':' + p2]), [p1,p2])
assert_true(p1 in ap._get_pythonpath(os.path.join(p2,'*')))
def test_arguments_are_globbed(self):
_, args = self.ap.parse_args([__file__.replace('test_', '?????')])
assert_equals(args, [__file__])
# Needed to ensure that the globbed directory contains files
globexpr = os.path.join(os.path.dirname(__file__), '*')
_, args = self.ap.parse_args([globexpr])
assert_true(len(args) > 1)
def test_arguments_with_glob_patterns_arent_removed_if_they_dont_match(self):
_, args = self.ap.parse_args(['*.non.existing', 'non.ex.??'])
assert_equals(args, ['*.non.existing', 'non.ex.??'])
def test_special_options_are_removed(self):
ap = ArgumentParser('''Usage:
-h --help
-v --version
--pythonpath path
--escape x:y *
--argumentfile path
--option
''')
opts, args = ap.parse_args(['--option'])
assert_equals(opts, {'option': True})
    def test_special_options_can_be_turned_to_normal_options(self):
ap = ArgumentParser('''Usage:
-h --help
-v --version
--pythonpath path
--escape x:y
--argumentfile path
''', auto_help=False, auto_version=False, auto_escape=False,
auto_pythonpath=False, auto_argumentfile=False)
opts, args = ap.parse_args(['--help', '-v', '--escape', 'xxx'])
assert_equals(opts, {'help': True, 'version': True, 'pythonpath': None,
'escape': 'xxx', 'argumentfile': None})
def test_non_list_args(self):
ap = ArgumentParser('''Options:
-t --toggle
-v --value value
-m --multi multi *
''')
opts, args = ap.parse_args(())
assert_equals(opts, {'toggle': None,
'value': None,
'multi': []})
assert_equals(args, [])
opts, args = ap.parse_args(('-t', '-v', 'xxx', '-m', '1', '-m2', 'arg'))
assert_equals(opts, {'toggle': True,
'value': 'xxx',
'multi': ['1', '2']})
assert_equals(args, ['arg'])
class TestDefaultsFromEnvironmentVariables(unittest.TestCase):
def setUp(self):
os.environ['ROBOT_TEST_OPTIONS'] = '-t --value default -m1 --multi=2'
self.ap = ArgumentParser('''Options:
-t --toggle
-v --value value
-m --multi multi *
''', env_options='ROBOT_TEST_OPTIONS')
def tearDown(self):
os.environ.pop('ROBOT_TEST_OPTIONS')
def test_flag(self):
opts, args = self.ap.parse_args([])
assert_equals(opts['toggle'], True)
opts, args = self.ap.parse_args(['--toggle'])
assert_equals(opts['toggle'], True)
opts, args = self.ap.parse_args(['--notoggle'])
assert_equals(opts['toggle'], False)
def test_value(self):
opts, args = self.ap.parse_args([])
assert_equals(opts['value'], 'default')
opts, args = self.ap.parse_args(['--value', 'given'])
assert_equals(opts['value'], 'given')
def test_multi_value(self):
opts, args = self.ap.parse_args([])
assert_equals(opts['multi'], ['1', '2'])
opts, args = self.ap.parse_args(['-m3', '--multi', '4'])
assert_equals(opts['multi'], ['1', '2', '3', '4'])
def test_arguments(self):
os.environ['ROBOT_TEST_OPTIONS'] = '-o opt arg1 arg2'
ap = ArgumentParser('Usage:\n -o --opt value',
env_options='ROBOT_TEST_OPTIONS')
opts, args = ap.parse_args([])
assert_equals(opts['opt'], 'opt')
assert_equals(args, ['arg1', 'arg2'])
def test_environment_variable_not_set(self):
ap = ArgumentParser('Usage:\n -o --opt value', env_options='NOT_SET')
opts, args = ap.parse_args(['arg'])
assert_equals(opts['opt'], None)
assert_equals(args, ['arg'])
class TestArgumentValidation(unittest.TestCase):
def test_check_args_with_correct_args(self):
for arg_limits in [None, (1, 1), 1, (1,)]:
ap = ArgumentParser(USAGE, arg_limits=arg_limits)
assert_equals(ap.parse_args(['hello'])[1], ['hello'])
def test_default_validation(self):
ap = ArgumentParser(USAGE)
for args in [(), ('1',), ('m', 'a', 'n', 'y')]:
assert_equals(ap.parse_args(args)[1], list(args))
def test_check_args_with_wrong_number_of_args(self):
for limits in [1, (1, 1), (1, 2)]:
ap = ArgumentParser('usage', arg_limits=limits)
for args in [(), ('arg1', 'arg2', 'arg3')]:
assert_raises(DataError, ap.parse_args, args)
def test_check_variable_number_of_args(self):
ap = ArgumentParser('usage: robot.py [options] args', arg_limits=(1,))
ap.parse_args(['one_is_ok'])
ap.parse_args(['two', 'ok'])
ap.parse_args(['this', 'should', 'also', 'work', '!'])
assert_raises_with_msg(DataError, "Expected at least 1 argument, got 0.",
ap.parse_args, [])
def test_argument_range(self):
ap = ArgumentParser('usage: test.py [options] args', arg_limits=(2,4))
ap.parse_args(['1', '2'])
ap.parse_args(['1', '2', '3', '4'])
assert_raises_with_msg(DataError, "Expected 2 to 4 arguments, got 1.",
ap.parse_args, ['one is not enough'])
def test_no_arguments(self):
ap = ArgumentParser('usage: test.py [options]', arg_limits=(0, 0))
ap.parse_args([])
assert_raises_with_msg(DataError, "Expected 0 arguments, got 2.",
ap.parse_args, ['1', '2'])
def test_custom_validator_fails(self):
def validate(options, args):
raise AssertionError
ap = ArgumentParser(USAGE2, validator=validate)
assert_raises(AssertionError, ap.parse_args, [])
def test_custom_validator_return_value(self):
def validate(options, args):
return options, [a.upper() for a in args]
ap = ArgumentParser(USAGE2, validator=validate)
opts, args = ap.parse_args(['-v', 'value', 'inp1', 'inp2'])
assert_equals(opts['variable'], 'value')
assert_equals(args, ['INP1', 'INP2'])
class TestPrintHelpAndVersion(unittest.TestCase):
def setUp(self):
self.ap = ArgumentParser(USAGE, version='1.0 alpha')
self.ap2 = ArgumentParser(USAGE2)
def test_print_help(self):
assert_raises_with_msg(Information, USAGE2,
self.ap2.parse_args, ['--help'])
def test_name_is_got_from_first_line_of_the_usage(self):
assert_equals(self.ap.name, 'Example Tool')
assert_equals(self.ap2.name, 'Just Name Here')
def test_name_and_version_can_be_given(self):
ap = ArgumentParser(USAGE, name='Kakkonen', version='2')
assert_equals(ap.name, 'Kakkonen')
assert_equals(ap.version, '2')
def test_print_version(self):
assert_raises_with_msg(Information, 'Example Tool 1.0 alpha',
self.ap.parse_args, ['--version'])
def test_print_version_when_version_not_set(self):
ap = ArgumentParser(' --version', name='Kekkonen')
msg = assert_raises(Information, ap.parse_args, ['--version'])
assert_equals(unicode(msg), 'Kekkonen %s' % get_full_version())
def test_version_is_replaced_in_help(self):
assert_raises_with_msg(Information, USAGE.replace('<VERSION>', '1.0 alpha'),
self.ap.parse_args, ['--help'])
def test_escapes_are_replaced_in_help(self):
usage = """Name
--escape x:y blaa blaa .............................................. end
<-----------------------ESCAPES---------------------------->
-- next line --
--help"""
expected = """Name
--escape x:y blaa blaa .............................................. end
Available escapes: amp (&), apos ('), at (@), bslash (\),
colon (:), comma (,), curly1 ({), curly2 (}), dollar ($),
exclam (!), gt (>), hash (#), lt (<), paren1 ((), paren2
()), percent (%), pipe (|), quest (?), quot ("), semic (;),
slash (/), space ( ), square1 ([), square2 (]), star (*)
-- next line --
--help"""
assert_raises_with_msg(Information, expected,
ArgumentParser(usage).parse_args, ['--help'])
if __name__ == "__main__":
unittest.main()
|
userzimmermann/robotframework
|
utest/utils/test_argumentparser.py
|
Python
|
apache-2.0
| 17,703
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import purchase
from . import stock
from . import purchase_requisition
|
ddico/odoo
|
addons/purchase_requisition_stock/models/__init__.py
|
Python
|
agpl-3.0
| 154
|
"""cleff URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
# url('', include('social.apps.django_app.urls', namespace='social')),
# url('', include('django.contrib.auth.urls', namespace='auth')),
url(r'^admin/',
include(admin.site.urls)),
url(r'^profiles/',
include('profiles.urls',
namespace='profiles')),
url(r'^forum/',
include('Forum.urls',
namespace='Forum')),
url(r'^',
include('cleff_main.urls', namespace='main')),
url(r'^uploads/',
include('stickyuploads.urls')),
url(r'^message/',
include('messaging.urls',
namespace='message')),
url(r'^search/',
include('haystack.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
lancekrogers/music-network
|
cleff/cleff/urls.py
|
Python
|
apache-2.0
| 1,797
|
#
# A class that handles all HTTP communication with Thingdom.
#
import urllib2
import json
class HttpHelper:
#
# Constructor method. Initialize local data.
#
def __init__(self):
self._uri = 'https://api.thingdom.io/1.1'
self._request_counter = 0
#
# Perform a HTTP Get request.
#
    # requestPath - a string containing the path and optional query parameters (e.g. path/to/somewhere?param1=1&param2=2)
# Returns a dictionary representing the request response
#
def get_data(self, requestPath):
response = self._do_request(requestPath, None)
return response
#
# Perform a HTTP Post request.
#
# requestPath - a string containing the path to where data will be posted (e.g. path/to/post/endpoint)
# data - a dictionary containing the data that will be posted.
# Returns a dictionary representing the request response.
#
    def post_data(self, requestPath, data=None):
        self._request_counter += 1
        # Guard against the default None so callers may omit the payload.
        if data is None:
            data = {}
        data['counter'] = str(self._request_counter)
data['time'] = '2015/01/15 09:30:00'
response = self._do_request(requestPath, data)
return response
# *************************************************************************
# Helper Methods
# *************************************************************************
#
# Perform HTTP request.
#
# requestPath - a string containing the path to where data will be retrieved or posted.
# data - a dictionary containing the data that will be posted.
# Returns a dictionary representing the request response.
#
def _do_request(self, requestPath, data):
url = self._uri + '/' + requestPath
request = urllib2.Request(url)
if data is not None:
request.add_header('Content-Type', 'application/json')
request.add_data(json.dumps(data))
response = urllib2.urlopen(request)
responseJson = json.load(response)
return responseJson
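# Minimal usage sketch (the endpoint paths and payload below are illustrative,
# not part of the documented Thingdom API):
#
#   helper = HttpHelper()
#   status = helper.get_data('device/status?device_id=42')
#   resp = helper.post_data('device/feed', {'value': 7})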
|
thingdomio/thingdom-python
|
thingdom/httpHelper.py
|
Python
|
mit
| 2,023
|
bold = '\033[1m'
end = '\033[0m'
black = '\033[0;30m'
red = '\033[0;31m'
green = '\033[0;32m'
yellow = '\033[0;33m'
blue = '\033[0;34m'
purple = '\033[0;35m'
cyan = '\033[0;36m'
grey = '\033[0;37m'
# Bold
bblack = '\033[1;30m'
bred = '\033[1;31m'
bgreen = '\033[1;32m'
byellow = '\033[1;33m'
bblue = '\033[1;34m'
bpurple = '\033[1;35m'
bcyan = '\033[1;36m'
bgrey = '\033[1;37m'
# High Intensity
iblack = '\033[0;90m'
ired = '\033[0;91m'
igreen = '\033[0;92m'
iyellow = '\033[0;93m'
iblue = '\033[0;94m'
ipurple = '\033[0;95m'
icyan = '\033[0;96m'
igrey = '\033[0;97m'
# Bold High Intensity
biblack = '\033[1;90m'
bired = '\033[1;91m'
bigreen = '\033[1;92m'
biyellow= '\033[1;93m'
biblue = '\033[1;94m'
bipurple= '\033[1;95m'
bicyan = '\033[1;96m'
bigrey = '\033[1;97m'
def enable():
global bold, end
global black, red, green, yellow, blue, purple, cyan, grey
global bblack, bred, bgreen, byellow, bblue, bpurple, bcyan, bgrey
global iblack, ired, igreen, iyellow, iblue, ipurple, icyan, igrey
global biblack, bired, bigreen, biyellow, biblue, bipurple, bicyan, bigrey
bold = '\033[1m'
end = '\033[0m'
black = '\033[0;30m'
red = '\033[0;31m'
green = '\033[0;32m'
yellow = '\033[0;33m'
blue = '\033[0;34m'
purple = '\033[0;35m'
cyan = '\033[0;36m'
grey = '\033[0;37m'
# Bold
bblack = '\033[1;30m'
bred = '\033[1;31m'
bgreen = '\033[1;32m'
byellow = '\033[1;33m'
    bblue = '\033[1;34m'
bpurple = '\033[1;35m'
bcyan = '\033[1;36m'
bgrey = '\033[1;37m'
    # High Intensity
iblack = '\033[0;90m'
ired = '\033[0;91m'
igreen = '\033[0;92m'
iyellow = '\033[0;93m'
iblue = '\033[0;94m'
ipurple = '\033[0;95m'
icyan = '\033[0;96m'
igrey = '\033[0;97m'
    # Bold High Intensity
biblack = '\033[1;90m'
bired = '\033[1;91m'
bigreen = '\033[1;92m'
biyellow= '\033[1;93m'
biblue = '\033[1;94m'
bipurple= '\033[1;95m'
bicyan = '\033[1;96m'
bigrey = '\033[1;97m'
def disable():
global bold, end
global black, red, green, yellow, blue, purple, cyan, grey
global bblack, bred, bgreen, byellow, bblue, bpurple, bcyan, bgrey
global iblack, ired, igreen, iyellow, iblue, ipurple, icyan, igrey
global biblack, bired, bigreen, biyellow, biblue, bipurple, bicyan, bigrey
bold = ''
end = ''
black = ''
red = ''
green = ''
yellow = ''
blue = ''
purple = ''
cyan = ''
grey = ''
# Bold
bblack = ''
bred = ''
bgreen = ''
byellow = ''
    bblue = ''
bpurple = ''
bcyan = ''
bgrey = ''
    # High Intensity
iblack = ''
ired = ''
igreen = ''
iyellow = ''
iblue = ''
ipurple = ''
icyan = ''
igrey = ''
    # Bold High Intensity
biblack = ''
bired = ''
bigreen = ''
biyellow= ''
biblue = ''
bipurple= ''
bicyan = ''
bigrey = ''
enable()
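# Usage sketch (assumes the module is imported as `toolbox.color`, matching
# its location in this repository):
#
#   from toolbox import color
#   print(color.bred + 'error' + color.end)
#   color.disable()  # blank out all codes, e.g. when not writing to a TTY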
|
humm/toolbox
|
toolbox/color.py
|
Python
|
mit
| 3,152
|
# Copyright (c) 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
"""Astroid hooks for numpy.core.numeric module."""
import functools
from astroid.brain.brain_numpy_utils import infer_numpy_member, looks_like_numpy_member
from astroid.brain.helpers import register_module_extender
from astroid.builder import parse
from astroid.inference_tip import inference_tip
from astroid.manager import AstroidManager
from astroid.nodes.node_classes import Attribute
def numpy_core_numeric_transform():
return parse(
"""
# different functions defined in numeric.py
import numpy
def zeros_like(a, dtype=None, order='K', subok=True): return numpy.ndarray((0, 0))
def ones_like(a, dtype=None, order='K', subok=True): return numpy.ndarray((0, 0))
def full_like(a, fill_value, dtype=None, order='K', subok=True): return numpy.ndarray((0, 0))
"""
)
register_module_extender(
AstroidManager(), "numpy.core.numeric", numpy_core_numeric_transform
)
METHODS_TO_BE_INFERRED = {
"ones": """def ones(shape, dtype=None, order='C'):
return numpy.ndarray([0, 0])"""
}
for method_name, function_src in METHODS_TO_BE_INFERRED.items():
inference_function = functools.partial(infer_numpy_member, function_src)
AstroidManager().register_transform(
Attribute,
inference_tip(inference_function),
functools.partial(looks_like_numpy_member, method_name),
)
|
PyCQA/astroid
|
astroid/brain/brain_numpy_core_numeric.py
|
Python
|
lgpl-2.1
| 1,785
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-03-01 09:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0018_project_properties'),
]
operations = [
migrations.AlterModelOptions(
name='product',
options={'ordering': ['-id'], 'permissions': (('view_product', 'View product'),)},
),
migrations.AlterModelOptions(
name='productstatus',
options={'ordering': ['-id'], 'permissions': (('view_productstatus', 'View product status'),)},
),
migrations.AlterModelOptions(
name='project',
options={'ordering': ['-identifier'], 'permissions': (('view_project', 'View project'),)},
),
migrations.AlterModelOptions(
name='projectstatus',
options={'ordering': ['-id'], 'permissions': (('view_projectstatus', 'View project status'),)},
),
]
|
GETLIMS/LIMS-Backend
|
lims/projects/migrations/0019_auto_20180301_0958.py
|
Python
|
mit
| 1,025
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_ping
short_description: Check remote PostgreSQL server availability
description:
- Simple module to check remote PostgreSQL server availability.
version_added: '2.8'
options:
db:
description:
      - Name of the database to connect to.
type: str
aliases:
- login_db
notes:
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module. If
the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host. For Ubuntu-based
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
requirements: [ psycopg2 ]
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
# PostgreSQL ping dbsrv server from the shell:
# ansible dbsrv -m postgresql_ping
# In the example below you need to generate certificates beforehand.
# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information.
- name: PostgreSQL ping dbsrv server using non-default credentials and SSL
postgresql_ping:
db: protected_db
login_host: dbsrv
login_user: secret
login_password: secret_pass
ca_cert: /root/root.crt
ssl_mode: verify-full
'''
RETURN = r'''
is_available:
description: PostgreSQL server availability.
returned: always
type: bool
sample: true
server_version:
description: PostgreSQL server version.
returned: always
type: dict
sample: { major: 10, minor: 1 }
'''
try:
import psycopg2
HAS_PSYCOPG2 = True
except ImportError:
HAS_PSYCOPG2 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError
from ansible.module_utils.postgres import postgres_common_argument_spec
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
# ===========================================
# PostgreSQL module specific support methods.
#
class PgPing(object):
def __init__(self, module, cursor):
self.module = module
self.cursor = cursor
self.is_available = False
self.version = {}
def do(self):
self.get_pg_version()
return (self.is_available, self.version)
def get_pg_version(self):
query = "SELECT version()"
raw = self.__exec_sql(query)[0][0]
if raw:
self.is_available = True
raw = raw.split()[1].split('.')
self.version = dict(
major=int(raw[0]),
minor=int(raw[1]),
)
def __exec_sql(self, query):
try:
self.cursor.execute(query)
res = self.cursor.fetchall()
if res:
return res
except SQLParseError as e:
self.module.fail_json(msg=to_native(e))
self.cursor.close()
except Exception as e:
self.module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
return False
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
db=dict(type='str', aliases=['login_db']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
if not HAS_PSYCOPG2:
module.fail_json(msg="The python psycopg2 module is required")
sslrootcert = module.params["ca_cert"]
    # To use default values, keyword arguments must be absent, so
    # check which values are empty and don't include them in the **kw
    # dictionary.
params_map = {
"login_host": "host",
"login_user": "user",
"login_password": "password",
"port": "port",
"db": "database",
"ssl_mode": "sslmode",
"ca_cert": "sslrootcert"
}
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != "" and v is not None)
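    # e.g. module params {'login_host': 'dbsrv', 'db': 'protected_db'} map to
    # connection kwargs {'host': 'dbsrv', 'database': 'protected_db'}
    # (illustrative values).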
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
module.fail_json(msg='psycopg2 must be at least 2.4.3 in order '
                          'to use the ca_cert parameter')
# Set some default values:
cursor = False
db_connection = False
result = dict(
changed=False,
is_available=False,
server_version=dict(),
)
try:
db_connection = psycopg2.connect(**kw)
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
except TypeError as e:
if 'sslrootcert' in e.args[0]:
module.fail_json(msg='Postgresql server must be at least '
'version 8.4 to support sslrootcert')
module.fail_json(msg="unable to connect to database: %s" % to_native(e))
except Exception as e:
module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
# Do job:
pg_ping = PgPing(module, cursor)
if cursor:
# If connection established:
result["is_available"], result["server_version"] = pg_ping.do()
db_connection.rollback()
module.exit_json(**result)
if __name__ == '__main__':
main()
|
SergeyCherepanov/ansible
|
ansible/ansible/modules/database/postgresql/postgresql_ping.py
|
Python
|
mit
| 6,115
|
"""biome table
Revision ID: 5c6a141c3642
Revises: e6287e300888
Create Date: 2017-02-14 03:01:29.736374
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5c6a141c3642'
down_revision = 'e6287e300888'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'biome',
sa.Column('id', sa.Integer(), primary_key=True),
sa.Column('wilderness_id', sa.Integer()),
sa.Column('encounter_id', sa.Integer()),
sa.Column('description', sa.String()),
sa.ForeignKeyConstraint(['encounter_id'], ['encounter_table.id'], ),
sa.ForeignKeyConstraint(['wilderness_id'], ['wilderness.id'], ),
)
def downgrade():
op.drop_table('biome')
|
d2emon/gurps-helper
|
migrations/versions/5c6a141c3642_biome_table.py
|
Python
|
gpl-3.0
| 754
|
""" XVM (c) www.modxvm.com 2013-2015 """
__all__ = ['startConfigWatchdog', 'stopConfigWatchdog']
# PUBLIC
def startConfigWatchdog():
# debug('startConfigWatchdog')
_g_configWatchdog.stopConfigWatchdog()
_g_configWatchdog.configWatchdog()
def stopConfigWatchdog():
# debug('stopConfigWatchdog')
_g_configWatchdog.stopConfigWatchdog()
# PRIVATE
import os
import traceback
import BigWorld
from gui.shared import g_eventBus, events
from constants import *
from logger import *
class _ConfigWatchdog(object):
configWatchdogTimerId = None
lastConfigDirState = None
def configWatchdog(self):
# debug('configWatchdog(): {0}'.format(XVM.CONFIG_DIR))
self.configWatchdogTimerId = None
try:
x = [(nm, os.path.getmtime(nm)) for nm in [os.path.join(p, f)
for p, n, fn in os.walk(XVM.CONFIG_DIR)
for f in fn]]
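            # x is a snapshot of (path, mtime) pairs for every file under the
            # config dir; any difference from the previous snapshot is treated
            # as a config change and triggers the reload event below.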
if self.lastConfigDirState is None:
self.lastConfigDirState = x
elif self.lastConfigDirState != x:
self.lastConfigDirState = x
# debug('reload config')
g_eventBus.handleEvent(events.HasCtxEvent(XVM_EVENT.RELOAD_CONFIG, {'filename':XVM.CONFIG_FILE}))
return
except Exception, ex:
err(traceback.format_exc())
self.configWatchdogTimerId = BigWorld.callback(1, self.configWatchdog)
def stopConfigWatchdog(self):
# debug('stopConfigWatchdog')
if self.configWatchdogTimerId is not None:
BigWorld.cancelCallback(self.configWatchdogTimerId)
self.configWatchdogTimerId = None
_g_configWatchdog = _ConfigWatchdog()
|
Satariall/xvm-test
|
src/xpm/xvm_main/configwatchdog.py
|
Python
|
gpl-3.0
| 1,794
|
from django.apps import AppConfig
from django.conf import settings
from django.core.checks import register
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from kuma.celery import app
class CoreConfig(AppConfig):
"""
The Django App Config class to store information about the core app
    and do startup-time initialization.
"""
name = "kuma.core"
verbose_name = _("Core")
def ready(self):
"""Configure kuma.core after models are loaded."""
from kuma.core.checks import react_i18n_check
register(react_i18n_check)
# Clean up expired sessions every 60 minutes
from kuma.core.tasks import clean_sessions
app.add_periodic_task(60 * 60, clean_sessions.s())
@cached_property
def language_mapping(self):
"""
        A static mapping of lower-case language codes to their native names.
"""
        # The LANGUAGES setting is a list of (language code, native name) tuples.
        # Lower-case each code and convert the list to a dictionary.
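        # e.g. {"en-us": "English (US)", "fr": "Français"} (illustrative values)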
return {lang[0].lower(): lang[1] for lang in settings.LANGUAGES}
|
Elchi3/kuma
|
kuma/core/apps.py
|
Python
|
mpl-2.0
| 1,180
|
from django.conf.urls import patterns, include, url
# import .views
urlpatterns = patterns('',
url(r"^$", "blog.views.index", name="index"),
url(r"^post/(?P<pk>\d+)/$", "blog.views.post", name="post"),
url(r"^category/(?P<pk>\d+)/$", "blog.views.category", name="category"),
)
|
wwq0327/djwebapp-blog
|
djblog/blog/urls.py
|
Python
|
mit
| 370
|
from PyQt5.QtWidgets import QWidget
from functionalities import Functionalities
from pop_up_ui import pop_up_External_Neuron_Array
class PopUpExternalNeuronArray(Functionalities):
def __init__(self, parent):
self.main_win = QWidget()
self.ui = pop_up_External_Neuron_Array.Ui_centralwidget()
self.ui.setupUi(self.main_win)
# Create main window object
self.mainWindow = parent
# set relative folder
self.rel_folder = self.rel_folder()
# initial states
self.ui.checkBox_Global_Rot.setCheckState(2)
self.ui.widget_Global_Rot_Unchecked.hide()
self.ui.checkBox_Global_Rot.stateChanged.connect(
lambda: self.enable_disable_menu_item_on_checkbok_click_double_menu(self.ui.checkBox_Global_Rot,
self.ui.widget_Global_Rot_Checked,
self.ui.widget_Global_Rot_Unchecked))
self.ui.lineEdit_X_Coordinate_Old.editingFinished.connect(
lambda: self.check_lineedit_if_list_entered(self.ui.lineEdit_X_Coordinate_Old.text(),
self.ui.lineEdit_X_Coordinate_Old))
self.ui.lineEdit_Y_Coordinate_Old.editingFinished.connect(
lambda: self.check_lineedit_if_list_entered(self.ui.lineEdit_Y_Coordinate_Old.text(),
self.ui.lineEdit_Y_Coordinate_Old))
self.ui.lineEdit_Z_Coordinate_Old.editingFinished.connect(
lambda: self.check_lineedit_if_list_entered(self.ui.lineEdit_Z_Coordinate_Old.text(),
self.ui.lineEdit_Z_Coordinate_Old))
self.ui.lineEdit_XY_Angles.editingFinished.connect(
lambda: self.check_lineedit_if_list_entered(self.ui.lineEdit_XY_Angles.text(), self.ui.lineEdit_XY_Angles))
self.ui.lineEdit_YZ_Angles.editingFinished.connect(
lambda: self.check_lineedit_if_list_entered(self.ui.lineEdit_YZ_Angles.text(), self.ui.lineEdit_YZ_Angles))
self.ui.lineEdit_ZX_Angles.editingFinished.connect(
lambda: self.check_lineedit_if_list_entered(self.ui.lineEdit_ZX_Angles.text(), self.ui.lineEdit_ZX_Angles))
self.ui.lineEdit_Alpha_Array_Glob.editingFinished.connect(
lambda: self.check_lineedit_if_list_entered(self.ui.lineEdit_Alpha_Array_Glob.text(),
self.ui.lineEdit_Alpha_Array_Glob))
self.ui.lineEdit_Beta_Array_Glob.editingFinished.connect(
lambda: self.check_lineedit_if_list_entered(self.ui.lineEdit_Beta_Array_Glob.text(),
self.ui.lineEdit_Beta_Array_Glob))
self.ui.lineEdit_Gamma_Array_Glob.editingFinished.connect(
lambda: self.check_lineedit_if_list_entered(self.ui.lineEdit_Gamma_Array_Glob.text(),
self.ui.lineEdit_Gamma_Array_Glob))
# Save and cancel
self.filename = "{}/pop_up_control/dictionaries/dict_external_neuron_array.py".format(self.rel_folder)
self.ui.pushButton_Save.clicked.connect(
lambda: self.saveCloseWindow(self.output_dict(), self.filename))
self.ui.pushButton_Cancel.clicked.connect(lambda: self.closeWindow())
def output_dict(self):
output_dict = {
'Global_rot': self.corrector(1, self.ui.checkBox_Global_Rot.checkState()),
'x_seed': self.set_default_values(0, self.ui.doubleSpinBox_X_Seed.value()),
'y_seed': self.set_default_values(0, self.ui.doubleSpinBox_Y_Seed.value()),
'z_seed': self.set_default_values(0, self.ui.doubleSpinBox_Z_Seed.value()),
'x_steps': self.set_default_values(0, self.ui.spinBox_X_Steps.value()),
'y_steps': self.set_default_values(0, self.ui.spinBox_Y_Steps.value()),
'z_steps': self.set_default_values(0, self.ui.spinBox_Z_Steps.value()),
'x_step': self.set_default_values(0, self.ui.doubleSpinBox_X_Step.value()),
'y_step': self.set_default_values(0, self.ui.doubleSpinBox_Y_Step.value()),
'z_step': self.set_default_values(0, self.ui.doubleSpinBox_Z_Step.value()),
'alpha_array_glob': self.set_default_values([0],
self.ui.lineEdit_Alpha_Array_Glob.text()),
'beta_array_glob': self.set_default_values([0],
self.ui.lineEdit_Beta_Array_Glob.text()),
'gamma_array_glob': self.set_default_values([0],
self.ui.lineEdit_Gamma_Array_Glob.text()),
'X_coord_old': self.set_default_values(0, self.ui.lineEdit_X_Coordinate_Old.text()),
'Y_coord_old': self.set_default_values(0, self.ui.lineEdit_Y_Coordinate_Old.text()),
'Z_coord_old': self.set_default_values(0, self.ui.lineEdit_Z_Coordinate_Old.text()),
'YZ_angles': self.set_default_values([0], self.ui.lineEdit_YZ_Angles.text()),
'ZX_angles': self.set_default_values([0], self.ui.lineEdit_ZX_Angles.text()),
'XY_angles': self.set_default_values([0], self.ui.lineEdit_XY_Angles.text())
}
return output_dict
def saveDict(self):
self.saveCloseWindow(self.output_dict(), self.filename)
|
andreashorn/lead_dbs
|
ext_libs/OSS-DBS/OSS_platform/GUI_tree_files/pop_up_control/external_neuron_arrray.py
|
Python
|
gpl-3.0
| 5,535
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Oxcoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./"
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Oxcoin-Qt.app/Contents/Info.plist"
version = "unknown"
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
    lineArr = line.replace(" ", "").split("=")
    if lineArr[0].startswith("VERSION"):
        version = lineArr[1].replace("\n", "")
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w")
fOut.write(newFileContent)
print "Info.plist fresh created"
|
Skytells/Oxcoin
|
share/qt/clean_mac_info_plist.py
|
Python
|
mit
| 893
|
# The Hazard Library
# Copyright (C) 2012-2016 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.source.area` defines :class:`AreaSource`.
"""
from copy import deepcopy
from openquake.hazardlib.geo import Point
from openquake.hazardlib.source.point import PointSource
from openquake.hazardlib.source.rupture import ParametricProbabilisticRupture
from openquake.baselib.slots import with_slots
@with_slots
class AreaSource(PointSource):
"""
Area source represents uniform seismicity occurring over a geographical
region.
:param polygon:
An instance of :class:`openquake.hazardlib.geo.polygon.Polygon`
that defines source's area.
:param area_discretization:
Float number, polygon area discretization spacing in kilometers.
See :meth:`openquake.hazardlib.source.area.AreaSource.iter_ruptures`.
Other parameters (except ``location``) are the same as for
:class:`~openquake.hazardlib.source.point.PointSource`.
"""
_slots_ = PointSource._slots_ + 'polygon area_discretization'.split()
MODIFICATIONS = set(())
RUPTURE_WEIGHT = 1 / 10.
def __init__(self, source_id, name, tectonic_region_type,
mfd, rupture_mesh_spacing,
magnitude_scaling_relationship, rupture_aspect_ratio,
temporal_occurrence_model,
# point-specific parameters (excluding location)
upper_seismogenic_depth, lower_seismogenic_depth,
nodal_plane_distribution, hypocenter_distribution,
# area-specific parameters
polygon, area_discretization):
super(AreaSource, self).__init__(
source_id, name, tectonic_region_type, mfd, rupture_mesh_spacing,
magnitude_scaling_relationship, rupture_aspect_ratio,
temporal_occurrence_model, upper_seismogenic_depth,
lower_seismogenic_depth, location=None,
nodal_plane_distribution=nodal_plane_distribution,
hypocenter_distribution=hypocenter_distribution,
)
self.polygon = polygon
self.area_discretization = area_discretization
def get_rupture_enclosing_polygon(self, dilation=0):
"""
Extends the area source polygon by ``dilation`` plus
:meth:`~openquake.hazardlib.source.point.PointSource._get_max_rupture_projection_radius`.
See :meth:`superclass method
<openquake.hazardlib.source.base.BaseSeismicSource.get_rupture_enclosing_polygon>`
for parameter and return value definition.
"""
max_rup_radius = self._get_max_rupture_projection_radius()
return self.polygon.dilate(max_rup_radius + dilation)
def iter_ruptures(self):
"""
See :meth:`openquake.hazardlib.source.base.BaseSeismicSource.iter_ruptures`
for description of parameters and return value.
Area sources are treated as a collection of point sources
(see :mod:`openquake.hazardlib.source.point`) with uniform parameters.
Ruptures of area source are just a union of ruptures
of those point sources. The actual positions of the implied
point sources form a uniformly spaced mesh on the polygon.
        Polygon's method
        :meth:`~openquake.hazardlib.geo.polygon.Polygon.discretize`
        is used for creating a mesh of points on the source's area.
Constructor's parameter ``area_discretization`` is used as
polygon's discretization spacing (not to be confused with
rupture surface's mesh spacing which is as well provided
to the constructor).
The ruptures' occurrence rates are rescaled with respect to number
of points the polygon discretizes to.
"""
polygon_mesh = self.polygon.discretize(self.area_discretization)
rate_scaling_factor = 1.0 / len(polygon_mesh)
# take the very first point of the polygon mesh
[epicenter0] = polygon_mesh[0:1]
# generate "reference ruptures" -- all the ruptures that have the same
# epicenter location (first point of the polygon's mesh) but different
# magnitudes, nodal planes, hypocenters' depths and occurrence rates
ref_ruptures = []
for (mag, mag_occ_rate) in self.get_annual_occurrence_rates():
for (np_prob, np) in self.nodal_plane_distribution.data:
for (hc_prob, hc_depth) in self.hypocenter_distribution.data:
hypocenter = Point(latitude=epicenter0.latitude,
longitude=epicenter0.longitude,
depth=hc_depth)
occurrence_rate = (mag_occ_rate
* float(np_prob) * float(hc_prob))
occurrence_rate *= rate_scaling_factor
surface = self._get_rupture_surface(mag, np, hypocenter)
ref_ruptures.append((mag, np.rake, hc_depth,
surface, occurrence_rate))
# for each of the epicenter positions generate as many ruptures
# as we generated "reference" ones: new ruptures differ only
# in hypocenter and surface location
for epicenter in polygon_mesh:
for mag, rake, hc_depth, surface, occ_rate in ref_ruptures:
# translate the surface from first epicenter position
                # to the target one preserving its geometry
surface = surface.translate(epicenter0, epicenter)
hypocenter = deepcopy(epicenter)
hypocenter.depth = hc_depth
rupture = ParametricProbabilisticRupture(
mag, rake, self.tectonic_region_type, hypocenter,
surface, type(self), occ_rate,
self.temporal_occurrence_model
)
yield rupture
def count_ruptures(self):
"""
See
:meth:`openquake.hazardlib.source.base.BaseSeismicSource.count_ruptures`
for description of parameters and return value.
"""
polygon_mesh = self.polygon.discretize(self.area_discretization)
return (len(polygon_mesh) *
len(self.get_annual_occurrence_rates()) *
len(self.nodal_plane_distribution.data) *
len(self.hypocenter_distribution.data))
def filter_sites_by_distance_to_source(self, integration_distance, sites):
"""
Overrides :meth:`implementation
<openquake.hazardlib.source.point.PointSource.filter_sites_by_distance_to_source>`
of the point source class just to call the :meth:`base class one
<openquake.hazardlib.source.base.BaseSeismicSource.filter_sites_by_distance_to_source>`.
"""
return super(PointSource, self).filter_sites_by_distance_to_source(
integration_distance, sites
)
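# Rate-rescaling sketch for iter_ruptures(): if the polygon discretizes to
# N mesh points, each (magnitude, nodal plane, hypocenter) combination is
# emitted once per point with rate mag_occ_rate * np_prob * hc_prob / N,
# so summing over all N epicenters recovers the original occurrence rate.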
|
rcgee/oq-hazardlib
|
openquake/hazardlib/source/area.py
|
Python
|
agpl-3.0
| 7,581
|
# Both players play optimally, so the player who moves first can always force a win.
t = int(raw_input())
for tc in xrange(t):
n, p = map(int, raw_input().split())
print "Airborne wins." if p == 0 else "Pagfloyd wins."
|
babhishek21/oj-sols
|
spoj/HUBULLU.py
|
Python
|
mit
| 265
|
from django.conf.urls import url
from django.contrib import admin
from shortener.views import HomeView, URLRedirectView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', HomeView.as_view()),
url(r'^(?P<shortcode>[\w-]+)/$', URLRedirectView.as_view(), name='scode'),
]
|
wederribas/TryDjango1.10
|
kirr/kirr/urls.py
|
Python
|
mit
| 294
|
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Window class for Windows
============================================================================
"""
# pylint: disable=E0401
# This file imports Win32-only symbols.
# pylint: disable=E0213,E1120,W0212
# Suppress warnings about _win32gui_func() lambdas and protected access.
from ctypes import windll, pointer, c_wchar, c_ulong
import win32api
import win32con
import win32gui
from .base_window import BaseWindow
from .rectangle import Rectangle
from ..actions.action_key import Key
#===========================================================================
class Win32Window(BaseWindow):
"""
The Window class is an interface to the Win32 window control
and placement.
"""
_results_box_class_names = ["#32770", "DgnResultsBoxWindow"]
#-----------------------------------------------------------------------
# Class methods to create new Window objects.
@classmethod
def get_foreground(cls):
handle = win32gui.GetForegroundWindow()
if handle in cls._windows_by_id:
return cls._windows_by_id[handle]
window = Win32Window(handle=handle)
return window
@classmethod
def get_all_windows(cls):
def function(handle, argument):
argument.append(cls.get_window(handle))
argument = []
win32gui.EnumWindows(function, argument)
return argument
@classmethod
def get_matching_windows(cls, executable=None, title=None):
# Make window searches case-insensitive.
if executable:
executable = executable.lower()
if title:
title = title.lower()
matching = []
for window in cls.get_all_windows():
if not window.is_visible:
continue
if executable:
if window.executable.lower().find(executable) == -1:
continue
if title:
if window.title.lower().find(title) == -1:
continue
if (window.executable.endswith("natspeak.exe")
and window.classname in cls._results_box_class_names
and window.get_position().dy < 50):
# If a window matches the above, it is very probably
# the results-box of DNS. We ignore this because
# its title is the words of the last recognition,
# which will often interfere with a search for
# a window with a spoken title.
continue
matching.append(window)
return matching
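    # Usage sketch (illustrative values):
    #   windows = Win32Window.get_matching_windows(executable="notepad",
    #                                              title="untitled")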
# @classmethod
# def get_window_by_executable(cls, executable):
# def function(handle, argument):
# title = windll.user32.GetWindowText(handle)
# print "title: %r" % title
# windll.user32.EnumWindows(function, argument)
#-----------------------------------------------------------------------
# Methods for initialization and introspection.
def __init__(self, handle):
super(Win32Window, self).__init__(id=handle)
def __repr__(self):
args = ["handle=%d" % self.handle] + list(self._names)
return "%s(%s)" % (self.__class__.__name__, ", ".join(args))
#-----------------------------------------------------------------------
# Direct access to various Win32 methods.
def _win32gui_func(name):
func = getattr(win32gui, name)
return lambda self: func(self._handle)
_get_rect = _win32gui_func("GetWindowRect")
_destroy = _win32gui_func("DestroyWindow")
_set_foreground = _win32gui_func("SetForegroundWindow")
_bring_to_top = _win32gui_func("BringWindowToTop")
_get_window_text = _win32gui_func("GetWindowText")
_get_class_name = _win32gui_func("GetClassName")
def _win32gui_test(name):
test = getattr(win32gui, name)
fget = lambda self: test(self._handle) and True or False
return property(fget=fget,
doc="Shortcut to win32gui.%s() function." % name)
is_valid = _win32gui_test("IsWindow")
is_enabled = _win32gui_test("IsWindowEnabled")
is_visible = _win32gui_test("IsWindowVisible")
is_minimized = _win32gui_test("IsIconic")
@property
def is_maximized(self):
# IsZoomed() is not available from win32gui for some reason.
# So we use the function directly.
return bool(windll.user32.IsZoomed(self._handle))
def _win32gui_show_window(state):
return lambda self: win32gui.ShowWindow(self._handle, state)
minimize = _win32gui_show_window(win32con.SW_MINIMIZE)
maximize = _win32gui_show_window(win32con.SW_MAXIMIZE)
restore = _win32gui_show_window(win32con.SW_RESTORE)
def _win32gui_post(message, w=0, l=0):
return lambda self: win32gui.PostMessage(self._handle, message, w, l)
close = _win32gui_post(win32con.WM_CLOSE)
def _get_window_pid(self):
# Get this window's process ID.
pid = c_ulong()
windll.user32.GetWindowThreadProcessId(self._handle, pointer(pid))
return pid.value
def _get_window_module(self):
# Get this window's process ID.
pid = self._get_window_pid()
# Get the process handle of this window's process ID.
# Access permission flags:
# 0x0410 = PROCESS_QUERY_INFORMATION | PROCESS_VM_READ
handle = windll.kernel32.OpenProcess(0x0410, 0, pid)
# Retrieve and return the process's executable path.
try:
            # Try to use the QueryFullProcessImageNameW function
# available since Windows Vista.
buffer_len = c_ulong(256)
buffer = (c_wchar * buffer_len.value)()
windll.kernel32.QueryFullProcessImageNameW(handle, 0,
pointer(buffer),
pointer(buffer_len))
buffer = buffer[:]
buffer = buffer[:buffer.index("\0")]
except Exception:
            # If the function above failed, fall back to the older
            # GetModuleFileNameExW function, available since Windows XP.
# Note that this fallback function seems to fail when
# this process is 32 bit Python and handle refers to a
# 64-bit process.
buffer_len = 256
buffer = (c_wchar * buffer_len)()
windll.psapi.GetModuleFileNameExW(handle, 0, pointer(buffer),
buffer_len)
buffer = buffer[:]
buffer = buffer[:buffer.index("\0")]
finally:
windll.kernel32.CloseHandle(handle)
return str(buffer)
#-----------------------------------------------------------------------
# Methods related to window geometry.
def get_position(self):
l, t, r, b = self._get_rect()
w = r - l; h = b - t
return Rectangle(l, t, w, h)
def set_position(self, rectangle):
assert isinstance(rectangle, Rectangle)
l, t, w, h = rectangle.ltwh
win32gui.MoveWindow(self._handle, l, t, w, h, 1)
#-----------------------------------------------------------------------
# Methods for miscellaneous window control.
def set_foreground(self):
# Bring this window into the foreground if it isn't already the
# current foreground window.
if self.handle != win32gui.GetForegroundWindow():
if self.is_minimized:
self.restore()
            # Press a key so Windows allows us to use SetForegroundWindow(),
            # since only the process that received the last input event may
            # set the foreground window. See Microsoft's documentation on
            # SetForegroundWindow() for why this works.
            # Only do this if neither the left nor the right control key is
            # held down.
if win32api.GetKeyState(win32con.VK_CONTROL) >= 0:
Key("control:down,control:up").execute()
# Set the foreground window.
self._set_foreground()
def set_focus(self):
# Setting window focus without raising the window doesn't appear to
# be possible in Windows, so fallback on set_foreground().
self.set_foreground()
|
Versatilus/dragonfly
|
dragonfly/windows/win32_window.py
|
Python
|
lgpl-3.0
| 9,120
|
# =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
# noinspection PyPackageRequirements
import wx
from gui.statsView import StatsView
from gui.bitmap_loader import BitmapLoader
from gui.utils.numberFormatter import formatAmount
from service.price import Fit, Price
from service.settings import MarketPriceSettings
class PriceViewMinimal(StatsView):
name = "priceViewMinimal"
def __init__(self, parent):
StatsView.__init__(self)
self.parent = parent
self.settings = MarketPriceSettings.getInstance()
def getHeaderText(self, fit):
return "Price"
def populatePanel(self, contentPanel, headerPanel):
contentSizer = contentPanel.GetSizer()
self.panel = contentPanel
self.headerPanel = headerPanel
headerContentSizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer = headerPanel.GetSizer()
hsizer.Add(headerContentSizer, 0, 0, 0)
self.labelEMStatus = wx.StaticText(headerPanel, wx.ID_ANY, "")
headerContentSizer.Add(self.labelEMStatus)
headerPanel.GetParent().AddToggleItem(self.labelEMStatus)
gridPrice = wx.GridSizer(1, 3, 0, 0)
contentSizer.Add(gridPrice, 0, wx.EXPAND | wx.ALL, 0)
for _type in ("ship", "fittings", "total"):
image = "%sPrice_big" % _type if _type != "ship" else "ship_big"
box = wx.BoxSizer(wx.HORIZONTAL)
gridPrice.Add(box, 0, wx.ALIGN_TOP)
box.Add(BitmapLoader.getStaticBitmap(image, contentPanel, "gui"), 0, wx.ALIGN_CENTER)
vbox = wx.BoxSizer(wx.VERTICAL)
box.Add(vbox, 1, wx.EXPAND)
vbox.Add(wx.StaticText(contentPanel, wx.ID_ANY, _type.capitalize()), 0, wx.ALIGN_LEFT)
hbox = wx.BoxSizer(wx.HORIZONTAL)
vbox.Add(hbox)
lbl = wx.StaticText(contentPanel, wx.ID_ANY, "0.00 ISK")
setattr(self, "labelPrice%s" % _type.capitalize(), lbl)
hbox.Add(lbl, 0, wx.ALIGN_LEFT)
def refreshPanel(self, fit):
if fit is not None:
self.fit = fit
fit_items = set(Fit.fitItemIter(fit))
Price.getInstance().getPrices(fit_items, self.processPrices, fetchTimeout=30)
self.labelEMStatus.SetLabel("Updating prices...")
self.refreshPanelPrices(fit)
self.panel.Layout()
def refreshPanelPrices(self, fit=None):
ship_price = 0
module_price = 0
drone_price = 0
fighter_price = 0
cargo_price = 0
booster_price = 0
implant_price = 0
if fit:
ship_price = fit.ship.item.price.price
if fit.modules:
for module in fit.modules:
if not module.isEmpty:
module_price += module.item.price.price
if fit.drones:
for drone in fit.drones:
drone_price += drone.item.price.price * drone.amount
if fit.fighters:
for fighter in fit.fighters:
fighter_price += fighter.item.price.price * fighter.amount
if fit.cargo:
for cargo in fit.cargo:
cargo_price += cargo.item.price.price * cargo.amount
if fit.boosters:
for booster in fit.boosters:
booster_price += booster.item.price.price
if fit.appliedImplants:
for implant in fit.appliedImplants:
implant_price += implant.item.price.price
fitting_price = module_price
total_price = 0
total_price += ship_price
total_price += module_price
if self.settings.get("drones"):
total_price += drone_price + fighter_price
if self.settings.get("cargo"):
total_price += cargo_price
if self.settings.get("character"):
total_price += booster_price + implant_price
self.labelPriceShip.SetLabel("%s ISK" % formatAmount(ship_price, 3, 3, 9, currency=True))
self.labelPriceShip.SetToolTip(wx.ToolTip('{:,.2f} ISK'.format(ship_price)))
self.labelPriceFittings.SetLabel("%s ISK" % formatAmount(fitting_price, 3, 3, 9, currency=True))
self.labelPriceFittings.SetToolTip(wx.ToolTip('{:,.2f} ISK'.format(fitting_price)))
self.labelPriceTotal.SetLabel("%s ISK" % formatAmount(total_price, 3, 3, 9, currency=True))
self.labelPriceTotal.SetToolTip(wx.ToolTip('{:,.2f} ISK'.format(total_price)))
def processPrices(self, prices):
self.refreshPanelPrices(self.fit)
self.labelEMStatus.SetLabel("")
self.panel.Layout()
PriceViewMinimal.register()
|
DarkFenX/Pyfa
|
gui/builtinStatsViews/priceViewMinimal.py
|
Python
|
gpl-3.0
| 5,462
|
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Gilles-Alexandre Quenot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
class FortuneoTest(BackendTest):
BACKEND = 'fortuneo'
def test_fortuneo(self):
l = list(self.backend.iter_accounts())
self.assertTrue(len(l) > 0)
a = l[0]
list(self.backend.iter_history(a))
# vim:ts=4:sw=4
|
eirmag/weboob
|
modules/fortuneo/test.py
|
Python
|
agpl-3.0
| 1,032
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
The flight control tab shows telemetry data and flight settings.
"""
__author__ = 'Bitcraze AB'
__all__ = ['FlightTab']
import sys
import roslib; roslib.load_manifest('crazyflie_ros')
import rospy
from std_msgs.msg import String
from crazyflie.msg import *
import logging
logger = logging.getLogger(__name__)
from time import time
from PyQt4 import QtCore, QtGui, uic
from PyQt4.QtCore import Qt, pyqtSlot, pyqtSignal, QThread, SIGNAL
from PyQt4.QtGui import QMessageBox
from cflib.crazyflie import Crazyflie
from cfclient.ui.widgets.ai import AttitudeIndicator
from cfclient.utils.guiconfig import GuiConfig
from cflib.crazyflie.log import Log, LogVariable, LogConfig
from cfclient.ui.tab import Tab
flight_tab_class = uic.loadUiType(sys.path[0] +
"/cfclient/ui/tabs/flightTab.ui")[0]
MAX_THRUST = 65365.0
class FlightTab(Tab, flight_tab_class):
uiSetupReadySignal = pyqtSignal()
_motor_data_signal = pyqtSignal(int, object, object)
_imu_data_signal = pyqtSignal(int, object, object)
_althold_data_signal = pyqtSignal(int, object, object)
    _baro_data_signal = pyqtSignal(int, object, object)
    _accel_data_signal = pyqtSignal(int, object, object)
    _gyro_data_signal = pyqtSignal(int, object, object)
_input_updated_signal = pyqtSignal(float, float, float, float)
_rp_trim_updated_signal = pyqtSignal(float, float)
_emergency_stop_updated_signal = pyqtSignal(bool)
_log_error_signal = pyqtSignal(object, str)
#UI_DATA_UPDATE_FPS = 10
connectionFinishedSignal = pyqtSignal(str)
disconnectedSignal = pyqtSignal(str)
def __init__(self, tabWidget, helper, *args):
rospy.init_node("cf_flightTab")
self.motor_pub = rospy.Publisher("cf_motorData", MotorData)
self.stabilizer_pub = rospy.Publisher("cf_stabData", StabData)
self.acc_pub = rospy.Publisher("cf_accData", AccData)
self.gyro_pub = rospy.Publisher("cf_gyroData", GyroData)
super(FlightTab, self).__init__(*args)
self.setupUi(self)
self.tabName = "Flight Control"
self.menuName = "Flight Control"
self.tabWidget = tabWidget
self.helper = helper
self.disconnectedSignal.connect(self.disconnected)
self.connectionFinishedSignal.connect(self.connected)
        # Incoming signals
self.helper.cf.connected.add_callback(
self.connectionFinishedSignal.emit)
self.helper.cf.disconnected.add_callback(self.disconnectedSignal.emit)
self._input_updated_signal.connect(self.updateInputControl)
self.helper.inputDeviceReader.input_updated.add_callback(
self._input_updated_signal.emit)
self._rp_trim_updated_signal.connect(self.calUpdateFromInput)
self.helper.inputDeviceReader.rp_trim_updated.add_callback(
self._rp_trim_updated_signal.emit)
self._emergency_stop_updated_signal.connect(self.updateEmergencyStop)
self.helper.inputDeviceReader.emergency_stop_updated.add_callback(
self._emergency_stop_updated_signal.emit)
self.helper.inputDeviceReader.althold_updated.add_callback(
lambda enabled: self.helper.cf.param.set_value("flightmode.althold", enabled))
self._imu_data_signal.connect(self._imu_data_received)
self._baro_data_signal.connect(self._baro_data_received)
self._althold_data_signal.connect(self._althold_data_received)
        self._motor_data_signal.connect(self._motor_data_received)
        self._accel_data_signal.connect(self._accel_data_received)
        self._gyro_data_signal.connect(self._gyro_data_received)
self._log_error_signal.connect(self._logging_error)
# Connect UI signals that are in this tab
self.flightModeCombo.currentIndexChanged.connect(self.flightmodeChange)
self.minThrust.valueChanged.connect(self.minMaxThrustChanged)
self.maxThrust.valueChanged.connect(self.minMaxThrustChanged)
self.thrustLoweringSlewRateLimit.valueChanged.connect(
self.thrustLoweringSlewRateLimitChanged)
self.slewEnableLimit.valueChanged.connect(
self.thrustLoweringSlewRateLimitChanged)
self.targetCalRoll.valueChanged.connect(self._trim_roll_changed)
self.targetCalPitch.valueChanged.connect(self._trim_pitch_changed)
self.maxAngle.valueChanged.connect(self.maxAngleChanged)
self.maxYawRate.valueChanged.connect(self.maxYawRateChanged)
self.uiSetupReadySignal.connect(self.uiSetupReady)
self.clientXModeCheckbox.toggled.connect(self.changeXmode)
self.isInCrazyFlightmode = False
self.uiSetupReady()
self.clientXModeCheckbox.setChecked(GuiConfig().get("client_side_xmode"))
self.crazyflieXModeCheckbox.clicked.connect(
lambda enabled:
self.helper.cf.param.set_value("flightmode.x",
str(enabled)))
self.helper.cf.param.add_update_callback(
group="flightmode", name="xmode",
cb=( lambda name, checked:
self.crazyflieXModeCheckbox.setChecked(eval(checked))))
self.ratePidRadioButton.clicked.connect(
lambda enabled:
self.helper.cf.param.set_value("flightmode.ratepid",
str(enabled)))
self.angularPidRadioButton.clicked.connect(
lambda enabled:
self.helper.cf.param.set_value("flightmode.ratepid",
str(not enabled)))
self.helper.cf.param.add_update_callback(
group="flightmode", name="ratepid",
cb=(lambda name, checked:
self.ratePidRadioButton.setChecked(eval(checked))))
self.helper.cf.param.add_update_callback(
group="flightmode", name="althold",
cb=(lambda name, enabled:
self.helper.inputDeviceReader.setAltHold(eval(enabled))))
self.helper.cf.param.add_update_callback(
group="imu_sensors",
cb=self._set_available_sensors)
self.logBaro = None
self.logAltHold = None
self.ai = AttitudeIndicator()
self.verticalLayout_4.addWidget(self.ai)
self.splitter.setSizes([1000,1])
self.targetCalPitch.setValue(GuiConfig().get("trim_pitch"))
self.targetCalRoll.setValue(GuiConfig().get("trim_roll"))
def thrustToPercentage(self, thrust):
return ((thrust / MAX_THRUST) * 100.0)
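    # Quick sanity check (sketch): with MAX_THRUST = 65365.0,
    # thrustToPercentage(32682.5) returns exactly 50.0.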
def uiSetupReady(self):
flightComboIndex = self.flightModeCombo.findText(
GuiConfig().get("flightmode"), Qt.MatchFixedString)
if (flightComboIndex < 0):
self.flightModeCombo.setCurrentIndex(0)
self.flightModeCombo.currentIndexChanged.emit(0)
else:
self.flightModeCombo.setCurrentIndex(flightComboIndex)
self.flightModeCombo.currentIndexChanged.emit(flightComboIndex)
def _logging_error(self, log_conf, msg):
QMessageBox.about(self, "Log error", "Error when starting log config"
" [%s]: %s" % (log_conf.name, msg))
def _motor_data_received(self, timestamp, data, logconf):
if self.isVisible():
self.actualM1.setValue(data["motor.m1"])
self.actualM2.setValue(data["motor.m2"])
self.actualM3.setValue(data["motor.m3"])
self.actualM4.setValue(data["motor.m4"])
d = MotorData()
d.m1 = data["motor.m1"]
d.m2 = data["motor.m2"]
d.m3 = data["motor.m3"]
d.m4 = data["motor.m4"]
self.motor_pub.publish(d)
def _gyro_data_received(self, timestamp, data, logconf):
if self.isVisible():
self.actualx.setValue(data["gyro.x"])
self.actualy.setValue(data["gyro.y"])
self.actualz.setValue(data["gyro.z"])
d = GyroData()
d.x = data["gyro.x"]
d.y = data["gyro.y"]
d.z = data["gyro.z"]
self.gyro_pub.publish(d)
    def _accel_data_received(self, timestamp, data, logconf):
        if self.isVisible():
            self.actualx.setValue(data["acc.x"])
            self.actualy.setValue(data["acc.y"])
            self.actualz.setValue(data["acc.z"])
            # Republish on the ROS topic set up in __init__ (acc_pub/AccData);
            # build the message first, then publish it once.
            d = AccData()
            d.x = data["acc.x"]
            d.y = data["acc.y"]
            d.z = data["acc.z"]
            self.acc_pub.publish(d)
def _baro_data_received(self, timestamp, data, logconf):
if self.isVisible():
self.actualASL.setText(("%.2f" % data["baro.aslLong"]))
self.ai.setBaro(data["baro.aslLong"])
def _althold_data_received(self, timestamp, data, logconf):
if self.isVisible():
target = data["altHold.target"]
if target>0:
if not self.targetASL.isEnabled():
self.targetASL.setEnabled(True)
self.targetASL.setText(("%.2f" % target))
self.ai.setHover(target)
elif self.targetASL.isEnabled():
self.targetASL.setEnabled(False)
self.targetASL.setText("Not set")
self.ai.setHover(0)
def _imu_data_received(self, timestamp, data, logconf):
if self.isVisible():
self.actualRoll.setText(("%.2f" % data["stabilizer.roll"]))
self.actualPitch.setText(("%.2f" % data["stabilizer.pitch"]))
self.actualYaw.setText(("%.2f" % data["stabilizer.yaw"]))
self.actualThrust.setText("%.2f%%" %
self.thrustToPercentage(
data["stabilizer.thrust"]))
self.ai.setRollPitch(-data["stabilizer.roll"],
data["stabilizer.pitch"])
def connected(self, linkURI):
# IMU & THRUST
lg = LogConfig("Stabalizer", GuiConfig().get("ui_update_period"))
lg.add_variable("stabilizer.roll", "float")
lg.add_variable("stabilizer.pitch", "float")
lg.add_variable("stabilizer.yaw", "float")
lg.add_variable("stabilizer.thrust", "uint16_t")
self.helper.cf.log.add_config(lg)
if (lg.valid):
lg.data_received_cb.add_callback(self._imu_data_signal.emit)
lg.error_cb.add_callback(self._log_error_signal.emit)
lg.start()
else:
logger.warning("Could not setup logconfiguration after "
"connection!")
# MOTOR
lg = LogConfig("Motors", GuiConfig().get("ui_update_period"))
lg.add_variable("motor.m1")
lg.add_variable("motor.m2")
lg.add_variable("motor.m3")
lg.add_variable("motor.m4")
self.helper.cf.log.add_config(lg)
if lg.valid:
lg.data_received_cb.add_callback(self._motor_data_signal.emit)
lg.error_cb.add_callback(self._log_error_signal.emit)
lg.start()
else:
logger.warning("Could not setup logconfiguration after "
"connection!")
#Accel
lg = LogConfic("Accelerometer", GuiConfig().get("ui_update_period"))
lg.add_variable("acc.x")
lg.add_variable("acc.y")
lg.add_variable("acc.z")
self.helper.cf.log.add_config(lg)
if lg.valid:
lg.data_received_cb.add_callback(self._accel_data_signal.emit)
lg.error_cb.add_callback(self._log_error_signal.emit)
lg.start()
else:
logger.warning("Could not setup logconfiguration after "
"connection!")
#Gyroscope
lg = LogConfic("Gyroscope", GuiConfig().get("ui_update_period"))
lg.add_variable("gyro.x")
lg.add_variable("gyro.y")
lg.add_variable("gyro.z")
self.helper.cf.log.add_config(lg)
if lg.valid:
lg.data_received_cb.add_callback(self._gyro_data_signal.emit)
lg.error_cb.add_callback(self._log_error_signal.emit)
lg.start()
else:
logger.warning("Could not setup logconfiguration after "
"connection!")
def _set_available_sensors(self, name, available):
logger.info("[%s]: %s", name, available)
available = eval(available)
if ("HMC5883L" in name):
if (not available):
self.actualASL.setText("N/A")
self.actualASL.setEnabled(False)
else:
self.actualASL.setEnabled(True)
self.helper.inputDeviceReader.setAltHoldAvailable(available)
if (not self.logBaro and not self.logAltHold):
# The sensor is available, set up the logging
self.logBaro = LogConfig("Baro", 200)
self.logBaro.add_variable("baro.aslLong", "float")
self.helper.cf.log.add_config(self.logBaro)
if self.logBaro.valid:
self.logBaro.data_received_cb.add_callback(
self._baro_data_signal.emit)
self.logBaro.error_cb.add_callback(
self._log_error_signal.emit)
self.logBaro.start()
else:
logger.warning("Could not setup logconfiguration after "
"connection!")
self.logAltHold = LogConfig("AltHold", 200)
self.logAltHold.add_variable("altHold.target", "float")
self.helper.cf.log.add_config(self.logAltHold)
if self.logAltHold.valid:
self.logAltHold.data_received_cb.add_callback(
self._althold_data_signal.emit)
self.logAltHold.error_cb.add_callback(
self._log_error_signal.emit)
self.logAltHold.start()
else:
logger.warning("Could not setup logconfiguration after "
"connection!")
def disconnected(self, linkURI):
self.ai.setRollPitch(0, 0)
self.actualM1.setValue(0)
self.actualM2.setValue(0)
self.actualM3.setValue(0)
self.actualM4.setValue(0)
self.actualRoll.setText("")
self.actualPitch.setText("")
self.actualYaw.setText("")
self.actualThrust.setText("")
self.actualASL.setText("")
self.targetASL.setText("Not Set")
self.targetASL.setEnabled(False)
self.actualASL.setEnabled(False)
self.logBaro = None
self.logAltHold = None
def minMaxThrustChanged(self):
self.helper.inputDeviceReader.set_thrust_limits(
self.minThrust.value(), self.maxThrust.value())
if (self.isInCrazyFlightmode == True):
GuiConfig().set("min_thrust", self.minThrust.value())
GuiConfig().set("max_thrust", self.maxThrust.value())
def thrustLoweringSlewRateLimitChanged(self):
self.helper.inputDeviceReader.set_thrust_slew_limiting(
self.thrustLoweringSlewRateLimit.value(),
self.slewEnableLimit.value())
if (self.isInCrazyFlightmode == True):
GuiConfig().set("slew_limit", self.slewEnableLimit.value())
GuiConfig().set("slew_rate", self.thrustLoweringSlewRateLimit.value())
def maxYawRateChanged(self):
logger.debug("MaxYawrate changed to %d", self.maxYawRate.value())
self.helper.inputDeviceReader.set_yaw_limit(self.maxYawRate.value())
if (self.isInCrazyFlightmode == True):
GuiConfig().set("max_yaw", self.maxYawRate.value())
def maxAngleChanged(self):
logger.debug("MaxAngle changed to %d", self.maxAngle.value())
self.helper.inputDeviceReader.set_rp_limit(self.maxAngle.value())
if (self.isInCrazyFlightmode == True):
GuiConfig().set("max_rp", self.maxAngle.value())
def _trim_pitch_changed(self, value):
logger.debug("Pitch trim updated to [%f]" % value)
self.helper.inputDeviceReader.set_trim_pitch(value)
GuiConfig().set("trim_pitch", value)
def _trim_roll_changed(self, value):
logger.debug("Roll trim updated to [%f]" % value)
self.helper.inputDeviceReader.set_trim_roll(value)
GuiConfig().set("trim_roll", value)
def calUpdateFromInput(self, rollCal, pitchCal):
logger.debug("Trim changed on joystick: roll=%.2f, pitch=%.2f",
rollCal, pitchCal)
self.targetCalRoll.setValue(rollCal)
self.targetCalPitch.setValue(pitchCal)
def updateInputControl(self, roll, pitch, yaw, thrust):
self.targetRoll.setText(("%0.2f" % roll))
self.targetPitch.setText(("%0.2f" % pitch))
self.targetYaw.setText(("%0.2f" % yaw))
self.targetThrust.setText(("%0.2f %%" %
self.thrustToPercentage(thrust)))
self.thrustProgress.setValue(thrust)
def setMotorLabelsEnabled(self, enabled):
self.M1label.setEnabled(enabled)
self.M2label.setEnabled(enabled)
self.M3label.setEnabled(enabled)
self.M4label.setEnabled(enabled)
def emergencyStopStringWithText(self, text):
return ("<html><head/><body><p>"
"<span style='font-weight:600; color:#7b0005;'>{}</span>"
"</p></body></html>".format(text))
def updateEmergencyStop(self, emergencyStop):
if emergencyStop:
self.setMotorLabelsEnabled(False)
self.emergency_stop_label.setText(
self.emergencyStopStringWithText("Kill switch active"))
else:
self.setMotorLabelsEnabled(True)
self.emergency_stop_label.setText("")
def flightmodeChange(self, item):
GuiConfig().set("flightmode", self.flightModeCombo.itemText(item))
logger.info("Changed flightmode to %s",
self.flightModeCombo.itemText(item))
self.isInCrazyFlightmode = False
if (item == 0): # Normal
self.maxAngle.setValue(GuiConfig().get("normal_max_rp"))
self.maxThrust.setValue(GuiConfig().get("normal_max_thrust"))
self.minThrust.setValue(GuiConfig().get("normal_min_thrust"))
self.slewEnableLimit.setValue(GuiConfig().get("normal_slew_limit"))
self.thrustLoweringSlewRateLimit.setValue(
GuiConfig().get("normal_slew_rate"))
self.maxYawRate.setValue(GuiConfig().get("normal_max_yaw"))
if (item == 1): # Advanced
self.maxAngle.setValue(GuiConfig().get("max_rp"))
self.maxThrust.setValue(GuiConfig().get("max_thrust"))
self.minThrust.setValue(GuiConfig().get("min_thrust"))
self.slewEnableLimit.setValue(GuiConfig().get("slew_limit"))
self.thrustLoweringSlewRateLimit.setValue(
GuiConfig().get("slew_rate"))
self.maxYawRate.setValue(GuiConfig().get("max_yaw"))
self.isInCrazyFlightmode = True
if (item == 0):
newState = False
else:
newState = True
self.maxThrust.setEnabled(newState)
self.maxAngle.setEnabled(newState)
self.minThrust.setEnabled(newState)
self.thrustLoweringSlewRateLimit.setEnabled(newState)
self.slewEnableLimit.setEnabled(newState)
self.maxYawRate.setEnabled(newState)
@pyqtSlot(bool)
def changeXmode(self, checked):
self.helper.cf.commander.set_client_xmode(checked)
GuiConfig().set("client_side_xmode", checked)
logger.info("Clientside X-mode enabled: %s", checked)
|
WSCU/crazyflie_ros
|
src/cfclient/ui/tabs/FlightTab.py
|
Python
|
gpl-2.0
| 21,361
|
"""
# Majic
# Copyright (C) 2014 CEH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from sqlalchemy import Column, Integer, String, Boolean
from joj.model import Base
from joj.utils import constants
class OutputVariable(Base):
"""
JULES Output variable
"""
__tablename__ = 'output_variables'
id = Column(Integer, primary_key=True)
name = Column(String(constants.DB_STRING_SIZE))
description = Column(String(constants.DB_LONG_STRING_SIZE))
depends_on_nsmax = Column(Boolean, default=False)
def __repr__(self):
"""String representation"""
return "<Parameter(name=%s)>" % self.name
|
NERC-CEH/jules-jasmin
|
majic/joj/model/output_variable.py
|
Python
|
gpl-2.0
| 1,339
|
import os
import sys
import json
__location__ = os.path.dirname(os.path.realpath(__file__))
jsonData = []
if len(sys.argv) < 3:
print ("Error: invalid arguments")
else:
if os.path.isfile(sys.argv[1]):
inputFile = sys.argv[1]
else:
inputFile = os.path.join(__location__, sys.argv[1])
if not os.path.isfile(inputFile):
print ("Error reading input file")
else:
keyCount = 0
for key in sys.argv:
keyCount = keyCount + 1
if keyCount < 3:
pass
else:
print ("Graphing " + key)
file = open(inputFile, "r")
data = file.readlines()
file.close()
issueList = ["2014_21", "2014_22", "2015_01", "2015_02","2015_03","2015_04","2015_05","2015_06","2015_07","2015_08","2015_09","2015_10","2015_12","2015_13","2015_14","2015_15","2015_16","2015_17","2015_18","2015_19","2015_20","2015_21","2015_22","2015_23", "2015_24"]
for issueNumber in issueList:
newLine = {"Issue": issueNumber, "name": key, "value": 0}
jsonData.append(newLine)
for line in data:
if "|" in line:
entity = line.split("|")[0]
fileName = line.split("|")[2]
issue = fileName.split(".")[0]
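                        # Zero-pad single-digit issue numbers so they match
                        # the issueList entries: "2015_1" -> "2015_01".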
if len(issue) == 6:
issue = "_0".join(issue.split("_"))
if entity.lower() == key.lower():
                            for entry in jsonData:
                                if entry["Issue"] == issue and entry["name"] == key:
                                    entry["value"] = entry["value"] + 1
jsonText = json.dumps(jsonData, indent=2)
f = open("graphData.json", "w")
f.write(jsonText)
f.close()
|
gwiedeman/ResearchingNY2016
|
graphEntities.py
|
Python
|
unlicense
| 1,481
|
from importlib import import_module
DEFAULT_ELEMENT_TYPES = set(['form', 'input', 'radio', 'checkbox', 'button', 'table'])
BOOTSTRAP3_ELEMENT_TYPES = DEFAULT_ELEMENT_TYPES | set(['panel', 'modal'])
SUPPORTED_ELEMENT_TYPES = {
'default': DEFAULT_ELEMENT_TYPES,
'bootstrap3': BOOTSTRAP3_ELEMENT_TYPES,
}
class ElemParserFactory(object):
@classmethod
def load_parser(cls, elem_type):
"""
Given a element type returns the corresponding parser instance.
All errors raised by the import process (ImportError, AttributeError) are allowed to propagate.
"""
module_path = 'y2h.parser.elems.{0}'.format(elem_type.lower())
        module = import_module(module_path)
parser_class_name = '{0}Parser'.format(elem_type.title())
return getattr(module, parser_class_name, None)
@classmethod
def guess_elem_type(cls, template, elem_value):
"""
Try to identify element's type, e,g: form, input, radio...
elem can be a string or dict
# e,g:
# html:
# - form
# - form:
# fieldset:
# the first form is a string type, the second form is a dict type
"""
elem_type = None
if isinstance(elem_value, str):
# e,g: elem is 'form'
elem_type = elem_value
elif isinstance(elem_value, dict):
supported_element_types = SUPPORTED_ELEMENT_TYPES.get(template, DEFAULT_ELEMENT_TYPES)
for guess_type in supported_element_types:
if guess_type in elem_value.keys():
elem_type = guess_type
break
else:
raise ValueError("Invalid element while creating element parser")
return elem_type
@classmethod
def create(cls, template, elem_value):
elem_type = cls.guess_elem_type(template, elem_value)
if not elem_type:
return None
parser_class = cls.load_parser(elem_type)
if not parser_class:
return None
return parser_class(template, elem_type, elem_value)
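# A usage sketch (assumptions: a module y2h/parser/elems/input.py exporting an
# InputParser class exists, matching the naming convention in load_parser;
# the .parse() call is a hypothetical API of the concrete parser):
#
#   parser = ElemParserFactory.create('default', {'input': {'type': 'text'}})
#   if parser is not None:
#       html = parser.parse()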
|
rfancn/y2h
|
y2h/parser/factory.py
|
Python
|
mit
| 2,247
|
"""
@package mi.dataset.driver.optaa_dj.dcl.driver
@file marine-integrations/mi/dataset/driver/optaa_dj/dcl/driver.py
@author Steve Myerson (Raytheon)
@brief Driver for the optaa_dj_dcl
Release notes:
Initial release
"""
__author__ = 'Steve Myerson (Raytheon)'
__license__ = 'Apache 2.0'
from mi.core.common import BaseEnum
from mi.core.exceptions import ConfigurationException
from mi.core.log import get_logger; log = get_logger()
from mi.dataset.dataset_driver import \
DataSetDriverConfigKeys, \
HarvesterType, \
MultipleHarvesterDataSetDriver
from mi.dataset.harvester import \
SingleDirectoryHarvester
from mi.dataset.parser.optaa_dj_dcl import \
OptaaDjDclRecoveredParser, \
OptaaDjDclTelemeteredParser, \
OptaaDjDclRecoveredInstrumentDataParticle, \
OptaaDjDclRecoveredMetadataDataParticle, \
OptaaDjDclTelemeteredInstrumentDataParticle, \
OptaaDjDclTelemeteredMetadataDataParticle
MODULE_NAME = 'mi.dataset.parser.optaa_dj_dcl'
class DataTypeKey(BaseEnum):
"""
These are the possible harvester/parser pairs for this driver
"""
OPTAA_DJ_RECOVERED = 'optaa_dj_dcl_recovered'
OPTAA_DJ_TELEMETERED = 'optaa_dj_dcl_telemetered'
class OptaaDjDclDataSetDriver(MultipleHarvesterDataSetDriver):
def __init__(self, config, memento, data_callback, state_callback,
event_callback, exception_callback):
# Initialize the possible types of harvester/parser pairs
# for this driver.
data_keys = DataTypeKey.list()
# Link the data keys to the harvester type.
# Recovered harvester is single directory.
# Telemetered harvester is single directory.
harvester_type = {
DataTypeKey.OPTAA_DJ_RECOVERED: HarvesterType.SINGLE_DIRECTORY,
DataTypeKey.OPTAA_DJ_TELEMETERED: HarvesterType.SINGLE_DIRECTORY,
}
super(OptaaDjDclDataSetDriver, self).__init__(config, memento,
data_callback, state_callback, event_callback,
exception_callback, data_keys, harvester_type=harvester_type)
@classmethod
def stream_config(cls):
return [
OptaaDjDclRecoveredInstrumentDataParticle.type(),
OptaaDjDclRecoveredMetadataDataParticle.type(),
OptaaDjDclTelemeteredInstrumentDataParticle.type(),
OptaaDjDclTelemeteredMetadataDataParticle.type()
]
def _build_harvester(self, driver_state):
"""
Build the harvesters.
Verify correctness of data keys.
Display warnings if error detected in data keys.
@param driver_state The starting driver state
"""
harvesters = []
# Verify that the Recovered harvester has been configured.
# If so, build the harvester and add it to the list of harvesters.
if DataTypeKey.OPTAA_DJ_RECOVERED in self._harvester_config:
harvesters.append(self.build_single_harvester(DataTypeKey.OPTAA_DJ_RECOVERED,
driver_state))
else:
log.warn('No configuration for optaa_dj_dcl recovered harvester, not building')
# Verify that the Telemetered harvester has been configured.
# If so, build the harvester and add it to the list of harvesters.
if DataTypeKey.OPTAA_DJ_TELEMETERED in self._harvester_config:
harvesters.append(self.build_single_harvester(DataTypeKey.OPTAA_DJ_TELEMETERED,
driver_state))
else:
log.warn('No configuration for optaa_dj_dcl telemetered harvester, not building')
return harvesters
def build_single_harvester(self, key, driver_state):
harvester = SingleDirectoryHarvester(
self._harvester_config.get(key),
driver_state[key],
lambda filename: self._new_file_callback(filename, key),
lambda modified: self._modified_file_callback(modified, key),
self._exception_callback)
return harvester
def _build_parser(self, parser_state, stream_in, data_key):
"""
Build the requested parser based on the data key
@param parser_state starting parser state to pass to parser
        @param stream_in Handle of open file to pass to parser
@param data_key Key to determine which parser type is built
"""
# Build the recovered parser if requested.
if data_key == DataTypeKey.OPTAA_DJ_RECOVERED:
config = self._parser_config[data_key]
config.update({
DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
DataSetDriverConfigKeys.PARTICLE_CLASS: None
})
parser_class = OptaaDjDclRecoveredParser
# Build the telemetered parser if requested.
elif data_key == DataTypeKey.OPTAA_DJ_TELEMETERED:
config = self._parser_config[data_key]
config.update({
DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
DataSetDriverConfigKeys.PARTICLE_CLASS: None
})
parser_class = OptaaDjDclTelemeteredParser
# Not one of the keys we recognize?
# No parser for you!
else:
            raise ConfigurationException('Optaa_dj Parser configuration incorrect %s'
                                         % data_key)
# Note that the Optaa_Dj parsers need the name of the file being parsed.
parser = parser_class(
config,
stream_in,
parser_state,
lambda state, ingested:
self._save_parser_state(state, data_key, ingested),
self._data_callback,
self._sample_exception_callback,
self._file_in_process[data_key])
return parser
|
ooici/marine-integrations
|
mi/dataset/driver/optaa_dj/dcl/driver.py
|
Python
|
bsd-2-clause
| 5,920
|
import os
import sys
from gooey.gui.windows import layouts
from gooey.python_bindings import argparse_to_json
from gooey.gui.util.quoting import quote
def create_from_parser(parser, source_path, **kwargs):
auto_start = kwargs.get('auto_start', False)
if hasattr(sys, 'frozen'):
run_cmd = quote(source_path)
else:
run_cmd = '{} -u {}'.format(quote(sys.executable), quote(source_path))
build_spec = {
'language': kwargs.get('language', 'english'),
'target': run_cmd,
'program_name': kwargs.get('program_name') or os.path.basename(sys.argv[0]).replace('.py', ''),
'program_description': kwargs.get('program_description', ''),
'auto_start': kwargs.get('auto_start', False),
'show_advanced': kwargs.get('advanced', True),
'default_size': kwargs.get('default_size', (610, 530)),
'num_required_cols': kwargs.get('required_cols', 1),
'num_optional_cols': kwargs.get('optional_cols', 3),
'manual_start': False,
'layout_type': 'flat',
'monospace_display': kwargs.get('monospace_display', False),
'image_dir': kwargs.get('image_dir'),
'language_dir': kwargs.get('language_dir'),
'progress_regex': kwargs.get('progress_regex'),
'progress_expr': kwargs.get('progress_expr'),
'disable_progress_bar_animation': kwargs.get('disable_progress_bar_animation'),
'disable_stop_button': kwargs.get('disable_stop_button'),
'group_by_type': kwargs.get('group_by_type', True)
}
if not auto_start:
build_spec['program_description'] = parser.description or build_spec['program_description']
layout_data = argparse_to_json.convert(parser) if build_spec['show_advanced'] else layouts.basic_config.items()
build_spec.update(layout_data)
return build_spec
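# A minimal usage sketch (assumes the Gooey imports above resolve; 'demo.py'
# is a hypothetical script path):
#
#   import argparse
#   parser = argparse.ArgumentParser(description='Demo tool')
#   parser.add_argument('infile')
#   spec = create_from_parser(parser, 'demo.py', program_name='Demo')
#   # spec['target'] is the quoted interpreter + script command; since
#   # auto_start defaults to False, spec['program_description'] falls back
#   # to parser.description.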
|
ME-ICA/me-ica
|
gooey/python_bindings/config_generator.py
|
Python
|
lgpl-2.1
| 1,912
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
Copyright (c) 2011 Tyler Kennedy <tk@tkte.ch>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
def transform_floats(o):
if isinstance(o, float):
return RoundedFloat(o)
elif isinstance(o, dict):
return dict((k, transform_floats(v)) for k, v in o.iteritems())
elif isinstance(o, (list, tuple)):
return map(transform_floats, o)
return o
class RoundedFloat(float):
def __repr__(self):
return "%.5g" % self
|
deathcap/Burger
|
burger/roundedfloats.py
|
Python
|
mit
| 1,486
|
from __future__ import division
from builtins import range
import numpy as np
np.seterr(divide='ignore') # these warnings are usually harmless for this code
np.random.seed(0)
from matplotlib import pyplot as plt
import matplotlib
import os
matplotlib.rcParams['font.size'] = 8
import pyhsmm
from pyhsmm.util.text import progprint_xrange
print('''
This demo shows how HDP-HMMs can fail when the underlying data has state
persistence without some kind of temporal regularization (in the form of a
sticky bias or duration modeling): without setting the number of states to be
the correct number a priori, lots of extra states can be instantiated.
BUT the effect is much more relevant on real data (when the data doesn't exactly
fit the model). Maybe this demo should use multinomial emissions...
''')
###############
# load data #
###############
data = np.loadtxt(os.path.join(os.path.dirname(__file__),'example-data.txt'))[:2500]
T = data.shape[0]
#########################
# posterior inference #
#########################
# Set the weak limit truncation level
Nmax = 25
# and some hyperparameters
obs_dim = data.shape[1]
obs_hypparams = {'mu_0':np.zeros(obs_dim),
'sigma_0':np.eye(obs_dim),
'kappa_0':0.25,
'nu_0':obs_dim+2}
### HDP-HMM without the sticky bias
obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]
posteriormodel = pyhsmm.models.WeakLimitHDPHMM(alpha=6.,gamma=6.,
init_state_concentration=1.,
obs_distns=obs_distns)
posteriormodel.add_data(data)
for idx in progprint_xrange(100):
posteriormodel.resample_model()
posteriormodel.plot()
plt.gcf().suptitle('HDP-HMM sampled model after 100 iterations')
### HDP-HMM with "sticky" initialization
obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]
posteriormodel = pyhsmm.models.WeakLimitHDPHMM(alpha=6.,gamma=6.,
init_state_concentration=1.,
obs_distns=obs_distns)
# Start with a "sticky" state sequence
z_init = np.random.randint(0, Nmax, size=(T//5)).repeat(5)
posteriormodel.add_data(data, stateseq=z_init)
# Initialize the parameters of the model, holding the stateseq fixed
for _ in progprint_xrange(10):
posteriormodel.resample_parameters()
for idx in progprint_xrange(100):
posteriormodel.resample_model()
posteriormodel.plot()
plt.gcf().suptitle('HDP-HMM (sticky initialization) sampled model after 100 iterations')
### Sticky-HDP-HMM
obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]
posteriormodel = pyhsmm.models.WeakLimitStickyHDPHMM(
kappa=50.,alpha=6.,gamma=6.,init_state_concentration=1.,
obs_distns=obs_distns)
posteriormodel.add_data(data)
for idx in progprint_xrange(100):
posteriormodel.resample_model()
posteriormodel.plot()
plt.gcf().suptitle('Sticky HDP-HMM sampled model after 100 iterations')
plt.show()
|
mattjj/pyhsmm
|
examples/hmm.py
|
Python
|
mit
| 3,066
|
"""
Autopsy Forensic Browser
Copyright 2016 Basis Technology Corp.
Contact: carrier <at> sleuthkit <dot> org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from java.io import File
from java.lang import Class
from java.lang import ClassNotFoundException
from java.lang import Double
from java.lang import Long
from java.sql import Connection
from java.sql import DriverManager
from java.sql import ResultSet
from java.sql import SQLException
from java.sql import Statement
from java.util.logging import Level
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import Blackboard
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import MessageNotifyUtil
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Content
from org.sleuthkit.datamodel import TskCoreException
import traceback
import general
"""
Finds and parses the Google Maps database.
"""
class GoogleMapLocationAnalyzer(general.AndroidComponentAnalyzer):
def __init__(self):
self._logger = Logger.getLogger(self.__class__.__name__)
def analyze(self, dataSource, fileManager, context):
try:
absFiles = fileManager.findFiles(dataSource, "da_destination_history")
if absFiles.isEmpty():
return
for abstractFile in absFiles:
try:
jFile = File(Case.getCurrentCase().getTempDirectory(), str(abstractFile.getId()) + abstractFile.getName())
ContentUtils.writeToFile(abstractFile, jFile, context.dataSourceIngestIsCancelled)
self.__findGeoLocationsInDB(jFile.toString(), abstractFile)
except Exception as ex:
self._logger.log(Level.SEVERE, "Error parsing Google map locations", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except TskCoreException as ex:
self._logger.log(Level.SEVERE, "Error finding Google map locations", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
def __findGeoLocationsInDB(self, databasePath, abstractFile):
if not databasePath:
return
try:
Class.forName("org.sqlite.JDBC") # load JDBC driver
connection = DriverManager.getConnection("jdbc:sqlite:" + databasePath)
statement = connection.createStatement()
except (ClassNotFoundException, SQLException) as ex:
self._logger.log(Level.SEVERE, "Error opening database", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
return
try:
resultSet = statement.executeQuery(
"SELECT time, dest_lat, dest_lng, dest_title, dest_address, source_lat, source_lng FROM destination_history;")
while resultSet.next():
time = Long.valueOf(resultSet.getString("time")) / 1000
dest_title = resultSet.getString("dest_title")
dest_address = resultSet.getString("dest_address")
dest_lat = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("dest_lat"))
dest_lng = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("dest_lng"))
source_lat = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("source_lat"))
source_lng = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("source_lng"))
artifact = abstractFile.newArtifact(BlackboardArtifact.ARTIFACT_TYPE.TSK_GPS_ROUTE)
artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_CATEGORY, general.MODULE_NAME, "Destination"))
artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_DATETIME, general.MODULE_NAME, time))
artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_LATITUDE_END, general.MODULE_NAME, dest_lat))
artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_LONGITUDE_END, general.MODULE_NAME, dest_lng))
artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_LATITUDE_START, general.MODULE_NAME, source_lat))
artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_LONGITUDE_START, general.MODULE_NAME, source_lng))
artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_NAME, general.MODULE_NAME, dest_title))
artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_LOCATION, general.MODULE_NAME, dest_address))
artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PROG_NAME, general.MODULE_NAME, "Google Maps History"))
try:
# index the artifact for keyword search
blackboard = Case.getCurrentCase().getServices().getBlackboard()
blackboard.indexArtifact(artifact)
except Blackboard.BlackboardException as ex:
self._logger.log(Level.SEVERE, "Unable to index blackboard artifact " + artifact.getArtifactID(), ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
MessageNotifyUtil.Notify.error("Failed to index GPS route artifact for keyword search.", artifact.getDisplayName())
except Exception as ex:
self._logger.log(Level.SEVERE, "Error parsing Google map locations to the blackboard", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
finally:
try:
if resultSet is not None:
resultSet.close()
statement.close()
connection.close()
except Exception as ex:
self._logger.log(Level.SEVERE, "Error closing the database", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
    # Insert a decimal point six places from the end of the string.
@staticmethod
def convertGeo(s):
length = len(s)
if length > 6:
return Double.valueOf(s[0 : length-6] + "." + s[length-6 : length])
else:
return Double.valueOf(s)
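    # Worked examples (sketch), following the string slicing above:
    #   convertGeo("37421998")   -> 37.421998
    #   convertGeo("-122084000") -> -122.084000
    # Strings of six characters or fewer are returned unmodified as doubles.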
|
dgrove727/autopsy
|
InternalPythonModules/android/googlemaplocation.py
|
Python
|
apache-2.0
| 7,132
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from platform import python_version
from flask import flash, redirect, render_template, request, session
from markupsafe import Markup
from requests.exceptions import HTTPError, RequestException, Timeout
import indico
from indico.core.config import config
from indico.core.db import db
from indico.core.db.sqlalchemy.util.queries import get_postgres_version
from indico.modules.auth import Identity, login_user
from indico.modules.bootstrap.forms import BootstrapForm
from indico.modules.cephalopod.util import register_instance
from indico.modules.core.settings import core_settings
from indico.modules.users import User
from indico.util.i18n import _, get_all_locales
from indico.util.string import to_unicode
from indico.util.system import get_os
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import url_for
from indico.web.rh import RH
from indico.web.util import url_for_index
class RHBootstrap(RH):
def _process_GET(self):
if User.query.filter_by(is_system=False).has_rows():
return redirect(url_for_index())
return render_template('bootstrap/bootstrap.html',
form=BootstrapForm(),
timezone=config.DEFAULT_TIMEZONE,
languages=get_all_locales(),
operating_system=get_os(),
postgres_version=get_postgres_version(),
indico_version=indico.__version__,
python_version=python_version())
def _process_POST(self):
if User.query.filter_by(is_system=False).has_rows():
return redirect(url_for_index())
setup_form = BootstrapForm(request.form)
if not setup_form.validate():
flash(_("Some fields are invalid. Please, correct them and submit the form again."), 'error')
return redirect(url_for('bootstrap.index'))
# Creating new user
user = User()
user.first_name = to_unicode(setup_form.first_name.data)
user.last_name = to_unicode(setup_form.last_name.data)
user.affiliation = to_unicode(setup_form.affiliation.data)
user.email = to_unicode(setup_form.email.data)
user.is_admin = True
identity = Identity(provider='indico', identifier=setup_form.username.data, password=setup_form.password.data)
user.identities.add(identity)
db.session.add(user)
db.session.flush()
user.settings.set('timezone', config.DEFAULT_TIMEZONE)
user.settings.set('lang', session.lang or config.DEFAULT_LOCALE)
login_user(user, identity)
full_name = user.full_name # needed after the session closes
db.session.commit()
# Configuring server's settings
core_settings.set('site_organization', setup_form.affiliation.data)
message = get_template_module('bootstrap/flash_messages.html').bootstrap_success(name=full_name)
flash(Markup(message), 'success')
# Activate instance tracking
if setup_form.enable_tracking.data:
contact_name = setup_form.contact_name.data
contact_email = setup_form.contact_email.data
try:
register_instance(contact_name, contact_email)
except (HTTPError, ValueError) as err:
message = get_template_module('bootstrap/flash_messages.html').community_error(err=err)
category = 'error'
except Timeout:
message = get_template_module('bootstrap/flash_messages.html').community_timeout()
category = 'error'
except RequestException as exc:
message = get_template_module('bootstrap/flash_messages.html').community_exception(exc=exc)
category = 'error'
else:
message = get_template_module('bootstrap/flash_messages.html').community_success()
category = 'success'
flash(Markup(message), category)
return redirect(url_for_index())
|
eliasdesousa/indico
|
indico/modules/bootstrap/controllers.py
|
Python
|
gpl-3.0
| 4,874
|
##
# Copyright 2021 Jessica Tallon, Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django.apps import AppConfig
class RouterConfig(AppConfig):
name = "inboxen.router"
verbose_name = "Inboxen Router"
def ready(self):
from inboxen.router.utils import set_salmon_encoding_error_policy # noqa
set_salmon_encoding_error_policy()
|
Inboxen/Inboxen
|
inboxen/router/apps.py
|
Python
|
agpl-3.0
| 1,046
|
import npc
import pytest
from util import fixture_dir, load_json
import json
@pytest.fixture
def list_json_output(tmp_path, prefs):
def make_list(*search_parts, outformat='json', metadata=None, prefs=prefs, title=None):
tmpdir = tmp_path / 'list'
tmpdir.mkdir()
outfile = tmpdir / 'output.json'
outfile.touch()
search = fixture_dir('listing', *search_parts)
npc.commands.listing.make_list(search, fmt=outformat, metadata=metadata, outfile=str(outfile), prefs=prefs, title=title)
return load_json(outfile)
return make_list
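# A usage sketch (hypothetical test; assumes a 'valid-json' directory exists
# under the 'listing' fixture dir):
#
#   def test_list_json(list_json_output):
#       listing = list_json_output('valid-json')
#       assert isinstance(listing, list)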
|
aurule/npc
|
tests/commands/listing/conftest.py
|
Python
|
mit
| 587
|
"""
Django settings for openshift project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Prepare to move the data files into the data directory
import imp
ON_OPENSHIFT = False
if 'OPENSHIFT_REPO_DIR' in os.environ:
ON_OPENSHIFT = True
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
default_keys = { 'SECRET_KEY': 'tjy&7h%c=q01+c5i@_-t)&n2c+y*tn7v_)vbdksnlv@s5qh%e_' }
use_keys = default_keys
if ON_OPENSHIFT:
imp.find_module('openshiftlibs')
import openshiftlibs
use_keys = openshiftlibs.openshift_secure(default_keys)
SECRET_KEY = use_keys['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
if ON_OPENSHIFT:
DEBUG = False
else:
DEBUG = True
TEMPLATE_DEBUG = True
if DEBUG:
ALLOWED_HOSTS = []
else:
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
if ON_OPENSHIFT:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.environ['OPENSHIFT_DATA_DIR'], 'db.sqlite3'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, '..', 'static')
STATIC_URL = '/static/'
|
coursemdetw/2014django
|
wsgi/openshift/settings.py
|
Python
|
gpl-2.0
| 2,745
|
# vim:fileencoding=utf8:et:ts=4:sts=4:sw=4:ft=python
# Settings for production environment
from django.conf import settings
# DEBUG Options: Select "True" for development use, "False" for production use
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = False
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '/var/www/' + settings.INSTANCE_NAME + '/htdocs/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/var/www/' + settings.INSTANCE_NAME + '/htdocs/static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or
# "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
settings.PROJECT_ROOT + '/okupy/static',
'/var/www/gentoo-identity-bootstrap',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'compressor.finders.CompressorFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
settings.PROJECT_ROOT + '/okupy/templates/'
)
|
dastergon/identity.gentoo.org
|
okupy/settings/production.py
|
Python
|
agpl-3.0
| 3,231
|
"""System engine"""
__author__ = "Zacharias El Banna"
__version__ = "5.6"
__build__ = 214
__all__ = ['Context','WorkerPool']
from os import path as ospath, getpid, walk
from json import loads, load, dumps
from importlib import import_module, reload as reload_module
from threading import Thread, Event, BoundedSemaphore, enumerate as thread_enumerate
from time import localtime, strftime, time, sleep
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import unquote, parse_qs
from functools import lru_cache, partial
from base64 import b64encode, b64decode
from datetime import datetime,timedelta, timezone
from crypt import crypt
from rims.core.common import DB, rest_call
########################################## Tools ###########################################
@lru_cache(maxsize = 256)
def cached_file_open(aFile):
with open(aFile, 'rb') as file:
return file.read()
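# Note: lru_cache keeps up to 256 file bodies in memory; the cache is
# inspected via cached_file_open.cache_info() in report() and flushed via
# cached_file_open.cache_clear() on SIGUSR1 (see signal_handler below).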
########################################### Context ########################################
#
# Main state object, contains config, workers, modules and calls, etc.
#
# TODO EXTRA: make multiprocessing - preforking - and multithreaded
#
# - http://stupidpythonideas.blogspot.com/2014/09/sockets-and-multiprocessing.html
# - https://stackoverflow.com/questions/1293652/accept-with-sockets-shared-between-multiple-processes-based-on-apache-prefork
# - CTX data is unique/non-mutable and could be parallelized
#
class Context(object):
def __init__(self,aConfig):
""" Context init - create the infrastructure but populate later
- Set node ID
- Prepare database and workers
- Load config
- initiate the 'kill' switch
- create datastore (i.e. dict) for nodes, services and external modules
"""
if isinstance(aConfig, dict):
self.config = aConfig
else:
with open(aConfig,'r') as config_file:
self.config = load(config_file)
self.config['config_file'] = aConfig
self.config['salt'] = self.config.get('salt','WBEUAHfO')
self.config['mode'] = self.config.get('mode','api')
self.config['workers']= self.config.get('workers',20)
self.node = self.config['id']
self.db = DB(self.config['database']['name'],self.config['database']['host'],self.config['database']['username'],self.config['database']['password']) if self.config.get('database') else None
self.workers = WorkerPool(self.config['workers'],self)
self.path = ospath.abspath(ospath.join(ospath.dirname(__file__), '..'))
self.tokens = {}
self.nodes = {}
self.external = {}
self.services = {}
self.config['logging'] = self.config.get('logging',{})
self.config['logging']['rest'] = self.config['logging'].get('rest',{'enabled':False,'file':None})
self.config['logging']['system'] = self.config['logging'].get('system',{'enabled':False,'file':None})
try:
with open(self.config['site_file'],'r') as sfile:
self.site = load(sfile)
except:
self.log("Site file could not be loaded/found: %s"%self.config.get('site_file','N/A'))
self.site = {}
self._kill = Event()
self._analytics= {'files':{},'modules':{}}
self.rest_call = rest_call
def __str__(self):
return "Context(node=%s)"%(self.node)
#
def clone(self):
""" Clone itself and non thread-safe components, avoid using copy and having to copy everything manually... """
from copy import copy
ctx_new = copy(self)
ctx_new.db = DB(self.config['database']['name'],self.config['database']['host'],self.config['database']['username'],self.config['database']['password']) if self.config.get('database') else None
return ctx_new
#
def wait(self):
self._kill.wait()
#
def system_info(self,aNode):
""" Function retrieves all central info for a certain node, or itself if node is given"""
if len(aNode) == 0 or aNode is None:
info = {'nodes':self.nodes,'services':self.services,'config':self.config,'tasks':self.workers.scheduler_tasks(),'site':(len(self.site) > 0),'version':__version__,'build':__build__}
elif self.config.get('database'):
info = {'tokens':{}}
with self.db as db:
db.do("SELECT id, node, url,system FROM nodes")
info['nodes'] = {x['node']:{'id':x['id'],'url':x['url'],'system':x['system']} for x in db.get_rows()}
db.do("SELECT servers.id, node, st.service, st.type FROM servers LEFT JOIN service_types AS st ON servers.type_id = st.id")
info['services'] = {x['id']:{'node':x['node'],'service':x['service'],'type':x['type']} for x in db.get_rows()}
db.do("SELECT tasks.id, user_id, module, function, args, frequency, output FROM tasks LEFT JOIN nodes ON nodes.id = tasks.node_id WHERE node = '%s'"%aNode)
info['tasks'] = db.get_rows()
for task in info['tasks']:
task['output'] = (task['output']== 1)
db.do("DELETE FROM user_tokens WHERE created + INTERVAL 5 DAY < NOW()")
db.do("SELECT user_id, token, created + INTERVAL 5 DAY as expires FROM user_tokens ORDER BY created DESC")
info['tokens'] = {x['token']:(x['user_id'],x['expires'].strftime("%a, %d %b %Y %H:%M:%S GMT")) for x in db.get_rows()}
info['version'] = __version__
info['build'] = __build__
else:
info = self.rest_call("%s/system/environment/%s/%s"%(self.config['master'],aNode,__build__), aDataOnly = True)
return info
#
def load_system(self):
""" Load info from DB or retrieve from master node. Add tasks to queue, return true if system loaded successfully"""
try:
system = self.system_info(self.node)
except Exception as e:
print(str(e))
return False
else:
self.nodes.update(system['nodes'])
self.services.update(system['services'])
self.tokens.update(system['tokens'])
for task in system['tasks']:
try: freq = int(task.pop('frequency'))
except: freq = 0
self.log("Adding task: %(module)s/%(function)s"%task)
self.workers.add_task(task['module'],task['function'],freq,args = loads(task['args']), output = (task['output'] == 1 or task['output'] == True))
if __build__ != system['build']:
self.log("Build mismatch between master and node: %s != %s"%(__build__,system['build']))
return True
#
def start(self):
""" Start "moving" parts of context, set up socket and start processing incoming requests and scheduled tasks """
from sys import setcheckinterval
from signal import signal, SIGINT, SIGUSR1, SIGUSR2
from socket import socket, AF_INET, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR
try:
addr = ("", int(self.config['port']))
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind(addr)
sock.listen(5)
self.workers.start(addr,sock)
for sig in [SIGINT, SIGUSR1, SIGUSR2]:
signal(sig, self.signal_handler)
setcheckinterval(200)
except Exception as e:
print(str(e))
return False
else:
return True
#
def close(self):
self.workers.close()
self._kill.set()
#
def signal_handler(self, sig, frame):
""" Signal handler instantiate OS signalling mechanisms to override standard behavior
- SIGINT: close system
- SIGUSR1: reload system modules and cache files
- SIGUSR2: report system state through stdout print
"""
from signal import SIGINT, SIGUSR1, SIGUSR2
if sig == SIGINT:
self.close()
elif sig == SIGUSR1:
print("\n".join(self.module_reload()))
cached_file_open.cache_clear()
elif sig == SIGUSR2:
data = self.report()
data.update(self.config)
data['tokens'] = {k:(v[0],v[1]) for k,v in self.tokens.items()}
data['site'] = self.site
print("System Info:\n_____________________________\n%s\n_____________________________"%(dumps(data,indent=2, sort_keys=True)))
#
def debugging(self):
return (self.config['mode'] == 'debug')
################################## Service Functions #################################
#
def node_function(self, aNode, aModule, aFunction, **kwargs):
""" Node function freezes a REST call or a function with enough info so that they can be used multiple times AND interchangably """
if self.node != aNode:
kwargs['aDataOnly'] = True
ret = partial(self.rest_call,"%s/api/%s/%s"%(self.nodes[aNode]['url'],aModule,aFunction), **kwargs)
else:
module = import_module("rims.rest.%s"%aModule)
fun = getattr(module,aFunction,lambda x,y: None)
ret = partial(fun,self)
return ret
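  # A usage sketch (hypothetical node/module/function names on a Context
  # instance 'ctx'):
  #
  #   fn = ctx.node_function('node-2', 'inventory', 'list')
  #   result = fn()   # REST call to node-2, or a direct local call on self
  #   result = fn()   # the frozen partial is reusable and interchangeable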
#
def module_register(self, aName):
""" Register "external" modules - for dynamic modules """
ret = {}
try: module = import_module(aName)
except Exception as e:
ret['status'] = 'NOT_OK'
ret['info'] = str(e)
else:
ret['status'] = 'OK'
self.external[aName] = module
ret['import'] = repr(module)
return ret
#
def module_reload(self):
""" Reload modules and return which ones were reloaded """
from sys import modules as sys_modules
from types import ModuleType
modules = {x:sys_modules[x] for x in sys_modules.keys() if x.startswith('rims.')}
modules.update(self.external)
ret = []
for k,v in modules.items():
if isinstance(v,ModuleType):
try: reload_module(v)
except: pass
else: ret.append(k)
ret.sort()
return ret
#
def analytics(self, aType, aGroup, aItem):
""" Function provides basic usage analytics """
tmp = self._analytics[aType].get(aGroup,{})
tmp[aItem] = tmp.get(aItem,0) + 1
self._analytics[aType][aGroup] = tmp
#
# @debug_decorator('log')
def log(self,aMsg):
syslog = self.config['logging']['system']
if syslog['enabled']:
with open(syslog['file'], 'a') as f:
f.write(str("%s: %s\n"%(strftime('%Y-%m-%d %H:%M:%S', localtime()), aMsg)))
#
def report(self):
from sys import version, modules as sys_modules, path as syspath
from types import ModuleType
node_url = self.nodes[self.node]['url']
db_counter = {}
for t in thread_enumerate():
try:
for k,v in t._ctx.db.count.items():
db_counter[k] = db_counter.get(k,0) + v
except:pass
output = {
'Node URL':node_url,
'Package path':self.path,
'Python version':version.replace('\n',''),
'Workers pool':self.workers.alive(),
'Workers idle':self.workers.idles(),
'Workers queue':self.workers.queue_size(),
'Servers':self.workers.servers(),
'Database':', '.join("%s:%s"%(k.lower(),v) for k,v in db_counter.items()),
'OS path':',' .join(syspath),
'OS pid':getpid()}
try: output['File cache'] = repr(cached_file_open.cache_info())
except:pass
if self.config.get('database'):
with self.db as db:
oids = {}
for type in ['devices','device_types']:
db.do("SELECT DISTINCT oid FROM %s"%type)
oids[type] = [x['oid'] for x in db.get_rows()]
output['Unhandled detected OIDs']= ",".join(str(x) for x in oids['devices'] if x not in oids['device_types'])
output.update({'Mounted directory: %s'%k:"%s => %s/files/%s/"%(v,node_url,k) for k,v in self.config.get('files',{}).items()})
output.update({'Modules (%03d)'%i:x for i,x in enumerate(sys_modules.keys()) if x.startswith('rims')})
output['analytics'] = {}
  for type in ['modules','files']:
   # merge per-group counters instead of overwriting the dict on every group
   output['analytics'][type] = {}
   for g,i in self._analytics[type].items():
    output['analytics'][type].update({'%s/%s'%(g,f):c for f,c in i.items()})
return output
########################################## WorkerPool ########################################
#
#
class WorkerPool(object):
###################################### Workers ########################################
#
class ScheduleWorker(Thread):
# Class that provides a sleep-then-repeat function
def __init__(self, aFrequency, aFunc, aName, aOutput, aArgs, aQueue, aAbort):
Thread.__init__(self)
self._freq = aFrequency
self._queue = aQueue
self._func = aFunc
self.name = aName
self._output= aOutput
self._args = aArgs
self._abort = aAbort
self.daemon = True
self.start()
def run(self):
self._abort.wait(self._freq - int(time())%self._freq)
while not self._abort.is_set():
self._queue.put((self._func,'TASK',None,self._output,self._args,None))
self._abort.wait(self._freq)
return False
#
#
class QueueWorker(Thread):
def __init__(self, aNumber, aAbort, aQueue, aContext):
Thread.__init__(self)
self._n = aNumber
self.name = "QueueWorker(%02d)"%aNumber
self._abort = aAbort
self._idle = Event()
self._queue = aQueue
self._ctx = aContext.clone()
self.daemon = True
self.start()
def run(self):
while not self._abort.is_set():
try:
self._idle.set()
(func, rest, sema, output, args, kwargs) = self._queue.get(True)
self._idle.clear()
result = func(*args,**kwargs) if not rest else func(self._ctx, args)
if output:
self._ctx.log("%s - %s => %s"%(self.name,repr(func),dumps(result)))
except Exception as e:
self._ctx.log("%s - ERROR: %s => %s"%(self.name,repr(func),str(e)))
if self._ctx.config['mode'] == 'debug':
from traceback import format_exc
for n,v in enumerate(format_exc().split('\n')):
self._ctx.log("%s - DEBUG-%02d => %s"%(self.name,n,v))
finally:
if sema:
sema.release()
self._queue.task_done()
# TODO clean up DB connections
return False
#
#
class ServerWorker(Thread):
def __init__(self, aNumber, aAddress, aSocket, aContext):
Thread.__init__(self)
self.name = "ServerWorker(%02d)"%aNumber
self.daemon = True
httpd = HTTPServer(aAddress, SessionHandler, False)
self._httpd = httpd
httpd.socket = aSocket
httpd._ctx = self._ctx = aContext.clone()
   httpd.server_bind = httpd.server_close = lambda: None   # no-op overrides; as plain instance attributes these are not bound methods, so they must take no arguments
self.start()
def run(self):
try: self._httpd.serve_forever()
except: pass
def shutdown(self):
self._httpd.shutdown()
#
#
def __init__(self, aWorkers, aContext):
from queue import Queue
self._queue = Queue(0)
self._count = aWorkers
self._ctx = aContext
self._abort = Event()
self._sock = None
self._idles = []
self._workers = []
self._servers = []
self._scheduler = []
def __str__(self):
return "WorkerPool(count = %s, queue = %s, schedulers = %s, alive = %s)"%(self._count,self._queue.qsize(),len(self._scheduler),self.alive())
def start(self, aAddr, aSock):
self._abort.clear()
self._sock = aSock
self._servers = [self.ServerWorker(n,aAddr,aSock,self._ctx) for n in range(4)]
self._workers = [self.QueueWorker(n, self._abort, self._queue, self._ctx) for n in range(self._count)]
self._idles = [w._idle for w in self._workers]
def close(self):
""" Abort set abort state and inject dummy tasks to kill off workers. There might be running tasks so don't add more than enough """
def dummy(): pass
self._abort.set()
try: self._sock.close()
except: pass
finally: self._sock = None
active = self.alive()
while active > 0:
for x in range(0,active - self._queue.qsize()):
self._queue.put((dummy,False,None,False,[],{}))
while not self._queue.empty() and self.alive() > 0:
sleep(0.1)
self._workers = [x for x in self._workers if x.is_alive()]
active = self.alive()
def alive(self):
return len([x for x in self._workers if x.is_alive()])
def idles(self):
return len([x for x in self._idles if x.is_set()])
def servers(self):
return len([x for x in self._servers if x.is_alive()])
def queue_size(self):
return self._queue.qsize()
def scheduler_size(self):
return len([x for x in self._scheduler if x.is_alive()])
def scheduler_tasks(self):
return [(x.name,x._freq) for x in self._scheduler]
def semaphore(self,aSize):
return BoundedSemaphore(aSize)
def block(self,aSema,aSize):
for i in range(aSize):
aSema.acquire()
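 # Fan-out/fan-in sketch (hypothetical 'process' function): queue jobs behind
 # a semaphore, then block until every queued job has released it:
 #   sema = pool.semaphore(4)
 #   for job in jobs:
 #    pool.add_semaphore(process, sema, job)
 #   pool.block(sema, 4)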
##################### FUNCTIONs ########################
#
def add_function(self, aFunction, *args, **kwargs):
self._queue.put((aFunction,False,None,False,args,kwargs))
def add_semaphore(self, aFunction, aSema, *args, **kwargs):
aSema.acquire()
self._queue.put((aFunction,False,aSema,False,args,kwargs))
####################### TASKs ###########################
#
def add_task(self, aModule, aFunction, aFrequency = 0, **kwargs):
try:
mod = import_module("rims.rest.%s"%aModule)
func = getattr(mod, aFunction, None)
except: self._ctx.log("WorkerPool ERROR: adding task failed (%s/%s)"%(aModule,aFunction))
else:
if aFrequency > 0:
self._scheduler.append(self.ScheduleWorker(aFrequency, func, "%s/%s"%(aModule,aFunction), kwargs.get('output',False),kwargs.get('args',{}), self._queue, self._abort))
else:
self._queue.put((func,True,None,kwargs.get('output',False),kwargs.get('args',{}),None))
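 # Usage sketch (hypothetical module/function names): run a scheduled task
 # every 60 seconds and log its result; aFrequency == 0 queues it just once:
 #   pool.add_task('system','task_worker', 60, output = True, args = {})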
###################################### Session Handler ######################################
#
class SessionHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self._headers = {}
self._body = b'null'
self._ctx = args[2]._ctx
BaseHTTPRequestHandler.__init__(self,*args, **kwargs)
def header(self):
  # Pops X-Code (default 200) from the pending headers and emits it as the HTTP status line, then sends the remaining headers
self._headers.update({'X-Method':self.command,'Server':'RIMS Engine %s.%s'%(__version__,__build__),'Date':self.date_time_string()})
code = self._headers.pop('X-Code',200)
self.wfile.write(('HTTP/1.1 %s %s\r\n'%(code,self.responses.get(code,('Other','Server specialized return code'))[0])).encode('utf-8'))
self._headers.update({'Content-Length':len(self._body),'Connection':'close'})
for k,v in self._headers.items():
try: self.send_header(k,v)
except: self.send_header('X-Header-Error',k)
self.end_headers()
def do_GET(self):
self.route()
self.header()
self.wfile.write(self._body)
def do_POST(self):
self.route()
self.header()
self.wfile.write(self._body)
def route(self):
""" Route request to the right function """
path,_,query = (self.path.lstrip('/')).partition('/')
if path in ['api','debug','external']:
self.api(path,query)
elif path in ['infra','images','files']:
self.files(path,query)
elif path == 'site' and len(query) > 0:
self.site(query)
elif path == 'auth':
self.auth()
elif path == 'system':
self.system(query)
else:
self._headers.update({'X-Process':'no route','Location':'%s/site/portal_main'%(self._ctx.nodes[self._ctx.node]['url']),'X-Code':301})
#
#
def api(self,path,query):
""" API serves the REST functions x.y.z.a:<port>/api|external/module/function
- extra arguments can be sent as GET or using headers (using X-prefix in the latter case): node
- Should get a cookie
"""
extras = {}
(api,_,get) = query.partition('?')
(mod,_,fun) = api.partition('/')
self._ctx.analytics('modules',mod,fun)
for part in get.split("&"):
(k,_,v) = part.partition('=')
extras[k] = v
self._headers.update({'X-Module':mod, 'X-Function': fun,'Content-Type':"application/json; charset=utf-8",'Access-Control-Allow-Origin':"*",'X-Process':'API','X-Route':self.headers.get('X-Route',extras.get('node',self._ctx.node if not mod == 'system' else 'master'))})
try:
length = int(self.headers.get('Content-Length',0))
raw = self.rfile.read(length).decode() if length > 0 else '{}'
if self.headers.get('Content-Type') == 'application/json': args = loads(raw)
elif self.headers.get('Content-Type') == 'application/x-www-form-urlencoded': args = { k: l[0] for k,l in parse_qs(raw, keep_blank_values=1).items() }
else: args = {}
except:
args = {}
if self._ctx.config['logging']['rest']['enabled'] and self.headers.get('X-Log','true') == 'true':
with open(self._ctx.config['logging']['rest']['file'], 'a') as f:
f.write(str("%s: %s '%s' @%s(%s)\n"%(strftime('%Y-%m-%d %H:%M:%S', localtime()), api, dumps(args) if api != "system/task_worker" else "N/A", self._ctx.node, get.strip())))
try:
if self._headers['X-Route'] == self._ctx.node:
module = import_module("rims.rest.%s"%mod) if not path == 'external' else self._ctx.external.get(mod)
self._body = dumps(getattr(module,fun, lambda x,y: None)(self._ctx, args)).encode('utf-8')
else:
self._body = self._ctx.rest_call("%s/%s/%s"%(self._ctx.nodes[self._headers['X-Route']]['url'],path,query), aArgs = args, aDecode = False, aDataOnly = True)
except Exception as e:
if not (isinstance(e.args[0],dict) and e.args[0].get('code')):
error = {'X-Args':args, 'X-Exception':type(e).__name__, 'X-Code':600, 'X-Info':','.join(map(str,e.args))}
else:
error = {'X-Args':args, 'X-Exception':e.args[0].get('exception'), 'X-Code':e.args[0]['code'], 'X-Info':e.args[0].get('info')}
self._headers.update(error)
if self._ctx.config['mode'] == 'debug':
from traceback import format_exc
for n,v in enumerate(format_exc().split('\n')):
self._headers["X-Debug-%02d"%n] = v
#
#
def site(self,query):
""" Site - serve AJAX information """
api,_,get = query.partition('?')
(mod,_,fun) = api.partition('_')
stream = Stream(self,get)
self._headers.update({'Content-Type':'text/html; charset=utf-8','X-Code':200,'X-Process':'site'})
try:
module = import_module("rims.site.%s"%mod)
getattr(module,fun,lambda x:None)(stream)
except Exception as e:
stream.wr("<DETAILS CLASS='web'><SUMMARY CLASS='red'>ERROR</SUMMARY>API: rims.site.%s<BR>"%(api))
try:
stream.wr("Type: %s<BR>Code: %s<BR><DETAILS open='open'><SUMMARY>Info</SUMMARY>"%(e.args[0]['exception'],e.args[0]['code']))
try:
keys = sorted(e.args[0]['info'].keys())
for k in keys:
stream.wr("%s: %s<BR>"%(k,e.args[0]['info'][k]))
except: stream.wr(e.args[0]['info'])
stream.wr("</DETAILS>")
except:
stream.wr("Type: %s<BR><DETAILS open='open'><SUMMARY>Info</SUMMARY><PRE>%s</PRE></DETAILS>"%(type(e).__name__,str(e)))
stream.wr("<DETAILS><SUMMARY>Args</SUMMARY><CODE>%s</CODE></DETAILS>"%(",".join(stream._form.keys())))
stream.wr("<DETAILS><SUMMARY>Cookie</SUMMARY><CODE>%s</CODE></DETAILS></DETAILS>"%(stream._cookies))
self._body = stream.output().encode('utf-8')
#
#
def files(self,path,query):
""" Serve "common" system files and also route 'files' """
query = unquote(query)
self._ctx.analytics('files', path, query)
self._headers['X-Process'] = 'files'
if query.endswith(".js"):
self._headers['Content-type']='application/javascript; charset=utf-8'
elif query.endswith(".css"):
self._headers['Content-type']='text/css; charset=utf-8'
try:
if not path == 'files':
fullpath = ospath.join(self._ctx.path,path,query)
else:
param,_,file = query.partition('/')
fullpath = ospath.join(self._ctx.config['files'][param],file)
if not fullpath.endswith("/"):
if not path == 'files':
self._body = cached_file_open(fullpath)
else:
with open(fullpath, 'rb') as file:
self._body = file.read()
else:
self._headers['Content-type']='text/html; charset=utf-8'
_, _, filelist = next(walk(fullpath), (None, None, []))
self._body = ("<BR>".join("<A HREF='{0}'>{0}</A>".format(file) for file in filelist)).encode('utf-8')
except Exception as e:
self._headers.update({'X-Exception':str(e),'X-Query':query,'X-Path':path,'Content-type':'text/html; charset=utf-8','X-Code':404})
self._body = b''
#
#
def auth(self):
""" Authenticate using node function instead of API """
self._headers.update({'Content-type':'application/json; charset=utf-8','X-Process':'auth','X-Code':401})
output = {'status':'NOT_OK'}
try:
length = int(self.headers.get('Content-Length',0))
args = loads(self.rfile.read(length).decode()) if length > 0 else {}
username, password = args['username'], args['password']
# except: output['info'] = "Provide username and password arguments"
except Exception as e:
output['info'] = {'argument':str(e)}
else:
if self._ctx.node == 'master':
from rims.core.genlib import random_string
passcode = crypt(password, "$1$%s$"%self._ctx.config['salt']).split('$')[3]
with self._ctx.db as db:
if (db.do("SELECT id, theme FROM users WHERE alias = '%s' and password = '%s'"%(username,passcode)) == 1):
output.update(db.get_row())
output['token'] = random_string(16)
db.do("INSERT INTO user_tokens (user_id,token,source_ip,source_port) VALUES(%s,'%s',INET_ATON('%s'),%s)"%(output['id'],output['token'],self.client_address[0],self.client_address[1]))
expires = datetime.now(timezone.utc) + timedelta(days=5)
output['expires'] = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
self._ctx.tokens[output['token']] = (output['id'],expires)
self._headers['X-Code'] = 200
output['status'] = 'OK'
else:
output['info'] = {'authentication':'username and password combination not found','username':username,'passcode':passcode}
else:
try:
output = self._ctx.rest_call("%s/auth"%(self._ctx.config['master']), aArgs = args, aDataOnly = True)
self._headers['X-Code'] = 200
output['status'] = 'OK'
except Exception as e:
output = {'info':e.args[0]}
self._headers['X-Code'] = output['code']
output['node'] = self._ctx.node
self._body = dumps(output).encode('utf-8')
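 # Example login (hypothetical host and credentials); a successful response
 # carries the 'token' and 'expires' fields produced above:
 #   curl -d '{"username":"admin","password":"secret"}' http://127.0.0.1:8080/auth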
#
#
def system(self,query):
""" /system/<op>/<args> """
self._headers.update({'Content-type':'application/json; charset=utf-8','X-Process':'system'})
op,_,arg = query.partition('/')
if op == 'environment':
env = arg.partition('/')
if len(env[0]) > 0:
self._ctx.log("Node '%s' connected, running version: %s"%(env[0],env[2]))
output = self._ctx.system_info(env[0])
elif op == 'reload':
res = self._ctx.module_reload()
cached_file_open.cache_clear()
output = {'modules':res}
elif op == 'report':
output = self._ctx.report()
elif op == 'register':
length = int(self.headers.get('Content-Length',0))
args = loads(self.rfile.read(length).decode()) if length > 0 else {}
params = {'node':arg,'url':"http://%s:%s"%(self.client_address[0],args['port']),'system':args.get('system','0')}
output = {'status':'OK','resources_found':0,'resources_new':0}
sql = "INSERT resources (node,title,href,icon,type,user_id,view) VALUES ('%s','{}','{}','{}','{}',1,0) ON DUPLICATE KEY UPDATE id = id"%arg
with self._ctx.db as db:
output['update'] = db.insert_dict('nodes',params,"ON DUPLICATE KEY UPDATE system = %(system)s, url = '%(url)s'"%params)
for type in args.get('site',{}).keys():
resources = args['site'][type]
for name,item in resources.items():
output['resources_found'] += 1
try: output['resources_new'] += db.do(sql.format(name.title(),item['href'],item['icon'],type))
except Exception as err:
output['status'] = 'NOT_OK'
output['info'] = str(err)
self._ctx.log("Registered node %s: update:%s resources_found:%s resources_new:%s"%(arg,output['update'],output['resources_found'],output['resources_new']))
elif op == 'import':
output = self._ctx.module_register(arg)
elif op == 'shutdown':
self._ctx.close()
output = {'status':'OK','state':'shutdown in progress'}
elif op == 'mode':
self._ctx.config['mode'] = arg
output = {'status':'OK','mode':arg}
  elif op == 'config':
   length = int(self.headers.get('Content-Length',0))
   args = loads(self.rfile.read(length).decode()) if length > 0 else {}
   self._ctx.config.update(args)
   output = {'status':'OK'}  # was previously undefined here, causing a NameError when the response body is serialized below
else:
   output = {'status':'NOT_OK','info':'system/<import|register|environment|config|reload|mode|shutdown>/<args: node|module_to_import> where import, environment without args and reload run on any node'}
self._body = dumps(output).encode('utf-8')
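 # Example ops (hypothetical host): /system/report dumps state, /system/reload
 # reloads modules, /system/mode/debug switches mode:
 #   curl http://127.0.0.1:8080/system/report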
########################################### Web stream ########################################
#
class Stream(object):
def __init__(self,aHandler, aGet):
self._form = {}
self._node = aHandler._ctx.node
self._ctx = aHandler._ctx
self._body = []
self._cookies = {}
try: cookie_str = aHandler.headers['Cookie'].split('; ')
except: pass
else:
for cookie in cookie_str:
k,_,v = cookie.partition('=')
try: self._cookies[k] = self.cookie_decode(v)
except: self._cookies[k] = v
try: body_len = int(aHandler.headers.get('Content-Length',0))
except: body_len = 0
if body_len > 0 or len(aGet) > 0:
if body_len > 0:
self._form.update({ k: l[0] for k,l in parse_qs(aHandler.rfile.read(body_len).decode(), keep_blank_values=1).items() })
if len(aGet) > 0:
self._form.update({ k: l[0] for k,l in parse_qs(aGet, keep_blank_values=1).items() })
def __str__(self):
return "<DETAILS CLASS='web blue'><SUMMARY>Web</SUMMARY>Web object<DETAILS><SUMMARY>Cookies</SUMMARY><CODE>%s</CODE></DETAILS><DETAILS><SUMMARY>Form</SUMMARY><CODE>%s</CODE></DETAILS></DETAILS>"%(str(self._cookies),self._form)
def output(self):
return ("".join(self._body))
def wr(self,aHTML):
self._body.append(aHTML)
def url(self):
return self._ctx.nodes[self._node]['url']
def node(self):
return self._node
def cookie(self,aName):
return self._cookies.get(aName,{})
def cookie_encode(self,aCookie):
return b64encode(dumps(aCookie).encode('utf-8')).decode()
def cookie_decode(self,aCookie):
return loads(b64decode(aCookie).decode())
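 # Round-trip sketch: cookies are stored as base64-encoded JSON, so
 # cookie_decode(cookie_encode({'user': 1})) == {'user': 1}.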
def args(self):
return self._form
def __getitem__(self,aKey):
return self._form.get(aKey,None)
def get(self,aKey,aDefault = None):
return self._form.get(aKey,aDefault)
def get_args(self,aExcept = []):
return "&".join("%s=%s"%(k,v) for k,v in self._form.items() if not k in aExcept)
def button(self,aImg,**kwargs):
return "<A CLASS='btn btn-%s z-op small' %s></A>"%(aImg," ".join("%s='%s'"%i for i in kwargs.items()))
def rest_call(self, aAPI, aArgs = None, aTimeout = 60):
return self._ctx.rest_call("%s/%s/%s"%(self._ctx.nodes[self._node]['url'],self._ctx.config['mode'],aAPI), aArgs = aArgs, aTimeout = 60, aDataOnly = True)
def rest_full(self, aURL, **kwargs):
return self._ctx.rest_call(aURL, **kwargs)
def put_html(self, aTitle = None, aIcon = 'rims.ico', aTheme = None):
theme = self._ctx.site['portal'].get('theme','blue') if not aTheme else aTheme
self._body.append("<!DOCTYPE html><HEAD><META CHARSET='UTF-8'><LINK REL='stylesheet' TYPE='text/css' HREF='../infra/4.21.0.vis.min.css' /><LINK REL='stylesheet' TYPE='text/css' HREF='../infra/system.css'><LINK REL='stylesheet' TYPE='text/css' HREF='../infra/theme.%s.css'>"%theme)
if aTitle:
self._body.append("<TITLE>%s</TITLE>"%aTitle)
self._body.append("<LINK REL='shortcut icon' TYPE='image/png' HREF='../images/%s'/>"%(aIcon))
self._body.append("<SCRIPT SRC='../infra/3.1.1.jquery.min.js'></SCRIPT><SCRIPT SRC='../infra/4.21.0.vis.min.js'></SCRIPT><SCRIPT SRC='../infra/system.js'></SCRIPT>")
self._body.append("<SCRIPT>$(function() { $(document.body).on('click','.z-op',btn ) .on('focusin focusout','input, select',focus ) .on('input','.slider',slide_monitor); });</SCRIPT>")
self._body.append("</HEAD>")
def state_ascii(self, aState = None):
return {'unknown':'grey','up':'green','down':'red'}.get(aState,'orange')
|
zelbanna/sdcp
|
core/engine.py
|
Python
|
gpl-3.0
| 30,590
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 GRAP (http://www.grap.coop)
# @author Sylvain LE GAL (https://twitter.com/legalsylvain)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_document_extract_from_database
fast_suite = [
test_document_extract_from_database,
]
|
archetipo/server-tools
|
document_export_from_db/tests/__init__.py
|
Python
|
agpl-3.0
| 1,147
|
"""
Reading and Plotting data with DC.IO class
==========================================
The DC.IO class is a convenient way to handle DC data and
carry inversions within a same class. It also has several plotting utils
such as pseudosections. We show here an example of plotting DC data based
on a demonstration dataset.
"""
import numpy as np
import pandas as pd
import shutil
import os
import matplotlib.pyplot as plt
from SimPEG.electromagnetics.static import resistivity as DC
from SimPEG import Report
from SimPEG.utils.io_utils import download
###############################################################################
# Download an example DC data csv file
# ------------------------------------
#
# file origin and name
url = "https://storage.googleapis.com/simpeg/examples/dc_data.csv"
fname = download(url, folder="./test_url", overwrite=True)
# read csv using pandas
df = pd.read_csv(fname)
# header for ABMN locations
header_loc = ["Spa." + str(i + 1) for i in range(4)]
# Apparent resistivity
header_apprho = df.keys()[6]
###############################################################################
#
# Convert file to DC.IO object
# ----------------------------
#
# Number of data points
ndata = df[header_loc[0]].values.size
# ABMN locations
a = np.c_[df[header_loc[0]].values, np.zeros(ndata)]
b = np.c_[df[header_loc[1]].values, np.zeros(ndata)]
m = np.c_[df[header_loc[2]].values, np.zeros(ndata)]
n = np.c_[df[header_loc[3]].values, np.zeros(ndata)]
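# (np.c_ pairs each electrode x-location with a zero elevation, producing
# ndata-by-2 location arrays for the survey built below.)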
# Apparent resistivity
apprho = df[header_apprho].values
# Create a DC.IO object
IO = DC.IO()
# Generate DC survey using IO object
dc_survey = IO.from_abmn_locations_to_survey(
a,
b,
m,
n,
survey_type="dipole-dipole",
data_dc=apprho,
data_dc_type="apparent_resistivity",
)
###############################################################################
#
# Plot
# ----
#
fig, ax = plt.subplots(1, 1, figsize=(10, 3))
IO.plotPseudoSection(
data_type="apparent_resistivity", scale="linear", clim=(0, 1000), ncontour=3, ax=ax
)
plt.show()
# clean up
shutil.rmtree(os.path.expanduser("./test_url"))
###############################################################################
# Print the version of SimPEG and dependencies
# --------------------------------------------
#
Report()
###############################################################################
# Moving Forward
# --------------
#
# If you have suggestions for improving this example, please create a `pull request on the example in SimPEG <https://github.com/simpeg/simpeg/blob/main/examples/06-dc/read_plot_DC_data_with_IO_class.py>`_
#
# You might try:
# - changing the contour levels
# - trying it with your own dataset
# - creating a mask for negative apparent resistivities
# - ...
|
simpeg/simpeg
|
examples/04-dcip/plot_read_DC_data_with_IO_class.py
|
Python
|
mit
| 2,790
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBbmisc(RPackage):
"""Miscellaneous helper functions for and from B. Bischl and some other
guys, mainly for package development."""
homepage = "https://github.com/berndbischl/BBmisc"
url = "https://cloud.r-project.org/src/contrib/BBmisc_1.11.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/BBmisc"
version('1.11', sha256='1ea48c281825349d8642a661bb447e23bfd651db3599bf72593bfebe17b101d2')
depends_on('r-checkmate@1.8.0:', type=('build', 'run'))
|
rspavel/spack
|
var/spack/repos/builtin/packages/r-bbmisc/package.py
|
Python
|
lgpl-2.1
| 730
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 24 00:44:05 2015
@author: winpython
"""
import os
import sys, getopt
import time
import numpy as np
from cnn_training_computation import fit
import pickle, cPickle, gzip
def run():
print '... loading data'
# Load the dataset
f = gzip.open("training_data_200v6_1.pkl.gz", 'rb')
train = cPickle.load(f)
f.close()
train_set, train_label = train
data = train_set
labels = train_label
"""
# read the data, labels
data = np.genfromtxt("data/mnist_train.data")
print ". .",
test_data = np.genfromtxt("data/mnist_test.data")
print ". .",
valid_data = np.genfromtxt("data/mnist_valid.data")
labels = np.genfromtxt("data/mnist_train.solution")
"""
print ". . finished reading"
"""
# DO argmax
labels = np.argmax(labels, axis=1)
print labels
"""
"""
# normalization
amean = np.mean(data)
data = data - amean
astd = np.std(data)
data = data / astd
# normalise using coefficients from training data
test_data = (test_data - amean) / astd
valid_data = (valid_data - amean) / astd
"""
test_data = train_set
valid_data = train_set
fit(data, labels)
print "finished training"
if __name__ == '__main__':
run()
|
artmusic0/theano-learning.part03
|
Myfile_run-py_release_turble-training/run_training.py
|
Python
|
gpl-3.0
| 1,340
|
#!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Gets information about one commit from gitiles.
Example usage:
./fetch_revision_info.py 343b531d31 chromium
./fetch_revision_info.py 17b4e7450d v8
"""
import argparse
import json
import urllib2
import depot_map # pylint: disable=relative-import
_GITILES_PADDING = ')]}\'\n'
_URL_TEMPLATE = 'https://chromium.googlesource.com/%s/+/%s?format=json'
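# Gitiles prefixes its JSON responses with the ")]}'" anti-XSSI guard above,
# which must be stripped before parsing (done in fetch_revision_info below).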
def fetch_revision_info(commit_hash, depot_name):
"""Gets information about a chromium revision."""
path = depot_map.DEPOT_PATH_MAP[depot_name]
url = _URL_TEMPLATE % (path, commit_hash)
response = urllib2.urlopen(url).read()
response_json = response[len(_GITILES_PADDING):]
response_dict = json.loads(response_json)
message = response_dict['message'].splitlines()
subject = message[0]
body = '\n'.join(message[1:])
result = {
'author': response_dict['author']['name'],
'email': response_dict['author']['email'],
'subject': subject,
'body': body,
'date': response_dict['committer']['time'],
}
return result
def main():
parser = argparse.ArgumentParser()
parser.add_argument('commit_hash')
parser.add_argument('depot', choices=list(depot_map.DEPOT_PATH_MAP))
args = parser.parse_args()
revision_info = fetch_revision_info(args.commit_hash, args.depot)
print json.dumps(revision_info)
if __name__ == '__main__':
main()
|
eunchong/build
|
scripts/slave/recipe_modules/auto_bisect/resources/fetch_revision_info.py
|
Python
|
bsd-3-clause
| 1,529
|
from .config import *
DAHUA_FILES = OrderedDict([
("Install.lua", {
"required": True,
"type": DAHUA_TYPE.Plain
}),
("u-boot.bin.img", {
"required": True,
"type": DAHUA_TYPE.Plain,
"size": 0x00300000
}),
("uImage.img", {
"required": True,
"type": DAHUA_TYPE.Plain,
"size": 0x00a00000
}),
("romfs-x.squashfs.img", {
"required": True,
"type": DAHUA_TYPE.uImage | DAHUA_TYPE.SquashFS,
"size": 0x03600000
}),
("web-x.squashfs.img", {
"required": True,
"type": DAHUA_TYPE.uImage | DAHUA_TYPE.SquashFS,
"size": 0x00a00000
}),
("custom-x.squashfs.img", {
"required": True,
"type": DAHUA_TYPE.uImage | DAHUA_TYPE.SquashFS,
"size": 0x00200000
}),
("logo-x.squashfs.img", {
"required": True,
"type": DAHUA_TYPE.uImage | DAHUA_TYPE.CramFS,
"size": 0x00300000
})
])
|
BotoX/Dahua-Firmware-Mod-Kit
|
configs/NVR4XXX-4KS2.py
|
Python
|
gpl-3.0
| 808
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
urlpatterns = patterns('profile.views',
url(r'^(?P<username>[a-zA-Z_][a-zA-Z0-9_]*)/$',
'profile_view', name='profile_home'),
)
|
liweitianux/97dev
|
97suifangqa/apps/profile/urls.py
|
Python
|
bsd-2-clause
| 211
|
# -*- encoding: utf-8 -*-
from .cursebox import Cursebox
from .colors import colors
from .constants import EVENT_SKIP
from .utils import hex_to_rgb
logo = [u" █ ",
u"█▀█ █ █ █▀█ █▀▀ █▀█ █▀▄ █▀█ █▄█",
u"█ █ █ █ ▀▀█ █▄█ █ █ █ █ ▄█▄",
u"█▄█ █▄█ █ ▄▄█ █▄▄ █▄█ █▄█ █ █"]
grey = colors.from_rgb((127, 127, 127))
rainbow = ["ffffff", "ffaaaa", "ff5555", "ff0000",
"ff6d00", "ffda00", "b6ff00", "48ff00",
"00ff24", "00ff91", "00ffff", "0091ff",
"0024ff", "4800ff", "b600ff", "ff00da",
"ff006d", "ff0000", "ff5555", "ffaaaa"]
prompt = "cursebox v1.0 - Press any key to exit"
def demo():
l_width, l_height = len(logo[0]), len(logo)
x_s = 0.4
palette = [colors.from_rgb(hex_to_rgb(hex)) for hex in rainbow]
padding = [colors.white] * (int(x_s * l_width) + 3)
palette = padding + palette + padding
with Cursebox(blocking_events=False) as cb:
width, height = cb.width, cb.height
def draw_logo(t):
for y0, line in enumerate(logo):
y1 = (height - l_height) / 2 + y0
for x0, char in enumerate(line):
x1 = x0 + (width - l_width) / 2
offset = int(t + y0 + x_s * x0) % len(palette)
cb.put(x=x1, y=y1, text=char,
fg=palette[offset],
bg=colors.transparent)
t = 0
l = 100
cb.put(x=(width - len(prompt)) / 2,
y=(height + l_height) / 2 + 1,
text=prompt, fg=grey, bg=colors.transparent)
while cb.poll_event() == EVENT_SKIP:
draw_logo(t if t < len(palette) else 0)
t += 1
if t > l + len(palette):
t = 0
if __name__ == "__main__":
demo()
|
Tenchi2xh/cursebox
|
cursebox/__main__.py
|
Python
|
mit
| 1,969
|
import unittest
import unittest.mock
import functools
from g1.asyncs import kernels
from g1.operations.databases.bases import interfaces
from g1.operations.databases.servers import connections
# I am not sure why pylint cannot lint contextlib.asynccontextmanager
# correctly; let us disable this check for now.
#
# pylint: disable=not-async-context-manager
def synchronous(test_method):
@kernels.with_kernel
@functools.wraps(test_method)
def wrapper(self):
kernels.run(test_method(self))
return wrapper
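# `synchronous` lets the plain unittest runner drive coroutine tests: the
# wrapper runs the async test method to completion on a g1 kernel. Sketch:
#
#   @synchronous
#   async def test_something(self):
#       ...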
class ConnectionsTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.conn = unittest.mock.Mock()
self.tx = self.conn.begin.return_value
self.manager = connections.ConnectionManager(self.conn)
unittest.mock.patch.multiple(
connections,
_WAIT_FOR_READER=0.01,
_WAIT_FOR_WRITER=0.01,
).start()
def tearDown(self):
unittest.mock.patch.stopall()
super().tearDown()
def assert_manager(
self,
num_readers,
tx_id,
rollback_tx_ids,
commit_tx_ids,
timeout_tx_ids,
):
self.assertEqual(self.manager._num_readers, num_readers)
self.assertEqual(self.manager._tx_id, tx_id)
self.assertEqual(tuple(self.manager._rollback_tx_ids), rollback_tx_ids)
self.assertEqual(tuple(self.manager._commit_tx_ids), commit_tx_ids)
self.assertEqual(tuple(self.manager._timeout_tx_ids), timeout_tx_ids)
self.assertEqual(self.manager.tx_id, tx_id)
@synchronous
async def test_reading(self):
self.assert_manager(0, 0, (), (), ())
async with self.manager.reading() as conn_1:
self.assert_manager(1, 0, (), (), ())
self.assertIs(conn_1, self.conn)
async with self.manager.reading() as conn_2:
self.assert_manager(2, 0, (), (), ())
self.assertIs(conn_2, self.conn)
async with self.manager.reading() as conn_3:
self.assert_manager(3, 0, (), (), ())
self.assertIs(conn_3, self.conn)
self.assert_manager(2, 0, (), (), ())
self.assert_manager(1, 0, (), (), ())
self.assert_manager(0, 0, (), (), ())
self.conn.begin.assert_not_called()
@synchronous
async def test_reading_timeout(self):
self.assert_manager(0, 0, (), (), ())
async with self.manager.transacting():
tx_id = self.manager.tx_id
with self.assertRaises(interfaces.TransactionTimeoutError):
async with self.manager.reading():
pass
self.assert_manager(0, 0, (), (tx_id, ), ())
self.conn.begin.assert_called_once()
@synchronous
async def test_writing(self):
with self.assertRaises(interfaces.InvalidRequestError):
async with self.manager.writing(0):
pass
with self.assertRaises(interfaces.TransactionNotFoundError):
async with self.manager.writing(1):
pass
self.assert_manager(0, 0, (), (), ())
async with self.manager.transacting():
tx_id = self.manager.tx_id
self.assert_manager(0, tx_id, (), (), ())
async with self.manager.writing(tx_id) as conn:
self.assert_manager(0, tx_id, (), (), ())
self.assertIs(conn, self.conn)
with self.assertRaises(interfaces.TransactionNotFoundError):
async with self.manager.writing(tx_id + 1):
pass
self.assert_manager(0, 0, (), (tx_id, ), ())
self.conn.begin.assert_called_once()
@synchronous
async def test_transacting(self):
self.assert_manager(0, 0, (), (), ())
async with self.manager.transacting() as conn:
tx_id = self.manager.tx_id
self.assertNotEqual(tx_id, 0)
self.assert_manager(0, tx_id, (), (), ())
self.assertIs(conn, self.conn)
self.assert_manager(0, 0, (), (tx_id, ), ())
self.conn.begin.assert_called_once()
@synchronous
async def test_transacting_rollback(self):
self.assert_manager(0, 0, (), (), ())
with self.assertRaises(ValueError):
async with self.manager.transacting():
tx_id = self.manager.tx_id
raise ValueError
self.assert_manager(0, 0, (tx_id, ), (), ())
self.conn.begin.assert_called_once()
@synchronous
async def test_transacting_timeout_on_reader(self):
self.assert_manager(0, 0, (), (), ())
async with self.manager.reading():
with self.assertRaises(interfaces.TransactionTimeoutError):
async with self.manager.transacting():
pass
self.assert_manager(0, 0, (), (), ())
self.conn.begin.assert_not_called()
@synchronous
async def test_transacting_timeout_on_writer(self):
self.assert_manager(0, 0, (), (), ())
async with self.manager.transacting():
tx_id = self.manager.tx_id
with self.assertRaises(interfaces.TransactionTimeoutError):
async with self.manager.transacting():
pass
self.assert_manager(0, 0, (), (tx_id, ), ())
self.conn.begin.assert_called_once()
@synchronous
async def test_begin(self):
with self.assertRaises(interfaces.InvalidRequestError):
await self.manager.begin(0)
self.assert_manager(0, 0, (), (), ())
conn = await self.manager.begin(1)
for _ in range(3): # begin is idempotent.
self.assertIs(await self.manager.begin(1), conn)
self.assertIs(conn, self.conn)
self.assert_manager(0, 1, (), (), ())
with self.assertRaises(interfaces.TransactionTimeoutError):
await self.manager.begin(2)
self.conn.begin.assert_called_once()
@synchronous
async def test_end(self):
with self.assertRaises(interfaces.InvalidRequestError):
await self.manager.rollback(0)
with self.assertRaises(interfaces.InvalidRequestError):
await self.manager.commit(0)
with self.assertRaises(interfaces.TransactionNotFoundError):
await self.manager.rollback(1)
with self.assertRaisesRegex(AssertionError, r'expect x != 0'):
await self.manager.rollback_due_to_timeout()
with self.assertRaises(interfaces.TransactionNotFoundError):
await self.manager.commit(1)
self.assert_manager(0, 0, (), (), ())
await self.manager.begin(1)
self.assert_manager(0, 1, (), (), ())
with self.assertRaises(interfaces.TransactionNotFoundError):
self.manager.rollback(999)
with self.assertRaises(interfaces.TransactionNotFoundError):
self.manager.commit(999)
self.tx.rollback.assert_not_called()
for _ in range(3): # rollback is idempotent.
self.manager.rollback(1)
self.tx.rollback.assert_called_once()
self.assert_manager(0, 0, (1, ), (), ())
await self.manager.begin(2)
self.tx.commit.assert_not_called()
for _ in range(3): # commit is idempotent.
self.manager.commit(2)
self.tx.commit.assert_called_once()
self.assert_manager(0, 0, (1, ), (2, ), ())
self.tx.rollback.reset_mock()
await self.manager.begin(3)
self.manager.rollback_due_to_timeout()
self.tx.rollback.assert_called_once()
self.assert_manager(0, 0, (1, ), (2, ), (3, ))
await self.manager.begin(1)
with self.assertRaises(interfaces.TransactionTimeoutError):
async with self.manager.writing(3):
pass
with self.assertRaises(interfaces.TransactionNotFoundError):
async with self.manager.writing(4):
pass
if __name__ == '__main__':
unittest.main()
|
clchiou/garage
|
py/g1/operations/databases/servers/tests/test_connections.py
|
Python
|
mit
| 8,015
|
"""
Support for Xiaomi Smart WiFi Socket and Smart Power Strip.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/switch.xiaomi_miio/
"""
import asyncio
from functools import partial
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA, )
from homeassistant.const import (CONF_NAME, CONF_HOST, CONF_TOKEN, )
from homeassistant.exceptions import PlatformNotReady
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Xiaomi Miio Switch'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
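# Example configuration.yaml entry (hypothetical host/token; the token must be
# exactly 32 characters per the schema above):
#
#   switch:
#     - platform: xiaomi_miio
#       host: 192.168.130.73
#       token: 0123456789abcdef0123456789abcdef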
REQUIREMENTS = ['python-miio==0.3.1']
ATTR_POWER = 'power'
ATTR_TEMPERATURE = 'temperature'
ATTR_LOAD_POWER = 'load_power'
ATTR_MODEL = 'model'
SUCCESS = ['ok']
# pylint: disable=unused-argument
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the switch from config."""
from miio import Device, DeviceException
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
token = config.get(CONF_TOKEN)
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
devices = []
try:
plug = Device(host, token)
device_info = plug.info()
_LOGGER.info("%s %s %s initialized",
device_info.model,
device_info.firmware_version,
device_info.hardware_version)
if device_info.model in ['chuangmi.plug.v1']:
from miio import PlugV1
plug = PlugV1(host, token)
# The device has two switchable channels (mains and a USB port).
# A switch device per channel will be created.
for channel_usb in [True, False]:
device = ChuangMiPlugV1Switch(
name, plug, device_info, channel_usb)
devices.append(device)
elif device_info.model in ['qmi.powerstrip.v1',
'zimi.powerstrip.v2']:
from miio import PowerStrip
plug = PowerStrip(host, token)
device = XiaomiPowerStripSwitch(name, plug, device_info)
devices.append(device)
elif device_info.model in ['chuangmi.plug.m1',
'chuangmi.plug.v2']:
from miio import Plug
plug = Plug(host, token)
device = XiaomiPlugGenericSwitch(name, plug, device_info)
devices.append(device)
else:
_LOGGER.error(
'Unsupported device found! Please create an issue at '
'https://github.com/rytilahti/python-miio/issues '
'and provide the following data: %s', device_info.model)
except DeviceException:
raise PlatformNotReady
async_add_devices(devices, update_before_add=True)
class XiaomiPlugGenericSwitch(SwitchDevice):
"""Representation of a Xiaomi Plug Generic."""
def __init__(self, name, plug, device_info):
"""Initialize the plug switch."""
self._name = name
self._icon = 'mdi:power-socket'
self._device_info = device_info
self._plug = plug
self._state = None
self._state_attrs = {
ATTR_TEMPERATURE: None,
ATTR_MODEL: self._device_info.model,
}
self._skip_update = False
@property
def should_poll(self):
"""Poll the plug."""
return True
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def icon(self):
"""Return the icon to use for device if any."""
return self._icon
@property
def available(self):
"""Return true when state is known."""
return self._state is not None
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._state_attrs
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
@asyncio.coroutine
def _try_command(self, mask_error, func, *args, **kwargs):
"""Call a plug command handling error messages."""
from miio import DeviceException
try:
result = yield from self.hass.async_add_job(
partial(func, *args, **kwargs))
_LOGGER.debug("Response received from plug: %s", result)
return result == SUCCESS
except DeviceException as exc:
_LOGGER.error(mask_error, exc)
return False
@asyncio.coroutine
def async_turn_on(self, **kwargs):
"""Turn the plug on."""
result = yield from self._try_command(
"Turning the plug on failed.", self._plug.on)
if result:
self._state = True
self._skip_update = True
@asyncio.coroutine
def async_turn_off(self, **kwargs):
"""Turn the plug off."""
result = yield from self._try_command(
"Turning the plug off failed.", self._plug.off)
if result:
self._state = False
self._skip_update = True
@asyncio.coroutine
def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
# On state change the device doesn't provide the new state immediately.
if self._skip_update:
self._skip_update = False
return
try:
state = yield from self.hass.async_add_job(self._plug.status)
_LOGGER.debug("Got new state: %s", state)
self._state = state.is_on
self._state_attrs.update({
ATTR_TEMPERATURE: state.temperature
})
except DeviceException as ex:
_LOGGER.error("Got exception while fetching the state: %s", ex)
class XiaomiPowerStripSwitch(XiaomiPlugGenericSwitch, SwitchDevice):
"""Representation of a Xiaomi Power Strip."""
def __init__(self, name, plug, device_info):
"""Initialize the plug switch."""
XiaomiPlugGenericSwitch.__init__(self, name, plug, device_info)
self._state_attrs = {
ATTR_TEMPERATURE: None,
ATTR_LOAD_POWER: None,
ATTR_MODEL: self._device_info.model,
}
@asyncio.coroutine
def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
# On state change the device doesn't provide the new state immediately.
if self._skip_update:
self._skip_update = False
return
try:
state = yield from self.hass.async_add_job(self._plug.status)
_LOGGER.debug("Got new state: %s", state)
self._state = state.is_on
self._state_attrs.update({
ATTR_TEMPERATURE: state.temperature,
ATTR_LOAD_POWER: state.load_power
})
except DeviceException as ex:
_LOGGER.error("Got exception while fetching the state: %s", ex)
class ChuangMiPlugV1Switch(XiaomiPlugGenericSwitch, SwitchDevice):
"""Representation of a Chuang Mi Plug V1."""
def __init__(self, name, plug, device_info, channel_usb):
"""Initialize the plug switch."""
name = name + ' USB' if channel_usb else name
XiaomiPlugGenericSwitch.__init__(self, name, plug, device_info)
self._channel_usb = channel_usb
@asyncio.coroutine
def async_turn_on(self, **kwargs):
"""Turn a channel on."""
if self._channel_usb:
result = yield from self._try_command(
"Turning the plug on failed.", self._plug.usb_on)
else:
result = yield from self._try_command(
"Turning the plug on failed.", self._plug.on)
if result:
self._state = True
self._skip_update = True
@asyncio.coroutine
def async_turn_off(self, **kwargs):
"""Turn a channel off."""
if self._channel_usb:
result = yield from self._try_command(
"Turning the plug on failed.", self._plug.usb_off)
else:
result = yield from self._try_command(
"Turning the plug on failed.", self._plug.off)
if result:
self._state = False
self._skip_update = True
@asyncio.coroutine
def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
# On state change the device doesn't provide the new state immediately.
if self._skip_update:
self._skip_update = False
return
try:
state = yield from self.hass.async_add_job(self._plug.status)
_LOGGER.debug("Got new state: %s", state)
if self._channel_usb:
self._state = state.usb_power
else:
self._state = state.is_on
self._state_attrs.update({
ATTR_TEMPERATURE: state.temperature
})
except DeviceException as ex:
_LOGGER.error("Got exception while fetching the state: %s", ex)
|
stefan-jonasson/home-assistant
|
homeassistant/components/switch/xiaomi_miio.py
|
Python
|
mit
| 9,438
|
import pdb
from copy import deepcopy
class Board:
board=None
def __init__(self, input_board=None, size=3):
if not input_board:
self.board = [[0]*size for i in range(size)]
else:
self.board = deepcopy(input_board.board)
def size(self):
if not self.board: return 0
return len(self.board)
def numOpenSpots(self):
spots=[row.count(0) for row in self.board]
num_spots=sum(spots)
return num_spots
def numQueens(self):
queens=[row.count(9) for row in self.board]
# count the number of queens on board
num_queens=queens.count(1)
return num_queens
def rowHasQueen(self, row):
if self.board[row].count(9) == 0:
return False
return True
def getQueens(self):
pos_list=[]
for row, item in enumerate(self.board):
pos_list.append(0)
for col, val in enumerate(item):
#print "pos r=%d c=%d v=%d row=%s pos=%s"%(row, col, val, item, pos_list)
if val == 9:
pos_list[-1]=col+1
return tuple(pos_list)
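    # Example: on a 4x4 board with queens at (0,1) and (2,3), getQueens()
    # returns (2, 0, 4, 0) - one 1-based column per row, 0 meaning "no queen".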
def printBoard(self):
# print the board
print '\n'.join(str(row) for row in self.board)
num_queens=self.numQueens()
pos_list=self.getQueens()
print num_queens, pos_list
def placeQueen(self, x, y):
if x >= self.size():
return False
if y >= self.size():
return False
# set queen
if self.board[x][y] == 0:
self.board[x][y]=9
else:
#print "%d %d is %d" %(x,y,self.board[x][y])
return False
# fill row with ones
for col, item in enumerate(self.board[x]):
if item == 0:
self.board[x][col]=1
# fill col with ones
for row, item in enumerate(self.board):
if item[y] == 0:
self.board[row][y]=1
# down to right
col=y
for row, item in enumerate(self.board[x:]):
try:
#print "dr: col=%d row=%d item=%s" %(col,row,item)
if item[col] == 0: item[col]=1
col+=1
except IndexError as e:
break
# up to left
col=y-1
for row, item in reversed(list(enumerate(self.board[:x]))):
try:
if col < 0: break
#print "ul: col=%d row=%d item=%s" %(col,row,item)
if item[col] == 0: item[col]=1
col-=1
except IndexError as e:
break
# down to left
col=y
for row, item in enumerate(self.board[x:]):
try:
if col < 0: break
#print "dl: col=%d row=%d item=%s" %(col,row,item)
if item[col] == 0: item[col]=1
col-=1
except IndexError as e:
break
# up to right
col=y+1
for row, item in reversed(list(enumerate(self.board[:x]))):
try:
#print "ur: col=%d row=%d item=%s" %(col,row,item)
if item[col] == 0: item[col]=1
col+=1
except IndexError as e:
break
return True
#############################################################################
def findSolution(solutions, board, r, c):
#print "enter %d %d lvl=%s"%(r,c,lvl)
#board.printBoard()
# found a solution
if board.size() == board.numQueens():
sol=board.getQueens()
if sol not in solutions:
print "found solution"
board.printBoard()
solutions.add(sol)
return True
# check if board has empty spots
if board.numOpenSpots() == 0:
#print "no more open spots lvl=%s"%(lvl)
#board.printBoard()
return False
final_rc=True
oldboard = Board(board)
# for each col in row try to place a queen
for tryc in xrange(0,board.size()):
rc = board.placeQueen(r,tryc)
#print " check %d %d rc=%d lvl=%s"%(r,tryc,rc,lvl)
#board.printBoard()
if rc:
tryr = r+1
if tryr >= board.size():
tryr=0
rc=findSolution(solutions, board,tryr,0)
if not rc:
final_rc=rc
#print "fs rc=%s lvl=%s"%(str(rc), lvl)
board=Board(oldboard)
if final_rc == False:
return False
# tried all cols in row, now try again with next row
tryr = r+1
if tryr >= board.size():
tryr=0
if board.rowHasQueen(tryr):
#print "row %d has queen lvl=%s"%(tryr,lvl)
return False
rc=findSolution(solutions, board,tryr,tryc)
return rc
def runFindSolutions(queens):
size=queens
solution_set=set()
br = Board(size=size)
print "start %d %d"%(0,0)
findSolution(solution_set, br, 0, 0)
for s in sorted(solution_set):
print s
print "%d %s"%(len(solution_set), solution_set)
if __name__ == "__main__":
runFindSolutions(10)
|
loafdog/loaf-src
|
prog-probs/eight-queens/EightQueens.py
|
Python
|
apache-2.0
| 5,123
|
#!/usr/bin/python
import hashlib
import os
import subprocess
import ecdsa
from binascii import hexlify, unhexlify
print 'master secret:',
h = raw_input()
if h:
h = unhexlify(h)
else:
h = hashlib.sha256(os.urandom(1024)).digest()
print
print 'master secret:', hexlify(h)
print
for i in range(1, 6):
se = hashlib.sha256(h + chr(i)).hexdigest()
print 'seckey', i, ':', se
sk = ecdsa.SigningKey.from_secret_exponent(secexp = int(se, 16), curve=ecdsa.curves.SECP256k1, hashfunc=hashlib.sha256)
print 'pubkey', i, ':', '04' + hexlify(sk.get_verifying_key().to_string())
print sk.to_pem()
p = subprocess.Popen('ssss-split -t 3 -n 5 -x'.split(' '), stdin = subprocess.PIPE)
p.communicate(input = hexlify(h) + '\n')
# to recover use:
# $ ssss-combine -t 3 -x
|
aussiehash/trezor-mcu
|
bootloader/firmware_sign_split.py
|
Python
|
lgpl-3.0
| 784
|
# -*- coding: utf-8 -*-
#
# S3AAA Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/tests/unit_tests/modules/s3/s3aaa.py
#
import unittest
from gluon import *
from gluon.storage import Storage
from s3.s3aaa import S3EntityRoleManager, S3Permission
# =============================================================================
class AuthUtilsTests(unittest.TestCase):
""" S3Auth Utility Methods Tests """
# -------------------------------------------------------------------------
def testSystemRoles(self):
""" Test if system roles are present """
sr = current.auth.get_system_roles()
self.assertTrue("ADMIN" in sr)
self.assertTrue(sr.ADMIN is not None)
self.assertTrue("AUTHENTICATED" in sr)
self.assertTrue(sr.AUTHENTICATED is not None)
self.assertTrue("ANONYMOUS" in sr)
self.assertTrue(sr.ANONYMOUS is not None)
# -------------------------------------------------------------------------
def testGetUserIDByEmail(self):
""" Test user account identification by email """
user_id = current.auth.s3_get_user_id("normaluser@example.com")
self.assertTrue(user_id is not None)
# -------------------------------------------------------------------------
def testImpersonate(self):
""" Test s3_impersonate """
auth = current.auth
session = current.session
sr = auth.get_system_roles()
ADMIN = sr.ADMIN
ANONYMOUS = sr.ANONYMOUS
# Test-login as system administrator
auth.s3_impersonate("admin@example.com")
self.assertTrue(auth.s3_logged_in())
self.assertTrue(auth.user is not None)
self.assertTrue(ADMIN in session.s3.roles)
self.assertTrue(ANONYMOUS in session.s3.roles)
self.assertTrue(ADMIN in auth.user.realms)
# Test with nonexistent user
self.assertRaises(ValueError, auth.s3_impersonate, "NonExistentUser")
# => should still be logged in as ADMIN
self.assertTrue(auth.s3_logged_in())
self.assertTrue(ADMIN in session.s3.roles)
# Test with None => should logout and reset the roles
auth.s3_impersonate(None)
self.assertFalse(auth.s3_logged_in())
self.assertTrue(session.s3.roles == [] or
ANONYMOUS in session.s3.roles)
# Logout
auth.s3_impersonate(None)
# =============================================================================
class SetRolesTests(unittest.TestCase):
""" Test AuthS3.set_roles """
def setUp(self):
# Create test organisations
xmlstr = """
<s3xml>
<resource name="org_organisation" uuid="SRTO1">
<data field="name">SetRoleTestsOrg1</data>
</resource>
<resource name="org_organisation" uuid="SRTO2">
<data field="name">SetRoleTestsOrg2</data>
</resource>
<resource name="org_organisation" uuid="SRTO3">
<data field="name">SetRoleTestsOrg3</data>
</resource>
</s3xml>"""
try:
auth = current.auth
auth.override = True
from lxml import etree
xmltree = etree.ElementTree(etree.fromstring(xmlstr))
s3db = current.s3db
resource = s3db.resource("org_organisation")
resource.import_xml(xmltree)
resource = s3db.resource("org_organisation",
uid=["SRTO1", "SRTO2", "SRTO3"])
rows = resource.select(["pe_id", "uuid"], as_rows=True)
orgs = dict((row.uuid, row.pe_id) for row in rows)
self.org1 = orgs["SRTO1"]
self.org2 = orgs["SRTO2"]
self.org3 = orgs["SRTO3"]
auth.override = False
except:
current.db.rollback()
auth.override = False
raise
# -------------------------------------------------------------------------
def testSetRolesPolicy3(self):
""" Test set_roles with policy 3 """
auth = current.auth
settings = current.deployment_settings
settings.security.policy = 3
auth.permission = S3Permission(auth)
auth.s3_impersonate("normaluser@example.com")
realms = auth.user.realms.keys()
self.assertEqual(len(realms), 2)
self.assertTrue(2 in realms)
self.assertTrue(3 in realms)
for r in auth.user.realms:
self.assertEqual(auth.user.realms[r], None)
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testSetRolesPolicy4(self):
""" Test set_roles with policy 4 """
auth = current.auth
settings = current.deployment_settings
settings.security.policy = 4
auth.permission = S3Permission(auth)
auth.s3_impersonate("normaluser@example.com")
realms = auth.user.realms.keys()
self.assertTrue(2 in realms)
self.assertTrue(3 in realms)
self.assertEqual(len(realms), 2)
for r in auth.user.realms:
self.assertEqual(auth.user.realms[r], None)
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testSetRolesPolicy5(self):
""" Test set_roles with policy 5 """
auth = current.auth
settings = current.deployment_settings
settings.security.policy = 5
auth.permission = S3Permission(auth)
auth.s3_impersonate("normaluser@example.com")
realms = auth.user.realms.keys()
self.assertTrue(2 in realms)
self.assertTrue(3 in realms)
self.assertEqual(len(realms), 2)
for r in auth.user.realms:
self.assertEqual(auth.user.realms[r], None)
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testSetRolesPolicy6(self):
""" Test set_roles with policy 6 """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
settings.security.policy = 6
auth.permission = S3Permission(auth)
try:
# Create a test role
role = auth.s3_create_role("Example Role", uid="TESTROLE")
# Assign normaluser this role for a realm
user_id = auth.s3_get_user_id("normaluser@example.com")
auth.s3_assign_role(user_id, role, for_pe=self.org1)
auth.s3_impersonate("normaluser@example.com")
realms = auth.user.realms.keys()
self.assertEqual(len(realms), 3)
self.assertTrue(2 in realms)
self.assertTrue(3 in realms)
self.assertTrue(role in realms)
for r in auth.user.realms:
if r == role:
self.assertEqual(auth.user.realms[r], [self.org1])
else:
self.assertEqual(auth.user.realms[r], None)
finally:
auth.s3_impersonate(None)
auth.s3_delete_role("TESTROLE")
current.db.rollback()
# -------------------------------------------------------------------------
def testSetRolesPolicy7(self):
""" Test set_roles with policy 7 """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
settings.security.policy = 7
auth.permission = S3Permission(auth)
try:
# Create a test role
role = auth.s3_create_role("Example Role", uid="TESTROLE")
# Create an OU-affiliation for two organisations
org1 = self.org1
org2 = self.org2
s3db.pr_add_affiliation(org1, org2, role="TestRole")
# Assign normaluser this role for the realm of the parent org
user_id = auth.s3_get_user_id("normaluser@example.com")
auth.s3_assign_role(user_id, role, for_pe=org1)
auth.s3_impersonate("normaluser@example.com")
realms = auth.user.realms.keys()
self.assertTrue(2 in realms)
self.assertTrue(3 in realms)
self.assertTrue(role in realms)
self.assertEqual(len(realms), 3)
for r in auth.user.realms:
if r == role:
self.assertTrue(org1 in auth.user.realms[r])
self.assertTrue(org2 in auth.user.realms[r])
else:
self.assertEqual(auth.user.realms[r], None)
finally:
auth.s3_impersonate(None)
auth.s3_delete_role("TESTROLE")
current.db.rollback()
# -------------------------------------------------------------------------
def testSetRolesPolicy8(self):
""" Test set_roles with policy 8 """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
settings.security.policy = 8
auth.permission = S3Permission(auth)
try:
# Create a test role
role = auth.s3_create_role("Test Group", uid="TESTGROUP")
# Have two orgs, set org2 as OU descendant of org1
org1 = self.org1
org2 = self.org2
s3db.pr_add_affiliation(org1, org2, role="TestOrgUnit")
# Have a third org
org3 = self.org3
# Add the user as OU descendant of org3
user_id = auth.s3_get_user_id("normaluser@example.com")
user_pe = auth.s3_user_pe_id(user_id)
s3db.pr_add_affiliation(org3, user_pe, role="TestStaff")
# Assign normaluser the test role for org3
auth.s3_assign_role(user_id, role, for_pe=org3)
# Delegate the test role for org1 to org3
auth.s3_delegate_role("TESTGROUP", org1, receiver=org3)
# Impersonate as normal user
auth.s3_impersonate("normaluser@example.com")
# Check the realms
realms = auth.user.realms.keys()
self.assertTrue(2 in realms)
self.assertTrue(3 in realms)
self.assertTrue(role in realms)
self.assertEqual(len(realms), 3)
for r in auth.user.realms:
if r == role:
self.assertTrue(org3 in auth.user.realms[r])
else:
self.assertEqual(auth.user.realms[r], None)
# Check the delegations
delegations = auth.user.delegations.keys()
self.assertTrue(role in delegations)
self.assertEqual(len(delegations), 1)
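# auth.user.delegations is expected to be nested as
#     {role: {receiver_pe: [delegated realm pe_ids]}}
# i.e. here (sketch): {role: {org3: [org1, org2]}} - org2 follows
# org1 because it is an OU descendant of org1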
realms = auth.user.delegations[role]
self.assertTrue(org3 in realms)
self.assertEqual(len(realms), 1)
self.assertTrue(org1 in realms[org3])
self.assertTrue(org2 in realms[org3])
# Remove the delegations
auth.s3_remove_delegation("TESTGROUP", org1, receiver=org3)
# Check the delegations again
delegations = auth.user.delegations.keys()
self.assertFalse(role in delegations)
self.assertEqual(len(delegations), 0)
finally:
s3db.pr_remove_affiliation(org1, org2, role="TestOrgUnit")
s3db.pr_remove_affiliation(org1, org2, role="TestStaff")
auth.s3_delete_role("TESTGROUP")
current.db.rollback()
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
#def testPerformance(self):
#MAX_RUNTIME = 18 # Maximum acceptable runtime per request in milliseconds
##MAX_RUNTIME = 3 # Maximum acceptable runtime per request in milliseconds (up to policy 7)
#deployment_settings.security.policy = 8
#from s3.s3aaa import S3Permission
#auth.permission = S3Permission(auth)
#try:
#org1 = s3db.pr_get_pe_id("org_organisation", 1)
#org2 = s3db.pr_get_pe_id("org_organisation", 2)
#s3db.pr_add_affiliation(org1, org2, role="TestOrgUnit")
#org3 = s3db.pr_get_pe_id("org_organisation", 3)
#partners = s3db.pr_add_affiliation(org1, org3, role="TestPartners", role_type=9)
#user = auth.s3_user_pe_id(auth.s3_get_user_id("normaluser@example.com"))
#s3db.pr_add_affiliation(org3, user, role="TestStaff")
#role = auth.s3_create_role("Test Group", uid="TESTGROUP")
#dtable = s3db.pr_delegation
#record = dtable.insert(role_id=partners, group_id=role)
#def setRoles():
#auth.s3_impersonate("normaluser@example.com")
#import timeit
#runtime = timeit.Timer(setRoles).timeit(number=100)
#if runtime > (MAX_RUNTIME / 10.0):
#raise AssertionError("s3_impersonate: maximum acceptable run time exceeded (%sms > %sms)" % (int(runtime * 10), MAX_RUNTIME))
## Logout
#auth.s3_impersonate(None)
#finally:
#s3db.pr_remove_affiliation(org1, org2, role="TestOrgUnit")
#s3db.pr_remove_affiliation(org1, org2, role="TestStaff")
#s3db.pr_remove_affiliation(org1, org3, role="TestPartners")
#auth.s3_delete_role("TESTGROUP")
#current.db.rollback()
# -------------------------------------------------------------------------
def tearDown(self):
current.db.rollback()
current.auth.override = False
# =============================================================================
class RoleAssignmentTests(unittest.TestCase):
""" Test role assignments """
# -------------------------------------------------------------------------
def testAssignRole(self):
""" Test role assignment to a user """
db = current.db
auth = current.auth
UUID1 = "TESTAUTOCREATEDROLE1"
UUID2 = "TESTAUTOCREATEDROLE2"
uuids = [UUID1, UUID2]
table = auth.settings.table_group
query1 = (table.deleted != True) & (table.uuid == UUID1)
query2 = (table.deleted != True) & (table.uuid == UUID2)
auth.s3_impersonate("admin@example.com")
user_id = auth.user.id
row = db(query1).select(limitby=(0, 1)).first()
self.assertEqual(row, None)
row = db(query2).select(limitby=(0, 1)).first()
self.assertEqual(row, None)
auth.s3_assign_role(user_id, uuids, for_pe=0)
row = db(query1).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertTrue(row.id > 0)
self.assertTrue(row.role == UUID1)
self.assertTrue(row.uuid == UUID1)
row = db(query2).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertTrue(row.id > 0)
self.assertTrue(row.role == UUID2)
self.assertTrue(row.uuid == UUID2)
auth.s3_delete_role(UUID1)
row = db(query1).select(limitby=(0, 1)).first()
self.assertEqual(row, None)
row = db(query2).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertTrue(row.id > 0)
self.assertTrue(row.role == UUID2)
self.assertTrue(row.uuid == UUID2)
auth.s3_delete_role(UUID2)
row = db(query1).select(limitby=(0, 1)).first()
self.assertEqual(row, None)
row = db(query2).select(limitby=(0, 1)).first()
self.assertEqual(row, None)
# -------------------------------------------------------------------------
def testGetRoles(self):
""" Test role lookup for a user """
auth = current.auth
UUID = "TESTAUTOCREATEDROLE"
role_id = auth.s3_create_role(UUID, uid=UUID)
try:
auth.s3_impersonate("normaluser@example.com")
user_id = auth.user.id
auth.s3_assign_role(user_id, role_id, for_pe=None)
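# for_pe filter semantics (sketch): without for_pe, all assignments
# are returned; for_pe=None matches only site-wide assignments,
# for_pe=0 only unrestricted ones, and a pe_id only assignments
# for that particular entity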
roles = auth.s3_get_roles(user_id)
self.assertTrue(role_id in roles)
roles = auth.s3_get_roles(user_id, for_pe=None)
self.assertTrue(role_id in roles)
roles = auth.s3_get_roles(user_id, for_pe=0)
self.assertFalse(role_id in roles)
roles = auth.s3_get_roles(user_id, for_pe=1)
self.assertFalse(role_id in roles)
auth.s3_retract_role(user_id, role_id, for_pe=None)
auth.s3_assign_role(user_id, role_id, for_pe=0)
roles = auth.s3_get_roles(user_id)
self.assertTrue(role_id in roles)
roles = auth.s3_get_roles(user_id, for_pe=None)
self.assertFalse(role_id in roles)
roles = auth.s3_get_roles(user_id, for_pe=0)
self.assertTrue(role_id in roles)
roles = auth.s3_get_roles(user_id, for_pe=1)
self.assertFalse(role_id in roles)
auth.s3_retract_role(user_id, role_id, for_pe=0)
auth.s3_assign_role(user_id, role_id, for_pe=1)
roles = auth.s3_get_roles(user_id)
self.assertTrue(role_id in roles)
roles = auth.s3_get_roles(user_id, for_pe=None)
self.assertFalse(role_id in roles)
roles = auth.s3_get_roles(user_id, for_pe=0)
self.assertFalse(role_id in roles)
roles = auth.s3_get_roles(user_id, for_pe=1)
self.assertTrue(role_id in roles)
auth.s3_retract_role(user_id, role_id, for_pe=1)
finally:
auth.s3_delete_role(UUID)
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def tearDown(self):
current.db.rollback()
# =============================================================================
class RecordOwnershipTests(unittest.TestCase):
""" Test record ownership """
# -------------------------------------------------------------------------
def testOwnershipRequired(self):
""" Test ownership_required for all policies """
from s3.s3aaa import S3Permission
auth = current.auth
permission = auth.permission
deployment_settings = current.deployment_settings
policy = deployment_settings.get_security_policy()
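# Expected pattern (sketch): the simple policies 1 and 2 never
# require ownership, while the ACL-based policies 3-8 (and the
# fallback for unknown policy values) do for restricted methods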
try:
deployment_settings.security.policy = 1
permission = S3Permission(auth)
ownership_required = permission.ownership_required
o = ownership_required("update", "dvi_body", c="dvi", f="body")
self.assertFalse(o)
deployment_settings.security.policy = 2
permission = S3Permission(auth)
ownership_required = permission.ownership_required
o = ownership_required("update", "dvi_body", c="dvi", f="body")
self.assertFalse(o)
deployment_settings.security.policy = 3
permission = S3Permission(auth)
ownership_required = permission.ownership_required
o = ownership_required("update", "dvi_body", c="dvi", f="body")
self.assertTrue(o)
deployment_settings.security.policy = 4
permission = S3Permission(auth)
ownership_required = permission.ownership_required
o = ownership_required("update", "dvi_body", c="dvi", f="body")
self.assertTrue(o)
deployment_settings.security.policy = 5
permission = S3Permission(auth)
ownership_required = permission.ownership_required
o = ownership_required("update", "dvi_body", c="dvi", f="body")
self.assertTrue(o)
deployment_settings.security.policy = 6
permission = S3Permission(auth)
ownership_required = permission.ownership_required
o = ownership_required("update", "dvi_body", c="dvi", f="body")
self.assertTrue(o)
deployment_settings.security.policy = 7
permission = S3Permission(auth)
ownership_required = permission.ownership_required
o = ownership_required("update", "dvi_body", c="dvi", f="body")
self.assertTrue(o)
deployment_settings.security.policy = 8
permission = S3Permission(auth)
ownership_required = permission.ownership_required
o = ownership_required("update", "dvi_body", c="dvi", f="body")
self.assertTrue(o)
deployment_settings.security.policy = 0
permission = S3Permission(auth)
ownership_required = permission.ownership_required
o = ownership_required("update", "dvi_body", c="dvi", f="body")
self.assertTrue(o)
finally:
deployment_settings.security.policy = policy
# -------------------------------------------------------------------------
def testSessionOwnership(self):
""" Test session ownership methods """
db = current.db
s3db = current.s3db
auth = current.auth
table = s3db.pr_person
table2 = "dvi_body"
auth.s3_impersonate(None)
auth.s3_clear_session_ownership()
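# Session ownership is tracked per table and record id in the
# current session, independently of any login (sketch)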
auth.s3_make_session_owner(table, 1)
# Without a record ID, session ownership is always False
self.assertFalse(auth.s3_session_owns(table, None))
# Check for non-owned record
self.assertFalse(auth.s3_session_owns(table, 2))
# Check for owned record
self.assertTrue(auth.s3_session_owns(table, 1))
# If user is logged-in, session ownership is always False
auth.s3_impersonate("normaluser@example.com")
self.assertFalse(auth.s3_session_owns(table, 1))
auth.s3_impersonate(None)
auth.s3_make_session_owner(table, 1)
auth.s3_make_session_owner(table, 2)
self.assertTrue(auth.s3_session_owns(table, 1))
self.assertTrue(auth.s3_session_owns(table, 2))
auth.s3_clear_session_ownership(table, 1)
self.assertFalse(auth.s3_session_owns(table, 1))
self.assertTrue(auth.s3_session_owns(table, 2))
auth.s3_make_session_owner(table, 1)
auth.s3_make_session_owner(table, 2)
auth.s3_make_session_owner(table2, 1)
auth.s3_make_session_owner(table2, 2)
self.assertTrue(auth.s3_session_owns(table, 1))
self.assertTrue(auth.s3_session_owns(table, 2))
self.assertTrue(auth.s3_session_owns(table2, 1))
self.assertTrue(auth.s3_session_owns(table2, 2))
auth.s3_clear_session_ownership(table)
self.assertFalse(auth.s3_session_owns(table, 1))
self.assertFalse(auth.s3_session_owns(table, 2))
self.assertTrue(auth.s3_session_owns(table2, 1))
self.assertTrue(auth.s3_session_owns(table2, 2))
auth.s3_make_session_owner(table, 1)
auth.s3_make_session_owner(table, 2)
auth.s3_make_session_owner(table2, 1)
auth.s3_make_session_owner(table2, 2)
self.assertTrue(auth.s3_session_owns(table, 1))
self.assertTrue(auth.s3_session_owns(table, 2))
self.assertTrue(auth.s3_session_owns(table2, 1))
self.assertTrue(auth.s3_session_owns(table2, 2))
auth.s3_clear_session_ownership()
self.assertFalse(auth.s3_session_owns(table, 1))
self.assertFalse(auth.s3_session_owns(table, 2))
self.assertFalse(auth.s3_session_owns(table2, 1))
self.assertFalse(auth.s3_session_owns(table2, 2))
# -------------------------------------------------------------------------
def testOwnershipPublicRecord(self):
""" Test ownership for a public record """
auth = current.auth
s3_impersonate = auth.s3_impersonate
is_owner = auth.permission.is_owner
assertTrue = self.assertTrue
table, record_id = self.create_test_record()
auth.s3_clear_session_ownership()
try:
# Admin owns all records
s3_impersonate("admin@example.com")
assertTrue(is_owner(table, record_id))
# Normal owns all public records
s3_impersonate("normaluser@example.com")
assertTrue(is_owner(table, record_id))
# Unauthenticated users never own a record
s3_impersonate(None)
self.assertFalse(is_owner(table, record_id))
# ...unless the session owns the record
auth.s3_make_session_owner(table, record_id)
assertTrue(is_owner(table, record_id))
finally:
self.remove_test_record()
# -------------------------------------------------------------------------
def testOwnershipAdminOwnedRecord(self):
""" Test ownership for an Admin-owned record """
auth = current.auth
is_owner = auth.permission.is_owner
s3_impersonate = auth.s3_impersonate
table, record_id = self.create_test_record()
auth.s3_clear_session_ownership()
try:
user_id = auth.s3_get_user_id("admin@example.com")
current.db(table.id == record_id).update(owned_by_user=user_id)
# Admin owns all records
s3_impersonate("admin@example.com")
self.assertTrue(is_owner(table, record_id))
# Normal does not own this record
s3_impersonate("normaluser@example.com")
self.assertFalse(is_owner(table, record_id))
# Unauthenticated does not own this record
s3_impersonate(None)
self.assertFalse(is_owner(table, record_id))
# ...unless the session owns the record
auth.s3_make_session_owner(table, record_id)
self.assertTrue(is_owner(table, record_id))
finally:
self.remove_test_record()
# -------------------------------------------------------------------------
def testOwnershipUserOwnedRecord(self):
""" Test ownership for a user-owned record """
auth = current.auth
is_owner = auth.permission.is_owner
s3_impersonate = auth.s3_impersonate
table, record_id = self.create_test_record()
auth.s3_clear_session_ownership()
try:
# Make normaluser the owner of this record
user_id = auth.s3_get_user_id("normaluser@example.com")
current.db(table.id == record_id).update(owned_by_user=user_id)
# Admin owns all records
s3_impersonate("admin@example.com")
self.assertTrue(is_owner(table, record_id))
# Normal owns this record
s3_impersonate("normaluser@example.com")
self.assertTrue(is_owner(table, record_id))
# Unauthenticated does not own a record
s3_impersonate(None)
self.assertFalse(is_owner(table, record_id))
# ...unless the session owns the record
auth.s3_make_session_owner(table, record_id)
self.assertTrue(is_owner(table, record_id))
finally:
self.remove_test_record()
# -------------------------------------------------------------------------
def testOwnershipGroupOwnedRecord(self):
""" Test ownership for a collectively owned record """
auth = current.auth
is_owner = auth.permission.is_owner
s3_impersonate = auth.s3_impersonate
table, record_id = self.create_test_record()
auth.s3_clear_session_ownership()
try:
sr = auth.get_system_roles()
user_id = auth.s3_get_user_id("admin@example.com")
current.db(table.id == record_id).update(owned_by_user=user_id,
owned_by_group=sr.AUTHENTICATED)
# Admin owns all records
s3_impersonate("admin@example.com")
self.assertTrue(is_owner(table, record_id))
# Normal owns this record as member of AUTHENTICATED
s3_impersonate("normaluser@example.com")
self.assertTrue(is_owner(table, record_id))
# Unauthenticated does not own this record
s3_impersonate(None)
self.assertFalse(is_owner(table, record_id))
# ...unless the session owns the record
auth.s3_make_session_owner(table, record_id)
self.assertTrue(is_owner(table, record_id))
finally:
self.remove_test_record()
# -------------------------------------------------------------------------
def testOwnershipOrganisationOwnedRecord(self):
""" Test group-ownership for an entity-owned record """
auth = current.auth
is_owner = auth.permission.is_owner
s3_impersonate = auth.s3_impersonate
table, record_id = self.create_test_record()
auth.s3_clear_session_ownership()
try:
org = current.s3db.pr_get_pe_id("org_organisation", 1)
role = auth.s3_create_role("Example Role", uid="TESTROLE")
user_id = auth.s3_get_user_id("admin@example.com")
current.db(table.id == record_id).update(owned_by_user=user_id,
owned_by_group=role,
realm_entity=org)
# Admin owns all records
s3_impersonate("admin@example.com")
self.assertTrue(is_owner(table, record_id))
# Normal user does not own the record
s3_impersonate("normaluser@example.com")
user_id = auth.user.id
self.assertFalse(is_owner(table, record_id))
# ...unless they have the role for this org
auth.s3_assign_role(user_id, role, for_pe=org)
self.assertTrue(is_owner(table, record_id))
auth.s3_retract_role(user_id, role, for_pe=org)
self.assertFalse(is_owner(table, record_id))
# ...or have the role without limitation (any org)
auth.s3_assign_role(user_id, role, for_pe=0)
self.assertTrue(is_owner(table, record_id))
auth.s3_retract_role(user_id, role, for_pe=[])
self.assertFalse(is_owner(table, record_id))
# Unauthenticated does not own this record
s3_impersonate(None)
self.assertFalse(is_owner(table, record_id))
# ...unless the session owns the record
auth.s3_make_session_owner(table, record_id)
self.assertTrue(is_owner(table, record_id))
finally:
self.remove_test_record()
auth.s3_delete_role("TESTROLE")
# -------------------------------------------------------------------------
def testOwnershipOverride(self):
""" Test override of owners in is_owner """
auth = current.auth
table, record_id = self.create_test_record()
auth.s3_clear_session_ownership()
try:
org = current.s3db.pr_get_pe_id("org_organisation", 1)
role = auth.s3_create_role("Example Role", uid="TESTROLE")
user_id = auth.s3_get_user_id("admin@example.com")
current.db(table.id == record_id).update(realm_entity=org,
owned_by_group=role,
owned_by_user=user_id)
# Normal user does not own the record
auth.s3_impersonate("normaluser@example.com")
self.assertFalse(auth.permission.is_owner(table, record_id))
# ...unless we override the record's owner stamp
owners_override = (None, None, None)
self.assertTrue(auth.permission.is_owner(table, record_id,
owners=owners_override))
finally:
self.remove_test_record()
auth.s3_delete_role("TESTROLE")
# -------------------------------------------------------------------------
def testGetOwners(self):
""" Test lookup of record owners """
auth = current.auth
table, record_id = self.create_test_record()
auth.s3_clear_session_ownership()
assertEqual = self.assertEqual
try:
user = auth.s3_get_user_id("admin@example.com")
role = auth.s3_create_role("Example Role", uid="TESTROLE")
org = current.s3db.pr_get_pe_id("org_organisation", 1)
e, r, u = auth.permission.get_owners(table, None)
assertEqual(e, None)
assertEqual(r, None)
assertEqual(u, None)
e, r, u = auth.permission.get_owners(None, record_id)
assertEqual(e, None)
assertEqual(r, None)
assertEqual(u, None)
e, r, u = auth.permission.get_owners(None, None)
assertEqual(e, None)
assertEqual(r, None)
assertEqual(u, None)
e, r, u = auth.permission.get_owners(table, record_id)
assertEqual(e, None)
assertEqual(r, None)
assertEqual(u, None)
current.db(table.id == record_id).update(owned_by_user=user,
owned_by_group=role,
realm_entity=org)
e, r, u = auth.permission.get_owners(table, record_id)
assertEqual(e, org)
assertEqual(r, role)
assertEqual(u, user)
e, r, u = auth.permission.get_owners(table._tablename, record_id)
assertEqual(e, org)
assertEqual(r, role)
assertEqual(u, user)
finally:
self.remove_test_record()
auth.s3_delete_role("TESTROLE")
# -------------------------------------------------------------------------
def tearDown(self):
current.auth.s3_impersonate(None)
current.db.rollback()
# -------------------------------------------------------------------------
# Helpers
#
def create_test_record(self):
auth = current.auth
# Create a record
auth.s3_impersonate(None)
table = current.s3db.org_office
table.owned_by_user.default = None
auth.override = True
record_id = table.insert(name="Ownership Test Office")
auth.override = False
self.table = table
self.record_id = record_id
return table, record_id
# -------------------------------------------------------------------------
def remove_test_record(self):
current.db(self.table.id == self.record_id).delete()
return
# =============================================================================
class ACLManagementTests(unittest.TestCase):
""" Test ACL management/lookup functions """
# -------------------------------------------------------------------------
def testRequiredACL(self):
""" Test lambda to compute the required ACL """
p = current.auth.permission
assertEqual = self.assertEqual
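# required_acl is expected to OR together the permission bits of
# the named methods (e.g. ["create", "update"] => CREATE|UPDATE)
# and to yield NONE for an empty or unknown method list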
assertEqual(p.required_acl(["read"]), p.READ)
assertEqual(p.required_acl(["create"]), p.CREATE)
assertEqual(p.required_acl(["update"]), p.UPDATE)
assertEqual(p.required_acl(["delete"]), p.DELETE)
assertEqual(p.required_acl(["create", "update"]), p.CREATE | p.UPDATE)
assertEqual(p.required_acl([]), p.NONE)
assertEqual(p.required_acl(["invalid"]), p.NONE)
# -------------------------------------------------------------------------
def testMostPermissive(self):
""" Test lambda to compute the most permissive ACL """
p = current.auth.permission
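# most_permissive reduces a list of (uacl, oacl) tuples by bitwise
# OR, i.e. the union of all granted permissions (sketch)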
self.assertEqual(p.most_permissive([(p.NONE, p.READ),
(p.READ, p.READ)]),
(p.READ, p.READ))
self.assertEqual(p.most_permissive([(p.NONE, p.ALL),
(p.CREATE, p.ALL),
(p.READ, p.ALL)]),
(p.READ | p.CREATE, p.ALL))
# -------------------------------------------------------------------------
def testMostRestrictive(self):
""" Test lambda to compute the most restrictive ACL """
p = current.auth.permission
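# most_restrictive reduces by bitwise AND, i.e. a permission
# survives only if every tuple in the list grants it (sketch)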
self.assertEqual(p.most_restrictive([(p.NONE, p.READ),
(p.READ, p.READ)]),
(p.NONE, p.READ))
self.assertEqual(p.most_restrictive([(p.CREATE, p.ALL),
(p.READ, p.READ)]),
(p.NONE, p.READ))
# -------------------------------------------------------------------------
def testUpdateControllerACL(self):
""" Test update/delete of a controller ACL """
auth = current.auth
table = auth.permission.table
self.assertNotEqual(table, None)
group_id = auth.s3_create_role("Test Role", uid="TEST")
acl_id = None
try:
self.assertTrue(group_id is not None and group_id != 0)
c = "pr"
f = "person"
uacl = auth.permission.NONE
oacl = auth.permission.ALL
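# A controller ACL is keyed by (c, f) with tablename None; uacl
# applies to all users holding the role, oacl only to record
# owners (sketch, assuming the standard S3Permission schema)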
acl_id = auth.permission.update_acl(group_id,
c=c, f=f,
uacl=uacl, oacl=oacl)
self.assertNotEqual(acl_id, None)
self.assertNotEqual(acl_id, 0)
acl = table[acl_id]
self.assertNotEqual(acl, None)
self.assertEqual(acl.controller, c)
self.assertEqual(acl.function, f)
self.assertEqual(acl.tablename, None)
self.assertEqual(acl.unrestricted, False)
self.assertEqual(acl.entity, None)
self.assertEqual(acl.uacl, uacl)
self.assertEqual(acl.oacl, oacl)
self.assertFalse(acl.deleted)
success = auth.permission.delete_acl(group_id,
c=c, f=f)
self.assertTrue(success is not None and success > 0)
acl = table[acl_id]
self.assertNotEqual(acl, None)
self.assertTrue(acl.deleted)
self.assertTrue(acl.deleted_fk, '{"group_id": %d}' % group_id)
finally:
if acl_id:
del table[acl_id]
auth.s3_delete_role(group_id)
# -------------------------------------------------------------------------
def testUpdateTableACL(self):
""" Test update/delete of a table-ACL """
auth = current.auth
table = auth.permission.table
self.assertNotEqual(table, None)
group_id = auth.s3_create_role("Test Role", uid="TEST")
acl_id = None
try:
self.assertTrue(group_id is not None and group_id != 0)
c = "pr"
f = "person"
t = "pr_person"
uacl = auth.permission.NONE
oacl = auth.permission.ALL
acl_id = auth.permission.update_acl(group_id,
c=c, f=f, t=t,
uacl=uacl, oacl=oacl)
self.assertNotEqual(acl_id, None)
self.assertNotEqual(acl_id, 0)
acl = table[acl_id]
self.assertNotEqual(acl, None)
self.assertEqual(acl.controller, None)
self.assertEqual(acl.function, None)
self.assertEqual(acl.tablename, t)
self.assertEqual(acl.unrestricted, False)
self.assertEqual(acl.entity, None)
self.assertEqual(acl.uacl, uacl)
self.assertEqual(acl.oacl, oacl)
self.assertFalse(acl.deleted)
success = auth.permission.delete_acl(group_id,
c=c, f=f, t=t)
self.assertTrue(success is not None and success > 0)
acl = table[acl_id]
self.assertNotEqual(acl, None)
self.assertTrue(acl.deleted)
self.assertTrue(acl.deleted_fk, '{"group_id": %d}' % group_id)
finally:
if acl_id:
del table[acl_id]
auth.s3_delete_role(group_id)
# -------------------------------------------------------------------------
def testApplicableACLsPolicy8(self):
""" Test applicable_acls with policy 8 """
db = current.db
auth = current.auth
s3db = current.s3db
# Create 3 test organisations
xmlstr = """
<s3xml>
<resource name="org_organisation" uuid="TAAO1">
<data field="name">TestApplicableACLsOrg1</data>
</resource>
<resource name="org_organisation" uuid="TAAO2">
<data field="name">TestApplicableACLsOrg2</data>
</resource>
<resource name="org_organisation" uuid="TAAO3">
<data field="name">TestApplicableACLsOrg3</data>
</resource>
</s3xml>"""
try:
auth.override = True
from lxml import etree
xmltree = etree.ElementTree(etree.fromstring(xmlstr))
resource = s3db.resource("org_organisation")
resource.import_xml(xmltree)
resource = s3db.resource("org_organisation",
uid=["TAAO1","TAAO2","TAAO3"])
rows = resource.select(["pe_id", "uuid"], as_rows=True)
orgs = dict((row.uuid, row.pe_id) for row in rows)
org1 = orgs["TAAO1"]
org2 = orgs["TAAO2"]
org3 = orgs["TAAO3"]
auth.override = False
except:
db.rollback()
auth.override = False
raise
try:
# Have two orgs, set org2 as OU descendant of org1
s3db.pr_add_affiliation(org1, org2, role="TestOrgUnit")
# Set org3 as non-OU (role_type=9) partner of org1
partners = s3db.pr_add_affiliation(org1, org3, role="TestPartners", role_type=9)
self.assertNotEqual(partners, None)
# Add the user as OU descendant of org3
user_id = auth.s3_get_user_id("normaluser@example.com")
user_pe = auth.s3_user_pe_id(user_id)
self.assertNotEqual(user_pe, None)
s3db.pr_add_affiliation(org3, user_pe, role="TestStaff")
# Create a TESTGROUP and assign a table ACL
acl = auth.permission
role = auth.s3_create_role("Test Group", None,
dict(c="org", f="office", uacl=acl.ALL, oacl=acl.ALL),
dict(t="org_office", uacl=acl.READ, oacl=acl.ALL),
uid="TESTGROUP")
auth.s3_assign_role(user_id, role)
# We use delegations (policy 8)
current.deployment_settings.security.policy = 8
from s3.s3aaa import S3Permission
auth.permission = S3Permission(auth)
# Impersonate as normal user
auth.s3_impersonate("normaluser@example.com")
realms = auth.user.realms
delegations = auth.user.delegations
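# applicable_acls is expected to return a Storage mapping realm
# entities to (uacl, oacl) tuples - empty as long as neither the
# user's realms nor delegations reach the target entity (sketch)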
# Check permissions
acls = auth.permission.applicable_acls(acl.DELETE,
realms,
delegations,
c="org",
f="office",
t="org_office",
entity=org2)
self.assertTrue(isinstance(acls, Storage))
self.assertEqual(acls, Storage())
# Delegate TESTGROUP to the TestPartners
auth.s3_delegate_role(role, org1, role="TestPartners")
# Update realms and delegations
auth.s3_impersonate("normaluser@example.com")
realms = auth.user.realms
delegations = auth.user.delegations
# Check permissions again
acls = auth.permission.applicable_acls(acl.DELETE,
realms,
delegations,
c="org",
f="office",
t="org_office",
entity=org2)
self.assertTrue(isinstance(acls, Storage))
self.assertTrue(org2 in acls)
self.assertEqual(acls[org2], (acl.READ, acl.ALL))
finally:
s3db.pr_remove_affiliation(org1, org2, role="TestOrgUnit")
s3db.pr_remove_affiliation(org1, org2, role="TestStaff")
s3db.pr_remove_affiliation(org1, org3, role="TestPartners")
auth.s3_delete_role("TESTGROUP")
db.rollback()
# =============================================================================
class HasPermissionTests(unittest.TestCase):
""" Test permission check method """
# -------------------------------------------------------------------------
@classmethod
def setUpClass(cls):
auth = current.auth
# Create test roles
acl = auth.permission
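# Each dict below defines one ACL of the role: c/f/t select the
# scope (controller, function, table), uacl applies to all users
# holding the role, oacl only to record owners (sketch)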
auth.s3_create_role("DVI Reader", None,
dict(c="dvi",
uacl=acl.READ, oacl=acl.READ|acl.UPDATE),
dict(c="dvi", f="body",
uacl=acl.READ|acl.CREATE, oacl=acl.READ|acl.UPDATE),
dict(t="dvi_body",
uacl=acl.READ|acl.CREATE|acl.UPDATE, oacl=acl.READ|acl.UPDATE),
uid="TESTDVIREADER")
auth.s3_create_role("DVI Editor", None,
dict(c="dvi",
uacl=acl.READ|acl.CREATE|acl.UPDATE, oacl=acl.READ|acl.UPDATE),
dict(c="dvi", f="body",
uacl=acl.READ|acl.CREATE|acl.UPDATE, oacl=acl.READ|acl.UPDATE),
dict(t="dvi_body",
uacl=acl.READ|acl.CREATE|acl.UPDATE, oacl=acl.READ|acl.UPDATE),
uid="TESTDVIEDITOR")
auth.s3_create_role("DVI Admin", None,
dict(c="dvi",
uacl=acl.ALL, oacl=acl.ALL),
dict(c="dvi", f="body",
uacl=acl.ALL, oacl=acl.ALL),
dict(t="dvi_body",
uacl=acl.ALL, oacl=acl.ALL),
uid="TESTDVIADMIN")
current.db.commit()
# -------------------------------------------------------------------------
def setUp(self):
db = current.db
auth = current.auth
s3db = current.s3db
settings = current.deployment_settings
self.policy = settings.get_security_policy()
# Get the role IDs
gtable = auth.settings.table_group
row = db(gtable.uuid=="TESTDVIREADER").select(limitby=(0, 1)).first()
self.dvi_reader = row.id
row = db(gtable.uuid=="TESTDVIEDITOR").select(limitby=(0, 1)).first()
self.dvi_editor = row.id
row = db(gtable.uuid=="TESTDVIADMIN").select(limitby=(0, 1)).first()
self.dvi_admin = row.id
auth.s3_impersonate("admin@example.com")
# Create test organisations
table = s3db.org_organisation
record_id = table.insert(name="TestOrganisation1")
s3db.update_super(table, Storage(id=record_id))
self.org1 = s3db.pr_get_pe_id(table, record_id)
record_id = table.insert(name="TestOrganisation2")
s3db.update_super(table, Storage(id=record_id))
self.org2 = s3db.pr_get_pe_id(table, record_id)
record_id = table.insert(name="TestOrganisation3")
s3db.update_super(table, Storage(id=record_id))
self.org3 = s3db.pr_get_pe_id(table, record_id)
# Create test records
table = s3db.dvi_body
record_id = table.insert(pe_label="TestRecord1",
owned_by_user=auth.user.id,
realm_entity=self.org1)
s3db.update_super(table, Storage(id=record_id))
self.record1 = record_id
record_id = table.insert(pe_label="TestRecord2",
owned_by_user=auth.user.id,
realm_entity=self.org2)
s3db.update_super(table, Storage(id=record_id))
self.record2 = record_id
record_id = table.insert(pe_label="TestRecord3",
owned_by_user=auth.user.id,
realm_entity=self.org3)
s3db.update_super(table, Storage(id=record_id))
self.record3 = record_id
# Remove session ownership
auth.s3_clear_session_ownership()
auth.s3_impersonate(None)
auth.override = False
# -------------------------------------------------------------------------
def testPolicy1(self):
""" Test permission check with policy 1 """
auth = current.auth
current.deployment_settings.security.policy = 1
auth.permission = S3Permission(auth)
has_permission = auth.s3_has_permission
# Check anonymous
auth.s3_impersonate(None)
permitted = has_permission("read", table="dvi_body")
self.assertTrue(permitted)
permitted = has_permission("update", table="dvi_body")
self.assertFalse(permitted)
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("read", table="dvi_body")
self.assertTrue(permitted)
permitted = has_permission("update", table="dvi_body")
self.assertTrue(permitted)
# -------------------------------------------------------------------------
def testPolicy3(self):
""" Test permission check with policy 3 """
auth = current.auth
current.deployment_settings.security.policy = 3
auth.permission = S3Permission(auth)
has_permission = auth.s3_has_permission
# Check anonymous
auth.s3_impersonate(None)
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertFalse(permitted)
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertFalse(permitted)
# Test with TESTDVIREADER
auth.s3_assign_role(auth.user.id, self.dvi_reader)
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertTrue(permitted)
permitted = auth.s3_has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertTrue(permitted)
permitted = has_permission("create", c="dvi", f="body", table="dvi_body")
self.assertFalse(permitted) # Function ACL not applicable in policy 3
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
auth.s3_retract_role(auth.user.id, self.dvi_reader)
# Test with TESTDVIEDITOR
auth.s3_assign_role(auth.user.id, self.dvi_editor)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertTrue(permitted)
permitted = has_permission("delete", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
# -------------------------------------------------------------------------
def testPolicy4(self):
""" Test permission check with policy 4 """
auth = current.auth
current.deployment_settings.security.policy = 4
auth.permission = S3Permission(auth)
has_permission = auth.s3_has_permission
# Check anonymous
auth.s3_impersonate(None)
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertFalse(permitted)
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertFalse(permitted)
# Test with TESTDVIREADER
auth.s3_assign_role(auth.user.id, self.dvi_reader)
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertTrue(permitted)
permitted = has_permission("create", c="dvi", f="body", table="dvi_body")
self.assertTrue(permitted) # Function ACL overrides controller ACL
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
auth.s3_retract_role(auth.user.id, self.dvi_reader)
# Test with TESTDVIEDITOR
auth.s3_assign_role(auth.user.id, self.dvi_editor)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertTrue(permitted)
permitted = has_permission("delete", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
auth.s3_retract_role(auth.user.id, self.dvi_editor)
# -------------------------------------------------------------------------
def testPolicy5(self):
""" Test permission check with policy 5 """
auth = current.auth
current.deployment_settings.security.policy = 5
auth.permission = S3Permission(auth)
has_permission = auth.s3_has_permission
accessible_url = auth.permission.accessible_url
# Check anonymous
auth.s3_impersonate(None)
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertFalse(permitted)
url = accessible_url(c="dvi", f="body")
self.assertEqual(url, False)
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertFalse(permitted)
url = accessible_url(c="dvi", f="body")
self.assertEqual(url, False)
# Test with TESTDVIREADER
auth.s3_assign_role(auth.user.id, self.dvi_reader)
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertTrue(permitted)
url = accessible_url(c="dvi", f="body")
self.assertNotEqual(url, False)
permitted = has_permission("create", c="dvi", f="body", table="dvi_body")
self.assertTrue(permitted) # Function ACL overrides controller ACL
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted) # Page ACL blocks Table ACL
# Toggle page ACL
acl = auth.permission
auth.permission.update_acl("TESTDVIREADER", c="dvi", f="body",
uacl=acl.READ|acl.CREATE|acl.UPDATE,
oacl=acl.READ|acl.CREATE|acl.UPDATE)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertTrue(permitted)
auth.permission.update_acl("TESTDVIREADER", c="dvi", f="body",
uacl=acl.READ|acl.CREATE,
oacl=acl.READ|acl.CREATE|acl.UPDATE)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
auth.s3_retract_role(auth.user.id, self.dvi_reader)
# Test with TESTDVIEDITOR
auth.s3_assign_role(auth.user.id, self.dvi_editor)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertTrue(permitted)
permitted = has_permission("delete", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
auth.s3_retract_role(auth.user.id, self.dvi_editor)
# -------------------------------------------------------------------------
def testPolicy6(self):
""" Test permission check with policy 6 """
auth = current.auth
current.deployment_settings.security.policy = 6
auth.permission = S3Permission(auth)
has_permission = auth.s3_has_permission
# Check anonymous
auth.s3_impersonate(None)
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertFalse(permitted)
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertFalse(permitted)
# Test with TESTDVIEDITOR with universal realm
auth.s3_assign_role(auth.user.id, self.dvi_editor, for_pe=0)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertTrue(permitted)
permitted = has_permission("delete", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertTrue(permitted)
permitted = has_permission("delete", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertFalse(permitted)
auth.s3_retract_role(auth.user.id, self.dvi_editor, for_pe=[])
# Test with TESTDVIEDITOR with limited realm
auth.s3_assign_role(auth.user.id, self.dvi_editor, for_pe=self.org1)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertTrue(permitted)
permitted = has_permission("delete", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertFalse(permitted)
permitted = has_permission("delete", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertFalse(permitted)
# Extend realm
auth.s3_assign_role(auth.user.id, self.dvi_editor, for_pe=self.org2)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertTrue(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertTrue(permitted)
# Retract dvi_editor role
auth.s3_retract_role(auth.user.id, self.dvi_editor, for_pe=[])
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertFalse(permitted)
# -------------------------------------------------------------------------
def testPolicy7(self):
""" Test permission check with policy 7 """
auth = current.auth
s3db = current.s3db
current.deployment_settings.security.policy = 7
auth.permission = S3Permission(auth)
has_permission = auth.s3_has_permission
# Check anonymous
auth.s3_impersonate(None)
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertFalse(permitted)
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertFalse(permitted)
# Test with TESTDVIEDITOR with limited realm
auth.s3_assign_role(auth.user.id, self.dvi_editor, for_pe=self.org1)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertTrue(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertFalse(permitted)
# Make org2 a sub-entity of org1
s3db.pr_add_affiliation(self.org1, self.org2, role="TestOrgUnit")
# Reload realms and test again
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertTrue(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertTrue(permitted) # Should now have access
# Make org1 a sub-entity of org2
s3db.pr_remove_affiliation(self.org1, self.org2, role="TestOrgUnit")
s3db.pr_add_affiliation(self.org2, self.org1, role="TestOrgUnit")
# Reload realms
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertTrue(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertFalse(permitted) # Should no longer have access
# Switch realm
auth.s3_retract_role(auth.user.id, self.dvi_editor, for_pe=self.org1)
auth.s3_assign_role(auth.user.id, self.dvi_editor, for_pe=self.org2)
# Reload realms
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertTrue(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertTrue(permitted) # Should have access again
# Remove org1 from realm
s3db.pr_remove_affiliation(self.org2, self.org1, role="TestOrgUnit")
# Reload realms
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertTrue(permitted) # Should have access again
# Retract dvi_editor role
auth.s3_retract_role(auth.user.id, self.dvi_editor, for_pe=[])
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertFalse(permitted)
# -------------------------------------------------------------------------
def testPolicy8(self):
""" Test permission check with policy 8 """
auth = current.auth
s3db = current.s3db
current.deployment_settings.security.policy = 8
auth.permission = S3Permission(auth)
user = auth.s3_user_pe_id(auth.s3_get_user_id("normaluser@example.com"))
try:
has_permission = auth.s3_has_permission
# Check anonymous
auth.s3_impersonate(None)
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertFalse(permitted)
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("read", c="dvi", f="body", table="dvi_body")
self.assertFalse(permitted)
# Add the user as OU descendant of org3 and assign dvi_editor
s3db.pr_add_affiliation(self.org3, user, role="TestStaff")
auth.s3_assign_role(auth.user.id, self.dvi_editor, for_pe=self.org3)
# User should be able to read and update record3, but not record1 or record2
permitted = has_permission("read", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
permitted = has_permission("read", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertFalse(permitted)
permitted = has_permission("read", c="dvi", f="body", table="dvi_body", record_id=self.record3)
self.assertTrue(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertFalse(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record3)
self.assertTrue(permitted)
# Make org3 an OU of org2
s3db.pr_add_affiliation(self.org2, self.org3, role="TestOrgUnit")
# Delegate dvi_reader from org1 to org2
auth.s3_delegate_role(self.dvi_reader, self.org1, receiver=self.org2)
# Update realms
auth.s3_impersonate("normaluser@example.com")
# User should now also be able to read record1 (via the delegation), but still not record2
permitted = has_permission("read", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertTrue(permitted)
permitted = has_permission("read", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertFalse(permitted)
permitted = has_permission("read", c="dvi", f="body", table="dvi_body", record_id=self.record3)
self.assertTrue(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
self.assertFalse(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record2)
self.assertFalse(permitted)
permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record3)
self.assertTrue(permitted)
auth.s3_remove_delegation(self.dvi_reader, self.org1, receiver=self.org2)
finally:
# Remove delegation, affiliation and role
s3db.pr_remove_affiliation(self.org3, user, role="TestStaff")
s3db.pr_remove_affiliation(self.org2, self.org3, role="TestOrgUnit")
auth.s3_retract_role(user_id, self.dvi_editor, for_pe=self.org3)
current.db.rollback()
# -------------------------------------------------------------------------
#def testPerformance(self):
#MAX_RUNTIME = 6 # Maximum acceptable runtime per request in milliseconds
#deployment_settings.security.policy = 8
#from s3.s3aaa import S3Permission
#auth.permission = S3Permission(auth)
#auth.s3_impersonate("normaluser@example.com")
#has_permission = auth.s3_has_permission
#auth.s3_assign_role(auth.user.id, self.dvi_editor, for_pe=self.org1)
#def hasPermission():
#permitted = has_permission("update", c="dvi", f="body", table="dvi_body", record_id=self.record1)
#import timeit
#runtime = timeit.Timer(hasPermission).timeit(number=100)
#if runtime > (MAX_RUNTIME / 10.0):
#raise AssertionError("has_permission: maximum acceptable run time exceeded (%sms > %sms)" % (int(runtime * 10), MAX_RUNTIME))
#auth.s3_retract_role(auth.user.id, self.dvi_editor, for_pe=[])
# -------------------------------------------------------------------------
def tearDown(self):
self.role = None
current.deployment_settings.security.policy = self.policy
current.auth.s3_impersonate(None)
current.db.rollback()
# -------------------------------------------------------------------------
@classmethod
def tearDownClass(cls):
s3_delete_role = current.auth.s3_delete_role
s3_delete_role("TESTDVIREADER")
s3_delete_role("TESTDVIEDITOR")
s3_delete_role("TESTDVIADMIN")
# =============================================================================
class AccessibleQueryTests(unittest.TestCase):
""" Test accessible query for all policies """
# -------------------------------------------------------------------------
@classmethod
def setUpClass(cls):
auth = current.auth
# Create test roles
acl = auth.permission
s3_create_role = auth.s3_create_role
s3_create_role("DVI Reader", None,
dict(c="dvi",
uacl=acl.READ, oacl=acl.READ),
dict(c="dvi", f="body",
uacl=acl.READ|acl.CREATE, oacl=acl.READ|acl.UPDATE|acl.DELETE),
dict(t="dvi_body",
uacl=acl.READ|acl.CREATE|acl.UPDATE, oacl=acl.READ|acl.UPDATE),
uid="TESTDVIREADER")
s3_create_role("DVI Editor", None,
dict(c="dvi",
uacl=acl.READ|acl.CREATE|acl.UPDATE, oacl=acl.READ|acl.UPDATE),
dict(c="dvi", f="body",
uacl=acl.READ|acl.CREATE|acl.UPDATE, oacl=acl.READ|acl.UPDATE),
dict(t="dvi_body",
uacl=acl.READ|acl.CREATE|acl.UPDATE, oacl=acl.READ|acl.UPDATE),
uid="TESTDVIEDITOR")
s3_create_role("DVI Admin", None,
dict(c="dvi",
uacl=acl.ALL, oacl=acl.ALL),
dict(c="dvi", f="body",
uacl=acl.ALL, oacl=acl.ALL),
dict(t="dvi_body",
uacl=acl.ALL, oacl=acl.ALL),
uid="TESTDVIADMIN")
current.db.commit()
# -------------------------------------------------------------------------
def setUp(self):
db = current.db
s3db = current.s3db
auth = current.auth
update_super = s3db.update_super
settings = current.deployment_settings
self.policy = settings.get_security_policy()
self.strict = settings.get_security_strict_ownership()
settings.security.strict_ownership = False
# Get the role IDs
gtable = auth.settings.table_group
row = db(gtable.uuid=="TESTDVIREADER").select(limitby=(0, 1)).first()
self.dvi_reader = row.id
row = db(gtable.uuid=="TESTDVIEDITOR").select(limitby=(0, 1)).first()
self.dvi_editor = row.id
row = db(gtable.uuid=="TESTDVIADMIN").select(limitby=(0, 1)).first()
self.dvi_admin = row.id
auth.s3_impersonate("admin@example.com")
# Create test organisations
table = s3db.org_organisation
record_id = table.insert(name="TestOrganisation1")
update_super(table, Storage(id=record_id))
self.org1 = s3db.pr_get_pe_id(table, record_id)
record_id = table.insert(name="TestOrganisation2")
update_super(table, Storage(id=record_id))
self.org2 = s3db.pr_get_pe_id(table, record_id)
record_id = table.insert(name="TestOrganisation3")
update_super(table, Storage(id=record_id))
self.org3 = s3db.pr_get_pe_id(table, record_id)
# Create test records
table = s3db.dvi_body
record_id = table.insert(pe_label="TestRecord1",
owned_by_user=auth.user.id,
realm_entity=self.org1)
update_super(table, Storage(id=record_id))
self.record1 = record_id
record_id = table.insert(pe_label="TestRecord2",
owned_by_user=auth.user.id,
realm_entity=self.org2)
update_super(table, Storage(id=record_id))
self.record2 = record_id
record_id = table.insert(pe_label="TestRecord3",
owned_by_user=auth.user.id,
realm_entity=self.org3)
update_super(table, Storage(id=record_id))
self.record3 = record_id
# Remove session ownership
auth.s3_clear_session_ownership()
auth.s3_impersonate(None)
settings.auth.record_approval = False
# -------------------------------------------------------------------------
def testPolicy3(self):
""" Test accessible query with policy 3 """
auth = current.auth
current.deployment_settings.security.policy = 3
auth.permission = S3Permission(auth)
assertEqual = self.assertEqual
accessible_query = auth.s3_accessible_query
table = current.s3db.dvi_body
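# Sentinel queries (sketch): "(dvi_body.id = 0)" means no access
# at all, "(dvi_body.id > 0)" unrestricted access; anything else
# restricts the rows by ownership and/or realm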
# Check anonymous
auth.s3_impersonate(None)
query = accessible_query("read", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
query = accessible_query("read", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
# Test with TESTDVIREADER
auth.s3_assign_role(auth.user.id, self.dvi_reader)
query = accessible_query("read", "dvi_body", c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id > 0)")
query = accessible_query("update",table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
query = accessible_query("delete", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
auth.s3_retract_role(auth.user.id, self.dvi_reader)
# Test with TESTDVIEDITOR
auth.s3_assign_role(auth.user.id, self.dvi_editor)
query = accessible_query("read", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id > 0)")
query = accessible_query("update", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id > 0)")
query = accessible_query("delete", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
auth.s3_retract_role(auth.user.id, self.dvi_editor)
# -------------------------------------------------------------------------
def testPolicy4(self):
""" Test accessible query with policy 4 """
auth = current.auth
current.deployment_settings.security.policy = 4
auth.permission = S3Permission(auth)
assertEqual = self.assertEqual
accessible_query = auth.s3_accessible_query
table = current.s3db.dvi_body
# Check anonymous
auth.s3_impersonate(None)
query = accessible_query("read", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
query = accessible_query("read", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
# Test with TESTDVIREADER
auth.s3_assign_role(auth.user.id, self.dvi_reader)
query = accessible_query("read", "dvi_body", c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id > 0)")
query = accessible_query("update",table, c="dvi", f="body")
roles = ",".join([str(r) for r in auth.user.realms if r is not None])
assertEqual(str(query), "(((dvi_body.owned_by_user = %s) OR "
"((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL))) OR "
"(dvi_body.owned_by_group IN (%s)))" %
(auth.user.id, roles))
query = accessible_query("delete", table, c="dvi", f="body")
assertEqual(str(query), "(((dvi_body.owned_by_user = %s) OR "
"((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL))) OR "
"(dvi_body.owned_by_group IN (%s)))" %
(auth.user.id, roles))
auth.s3_retract_role(auth.user.id, self.dvi_reader)
# Test with TESTDVIEDITOR
auth.s3_assign_role(auth.user.id, self.dvi_editor)
query = accessible_query("read", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id > 0)")
query = accessible_query("update", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id > 0)")
query = accessible_query("delete", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
auth.s3_retract_role(auth.user.id, self.dvi_editor)
# -------------------------------------------------------------------------
def testPolicy5(self):
""" Test accessible query with policy 5 """
auth = current.auth
current.deployment_settings.security.policy = 5
auth.permission = S3Permission(auth)
assertEqual = self.assertEqual
accessible_query = auth.s3_accessible_query
table = current.s3db.dvi_body
# Check anonymous
auth.s3_impersonate(None)
query = accessible_query("read", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
query = accessible_query("read", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
# Test with TESTDVIREADER
auth.s3_assign_role(auth.user.id, self.dvi_reader)
query = accessible_query("read", "dvi_body", c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id > 0)")
query = accessible_query("update",table, c="dvi", f="body")
roles = ",".join([str(r) for r in auth.user.realms if r is not None])
assertEqual(str(query), "(((dvi_body.owned_by_user = %s) OR "
"((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL))) OR "
"(dvi_body.owned_by_group IN (%s)))" %
(auth.user.id, roles))
query = accessible_query("delete", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
auth.s3_retract_role(auth.user.id, self.dvi_reader)
# Test with TESTDVIEDITOR
auth.s3_assign_role(auth.user.id, self.dvi_editor)
query = accessible_query("read", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id > 0)")
query = accessible_query("update", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id > 0)")
query = accessible_query("delete", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
auth.s3_retract_role(auth.user.id, self.dvi_editor)
# -------------------------------------------------------------------------
def testPolicy6(self):
""" Test accessible query with policy 6 """
auth = current.auth
current.deployment_settings.security.policy = 6
auth.permission = S3Permission(auth)
assertEqual = self.assertEqual
accessible_query = auth.s3_accessible_query
table = current.s3db.dvi_body
# Check anonymous
auth.s3_impersonate(None)
query = accessible_query("read", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
query = accessible_query("read", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
# Test with TESTDVIREADER
auth.s3_assign_role(auth.user.id, self.dvi_reader, for_pe=self.org1)
query = accessible_query("read", "dvi_body", c="dvi", f="body")
assertEqual(str(query), "(((dvi_body.realm_entity = %s) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))" % (self.org1, auth.user.id))
query = accessible_query("update",table, c="dvi", f="body")
assertEqual(str(query), "(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(((dvi_body.owned_by_group = %s) AND "
"(dvi_body.realm_entity IN (%s))) OR "
"(dvi_body.owned_by_group IN (2,3))))" %
(auth.user.id, self.dvi_reader, self.org1))
query = accessible_query("delete", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
auth.s3_retract_role(auth.user.id, self.dvi_reader)
# Test with TESTDVIEDITOR
auth.s3_assign_role(auth.user.id, self.dvi_editor, for_pe=self.org1)
query = accessible_query("read", table, c="dvi", f="body")
assertEqual(str(query), "(((dvi_body.realm_entity = %s) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))" % (self.org1, auth.user.id))
query = accessible_query("update", table, c="dvi", f="body")
assertEqual(str(query), "(((dvi_body.realm_entity = %s) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))" % (self.org1, auth.user.id))
query = accessible_query("delete", table, c="dvi", f="body")
assertEqual(str(query), "(dvi_body.id = 0)")
auth.s3_retract_role(auth.user.id, self.dvi_editor)
# Logout
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testPolicy7(self):
""" Test accessible query with policy 7 """
auth = current.auth
s3db = current.s3db
current.deployment_settings.security.policy = 7
auth.permission = S3Permission(auth)
accessible_query = auth.s3_accessible_query
table = s3db.dvi_body
# Check anonymous
auth.s3_impersonate(None)
query = accessible_query("read", table, c="dvi", f="body")
self.assertEqual(str(query), "(dvi_body.id = 0)")
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
query = accessible_query("read", table, c="dvi", f="body")
self.assertEqual(str(query), "(dvi_body.id = 0)")
# Test with TESTDVIREADER
auth.s3_assign_role(auth.user.id, self.dvi_reader, for_pe=self.org1)
current.deployment_settings.security.strict_ownership = True
query = accessible_query("read", "dvi_body", c="dvi", f="body")
self.assertEqual(str(query), "(((dvi_body.realm_entity = %s) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"((dvi_body.owned_by_user = %s) OR "
"(dvi_body.owned_by_group IN (2,3))))" % (self.org1, auth.user.id))
current.deployment_settings.security.strict_ownership = False
query = accessible_query("read", "dvi_body", c="dvi", f="body")
self.assertEqual(str(query), "(((dvi_body.realm_entity = %s) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))" % (self.org1, auth.user.id))
        query = accessible_query("update", table, c="dvi", f="body")
self.assertEqual(str(query), "(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(((dvi_body.owned_by_group = %s) AND "
"(dvi_body.realm_entity IN (%s))) OR "
"(dvi_body.owned_by_group IN (2,3))))" %
(auth.user.id, self.dvi_reader, self.org1))
query = accessible_query("delete", table, c="dvi", f="body")
self.assertEqual(str(query), "(dvi_body.id = 0)")
# Make org2 a sub-entity of org1
s3db.pr_add_affiliation(self.org1, self.org2, role="TestOrgUnit")
# Reload realms and delegations
auth.s3_impersonate("normaluser@example.com")
# Re-check queries
query = accessible_query("read", "dvi_body", c="dvi", f="body")
qstr = ("(((dvi_body.realm_entity IN (%s,%s)) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))")
self.assertTrue(str(query) == qstr % (self.org1, self.org2, auth.user.id) or
str(query) == qstr % (self.org2, self.org1, auth.user.id))
        query = accessible_query("update", table, c="dvi", f="body")
qstr = ("(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(((dvi_body.owned_by_group = %s) AND "
"(dvi_body.realm_entity IN (%s,%s))) OR "
"(dvi_body.owned_by_group IN (2,3))))")
self.assertTrue(str(query) == qstr % (auth.user.id, self.dvi_reader, self.org1, self.org2) or
str(query) == qstr % (auth.user.id, self.dvi_reader, self.org2, self.org1))
query = accessible_query("delete", table, c="dvi", f="body")
self.assertEqual(str(query), "(dvi_body.id = 0)")
s3db.pr_remove_affiliation(self.org1, self.org2, role="TestOrgUnit")
auth.s3_retract_role(auth.user.id, self.dvi_reader)
# Test with TESTDVIEDITOR
auth.s3_assign_role(auth.user.id, self.dvi_editor, for_pe=self.org1)
query = accessible_query("read", table, c="dvi", f="body")
self.assertEqual(str(query), "(((dvi_body.realm_entity = %s) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))" % (self.org1, auth.user.id))
query = accessible_query("update", table, c="dvi", f="body")
self.assertEqual(str(query), "(((dvi_body.realm_entity = %s) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))" % (self.org1, auth.user.id))
query = accessible_query("delete", table, c="dvi", f="body")
self.assertEqual(str(query), "(dvi_body.id = 0)")
# Make org2 a sub-entity of org1
s3db.pr_add_affiliation(self.org1, self.org2, role="TestOrgUnit")
# Reload realms and delegations
auth.s3_impersonate("normaluser@example.com")
# Re-check queries
qstr = ("(((dvi_body.realm_entity IN (%s,%s)) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))")
query = accessible_query("read", table, c="dvi", f="body")
self.assertTrue(str(query) == qstr % (self.org1, self.org2, auth.user.id) or
str(query) == qstr % (self.org2, self.org1, auth.user.id))
query = accessible_query("update", table, c="dvi", f="body")
self.assertTrue(str(query) == qstr % (self.org1, self.org2, auth.user.id) or
str(query) == qstr % (self.org2, self.org1, auth.user.id))
query = accessible_query("delete", table, c="dvi", f="body")
self.assertEqual(str(query), "(dvi_body.id = 0)")
s3db.pr_remove_affiliation(self.org1, self.org2, role="TestOrgUnit")
auth.s3_retract_role(auth.user.id, self.dvi_editor)
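    # -------------------------------------------------------------------------
    # Sketch of the policy-7 hierarchy effect tested above: affiliating org2
    # as an OU of org1 extends any role assigned for org1 to org2's records
    # as well (not auto-run; assumes the setUp fixtures, an impersonated
    # user, and policy 7):
    def _sketch_policy7_hierarchy(self):
        auth = current.auth
        s3db = current.s3db
        auth.s3_assign_role(auth.user.id, self.dvi_reader, for_pe=self.org1)
        s3db.pr_add_affiliation(self.org1, self.org2, role="TestOrgUnit")
        # Re-login so that the user's realms are rebuilt
        auth.s3_impersonate("normaluser@example.com")
        try:
            # Now filters on dvi_body.realm_entity IN (org1, org2)
            return auth.s3_accessible_query("read", s3db.dvi_body,
                                            c="dvi", f="body")
        finally:
            s3db.pr_remove_affiliation(self.org1, self.org2, role="TestOrgUnit")
            auth.s3_retract_role(auth.user.id, self.dvi_reader)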
# -------------------------------------------------------------------------
def testPolicy8(self):
s3db = current.s3db
auth = current.auth
current.deployment_settings.security.policy = 8
auth.permission = S3Permission(auth)
accessible_query = auth.s3_accessible_query
table = s3db.dvi_body
# Check anonymous
auth.s3_impersonate(None)
query = accessible_query("read", table, c="dvi", f="body")
self.assertEqual(str(query), "(dvi_body.id = 0)")
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
query = accessible_query("read", table, c="dvi", f="body")
self.assertEqual(str(query), "(dvi_body.id = 0)")
        # Resolve the user pe_id before the try-block so the cleanup in
        # finally cannot fail with a NameError
        user = auth.s3_user_pe_id(auth.s3_get_user_id("normaluser@example.com"))
        try:
            # Add the user as OU descendant of org3 and assign dvi_editor
            s3db.pr_add_affiliation(self.org3, user, role="TestStaff")
            auth.s3_assign_role(auth.user.id, self.dvi_editor, for_pe=self.org3)
# User should only be able to access records of org3
query = accessible_query("read", table, c="dvi", f="body")
self.assertEqual(str(query), "(((dvi_body.realm_entity = %s) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))" % (self.org3, auth.user.id))
query = accessible_query("update", table, c="dvi", f="body")
self.assertEqual(str(query), "(((dvi_body.realm_entity = %s) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))" % (self.org3, auth.user.id))
            # Make org3 an OU of org2
s3db.pr_add_affiliation(self.org2, self.org3, role="TestOrgUnit")
auth.s3_delegate_role(self.dvi_reader, self.org1, receiver=self.org2)
# Update realms
auth.s3_impersonate("normaluser@example.com")
# User should now be able to read records of org1 and org3, but update only org3
query = accessible_query("read", table, c="dvi", f="body")
qstr = ("(((dvi_body.realm_entity IN (%s,%s)) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))")
self.assertTrue(str(query) == qstr % (self.org1, self.org3, auth.user.id) or
str(query) == qstr % (self.org3, self.org1, auth.user.id))
query = accessible_query("update", table, c="dvi", f="body")
qstr = ("(((dvi_body.realm_entity = %s) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))")
self.assertEqual(str(query), qstr % (self.org3, auth.user.id))
# Remove the affiliation with org2
s3db.pr_remove_affiliation(self.org2, self.org3, role="TestOrgUnit")
# Update realms
auth.s3_impersonate("normaluser@example.com")
# Check queries again
query = accessible_query("read", table, c="dvi", f="body")
self.assertEqual(str(query), "(((dvi_body.realm_entity = %s) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))" % (self.org3, auth.user.id))
query = accessible_query("update", table, c="dvi", f="body")
self.assertEqual(str(query), "(((dvi_body.realm_entity = %s) OR "
"(dvi_body.realm_entity IS NULL)) OR "
"(((dvi_body.owned_by_user = %s) OR "
"(((dvi_body.owned_by_user IS NULL) AND "
"(dvi_body.owned_by_group IS NULL)) AND "
"(dvi_body.realm_entity IS NULL))) OR "
"(dvi_body.owned_by_group IN (2,3))))" % (self.org3, auth.user.id))
        finally:
            # Remove delegation, affiliation and role
            auth.s3_remove_delegation(self.dvi_reader, self.org1, receiver=self.org2)
            s3db.pr_remove_affiliation(self.org3, user, role="TestStaff")
            s3db.pr_remove_affiliation(self.org2, self.org3, role="TestOrgUnit")
            auth.s3_retract_role(auth.user.id, self.dvi_editor, for_pe=self.org3)
# -------------------------------------------------------------------------
    #def testPerformance(self):
        #MAX_RUNTIME = 5 # Maximum acceptable runtime per request in milliseconds
        #auth = current.auth
        #current.deployment_settings.security.policy = 8
        #auth.permission = S3Permission(auth)
        #auth.s3_impersonate("normaluser@example.com")
        #accessible_query = auth.s3_accessible_query
        #table = current.s3db.dvi_body
        #auth.s3_assign_role(auth.user.id, self.dvi_editor, for_pe=self.org1)
        #def accessibleQuery():
            #query = accessible_query("update", table, c="dvi", f="body")
        #import timeit
        #runtime = timeit.Timer(accessibleQuery).timeit(number=100)
        ## runtime is seconds per 100 calls, i.e. runtime * 10 ms per call
        #if runtime > (MAX_RUNTIME / 10.0):
            #raise AssertionError("accessible_query: maximum acceptable run time exceeded (%sms > %sms)" % (int(runtime * 10), MAX_RUNTIME))
        #auth.s3_retract_role(auth.user.id, self.dvi_editor, for_pe=[])
# -------------------------------------------------------------------------
def tearDown(self):
self.role = None
current.deployment_settings.security.policy = self.policy
current.deployment_settings.security.strict_ownership = self.strict
current.auth.s3_impersonate(None)
current.db.rollback()
# -------------------------------------------------------------------------
@classmethod
def tearDownClass(cls):
auth = current.auth
auth.s3_delete_role("TESTDVIREADER")
auth.s3_delete_role("TESTDVIEDITOR")
auth.s3_delete_role("TESTDVIADMIN")
# =============================================================================
class DelegationTests(unittest.TestCase):
""" Test delegation of roles """
# -------------------------------------------------------------------------
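    # (Background for the tests below: under security policy 8, a role
    #  assigned for one entity can be delegated to a receiver entity;
    #  users affiliated with the receiver (or one of its OU descendants)
    #  then exercise that role for the delegating entity's records.)
    # -------------------------------------------------------------------------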
@classmethod
def setUpClass(cls):
auth = current.auth
# Create test roles
acl = auth.permission
auth.s3_create_role("DVI Reader", None,
dict(c="dvi",
uacl=acl.READ, oacl=acl.READ|acl.UPDATE),
dict(c="dvi", f="body",
uacl=acl.READ|acl.CREATE, oacl=acl.READ|acl.UPDATE),
dict(t="dvi_body",
uacl=acl.READ|acl.CREATE|acl.UPDATE, oacl=acl.READ|acl.UPDATE),
uid="TESTDVIREADER")
auth.s3_create_role("DVI Editor", None,
dict(c="dvi",
uacl=acl.READ|acl.CREATE|acl.UPDATE, oacl=acl.READ|acl.UPDATE),
dict(c="dvi", f="body",
uacl=acl.READ|acl.CREATE|acl.UPDATE, oacl=acl.READ|acl.UPDATE),
dict(t="dvi_body",
uacl=acl.READ|acl.CREATE|acl.UPDATE, oacl=acl.READ|acl.UPDATE),
uid="TESTDVIEDITOR")
auth.s3_create_role("DVI Admin", None,
dict(c="dvi",
uacl=acl.ALL, oacl=acl.ALL),
dict(c="dvi", f="body",
uacl=acl.ALL, oacl=acl.ALL),
dict(t="dvi_body",
uacl=acl.ALL, oacl=acl.ALL),
uid="TESTDVIADMIN")
# -------------------------------------------------------------------------
def setUp(self):
db = current.db
s3db = current.s3db
auth = current.auth
self.policy = current.deployment_settings.get_security_policy()
# Get the role IDs
gtable = auth.settings.table_group
row = db(gtable.uuid=="TESTDVIREADER").select(limitby=(0, 1)).first()
self.dvi_reader = row.id
row = db(gtable.uuid=="TESTDVIEDITOR").select(limitby=(0, 1)).first()
self.dvi_editor = row.id
row = db(gtable.uuid=="TESTDVIADMIN").select(limitby=(0, 1)).first()
self.dvi_admin = row.id
auth.s3_impersonate("admin@example.com")
# Create test organisations
table = s3db.org_organisation
record_id = table.insert(name="TestOrganisation1")
s3db.update_super(table, Storage(id=record_id))
self.org1 = s3db.pr_get_pe_id(table, record_id)
record_id = table.insert(name="TestOrganisation2")
s3db.update_super(table, Storage(id=record_id))
self.org2 = s3db.pr_get_pe_id(table, record_id)
record_id = table.insert(name="TestOrganisation3")
s3db.update_super(table, Storage(id=record_id))
self.org3 = s3db.pr_get_pe_id(table, record_id)
# Create test records
table = s3db.dvi_body
record_id = table.insert(pe_label="TestRecord1",
owned_by_user=auth.user.id,
realm_entity=self.org1)
s3db.update_super(table, Storage(id=record_id))
self.record1 = record_id
record_id = table.insert(pe_label="TestRecord2",
owned_by_user=auth.user.id,
realm_entity=self.org2)
s3db.update_super(table, Storage(id=record_id))
self.record2 = record_id
record_id = table.insert(pe_label="TestRecord3",
owned_by_user=auth.user.id,
realm_entity=self.org3)
s3db.update_super(table, Storage(id=record_id))
self.record3 = record_id
# Remove session ownership
auth.s3_clear_session_ownership()
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testRoleDelegation(self):
""" Test delegation of a role """
s3db = current.s3db
auth = current.auth
current.deployment_settings.security.policy = 8
auth.permission = S3Permission(auth)
auth.s3_impersonate("normaluser@example.com")
user = auth.user.pe_id
try:
            # Add the user as OU descendant of org3 and assign dvi_editor
s3db.pr_add_affiliation(self.org3, user, role="TestStaff")
auth.s3_assign_role(auth.user.id, self.dvi_editor, for_pe=self.org3)
# Make org3 an OU descendant of org2
s3db.pr_add_affiliation(self.org2, self.org3, role="TestOrgUnit")
# Delegate the dvi_reader role for org1 to org2
auth.s3_delegate_role(self.dvi_reader, self.org1, receiver=self.org2)
# Check the delegations
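            # (Shape inferred from the assertions below: auth.user.delegations
            # maps role_id -> {pe_id of the entity through which the user
            # receives the role: [pe_ids the role is delegated for]})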
delegations = auth.user.delegations
self.assertTrue(self.dvi_reader in delegations)
self.assertTrue(self.org3 in delegations[self.dvi_reader])
self.assertTrue(self.org1 in delegations[self.dvi_reader][self.org3])
auth.s3_remove_delegation(self.dvi_reader, self.org1, receiver=self.org2)
# Check the delegations
delegations = auth.user.delegations
self.assertEqual(delegations.keys(), [])
# Delegate the dvi_reader role for org1 to org2
auth.s3_delegate_role([self.dvi_reader, self.dvi_editor], self.org1, receiver=self.org2)
delegations = auth.s3_get_delegations(self.org1)
self.assertNotEqual(delegations, None)
self.assertTrue(isinstance(delegations, Storage))
self.assertTrue(self.org2 in delegations)
self.assertTrue(isinstance(delegations[self.org2], list))
self.assertEqual(len(delegations[self.org2]), 2)
self.assertTrue(self.dvi_reader in delegations[self.org2])
self.assertTrue(self.dvi_editor in delegations[self.org2])
# Check the delegations
delegations = auth.user.delegations
self.assertTrue(self.dvi_reader in delegations)
self.assertTrue(self.dvi_editor in delegations)
self.assertTrue(self.org3 in delegations[self.dvi_reader])
self.assertTrue(self.org1 in delegations[self.dvi_reader][self.org3])
self.assertTrue(self.org3 in delegations[self.dvi_editor])
self.assertTrue(self.org1 in delegations[self.dvi_editor][self.org3])
auth.s3_remove_delegation(self.dvi_editor, self.org1, receiver=self.org2)
delegations = auth.s3_get_delegations(self.org1)
self.assertNotEqual(delegations, None)
self.assertTrue(isinstance(delegations, Storage))
self.assertTrue(self.org2 in delegations)
self.assertTrue(isinstance(delegations[self.org2], list))
self.assertEqual(len(delegations[self.org2]), 1)
self.assertTrue(self.dvi_reader in delegations[self.org2])
# Check the delegations
delegations = auth.user.delegations
self.assertTrue(self.dvi_reader in delegations)
self.assertFalse(self.dvi_editor in delegations)
self.assertTrue(self.org3 in delegations[self.dvi_reader])
self.assertTrue(self.org1 in delegations[self.dvi_reader][self.org3])
auth.s3_remove_delegation(self.dvi_reader, self.org1, receiver=self.org2)
delegations = auth.s3_get_delegations(self.org1)
self.assertNotEqual(delegations, None)
self.assertTrue(isinstance(delegations, Storage))
self.assertEqual(delegations.keys(), [])
# Check the delegations
delegations = auth.user.delegations
self.assertEqual(delegations.keys(), [])
finally:
# Remove delegation, affiliation and role
s3db.pr_remove_affiliation(self.org3, user, role="TestStaff")
s3db.pr_remove_affiliation(self.org2, self.org3, role="TestOrgUnit")
            auth.s3_retract_role(auth.user.id, self.dvi_editor, for_pe=self.org3)
current.db.rollback()
# -------------------------------------------------------------------------
def tearDown(self):
self.role = None
current.deployment_settings.security.policy = self.policy
current.auth.s3_impersonate(None)
current.db.rollback()
# -------------------------------------------------------------------------
@classmethod
def tearDownClass(cls):
auth = current.auth
auth.s3_delete_role("TESTDVIREADER")
auth.s3_delete_role("TESTDVIEDITOR")
auth.s3_delete_role("TESTDVIADMIN")
# =============================================================================
class RecordApprovalTests(unittest.TestCase):
""" Tests for the record approval framework """
# -------------------------------------------------------------------------
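    # (Workflow under test: new records carry approved_by=None until
    #  approved; whether approval is required is controlled globally by
    #  settings.auth.record_approval/record_approval_required_for and per
    #  table by the "requires_approval" config; review, approve and reject
    #  are separate permission methods with their own ACL bits.)
    # -------------------------------------------------------------------------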
def setUp(self):
auth = current.auth
settings = current.deployment_settings
sr = auth.get_system_roles()
auth.permission.update_acl(sr.AUTHENTICATED,
c="org",
uacl=auth.permission.READ,
oacl=auth.permission.READ|auth.permission.UPDATE)
auth.permission.update_acl(sr.AUTHENTICATED,
t="org_organisation",
uacl=auth.permission.READ|auth.permission.CREATE,
oacl=auth.permission.READ|auth.permission.UPDATE)
self.policy = settings.get_security_policy()
settings.security.policy = 5
self.approval = settings.get_auth_record_approval()
settings.auth.record_approval = False
self.approval_for = settings.get_auth_record_approval_required_for()
settings.auth.record_approval_required_for = None
auth.override = False
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testRecordApprovedBy(self):
""" Test whether a new record is unapproved by default """
db = current.db
auth = current.auth
s3db = current.s3db
settings = current.deployment_settings
try:
# Set record approval on
settings.auth.record_approval = True
# Impersonate as admin
auth.s3_impersonate("admin@example.com")
# Create test record
otable = s3db.org_organisation
otable.approved_by.default = None
org = Storage(name="Test Approval Organisation")
org_id = otable.insert(**org)
self.assertTrue(org_id > 0)
org.update(id=org_id)
s3db.update_super(otable, org)
# Check record
row = db(otable.id==org_id).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertEqual(row.approved_by, None)
finally:
db.rollback()
settings.auth.record_approval = False
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testRequiresApproval(self):
""" Test requires_approval settings """
s3db = current.s3db
settings = current.deployment_settings
approval = settings.get_auth_record_approval()
tables = settings.get_auth_record_approval_required_for()
org_approval = s3db.get_config("org_organisation", "requires_approval")
approval_required = current.auth.permission.requires_approval
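        # Decision rule derived from the assertions below:
        # requires_approval(table) is True only when record approval is
        # globally enabled AND either the table name is listed in
        # record_approval_required_for, or that setting is None and the
        # table itself is configured with requires_approval=True.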
try:
# Approval globally turned off
settings.auth.record_approval = False
settings.auth.record_approval_required_for = []
s3db.configure("org_organisation", requires_approval=True)
self.assertFalse(approval_required("org_organisation"))
s3db.clear_config("org_organisation", "requires_approval")
# Approval globally turned on, but set to no tables and table=off
settings.auth.record_approval = True
settings.auth.record_approval_required_for = []
s3db.configure("org_organisation", requires_approval=False)
self.assertFalse(approval_required("org_organisation"))
s3db.clear_config("org_organisation", "requires_approval")
# Approval globally turned on, but set to no tables yet table=on
settings.auth.record_approval = True
settings.auth.record_approval_required_for = []
s3db.configure("org_organisation", requires_approval=True)
self.assertFalse(approval_required("org_organisation"))
s3db.clear_config("org_organisation", "requires_approval")
            # Approval globally turned on, but set to any table and table=on
settings.auth.record_approval = True
settings.auth.record_approval_required_for = None
s3db.configure("org_organisation", requires_approval=True)
self.assertTrue(approval_required("org_organisation"))
s3db.clear_config("org_organisation", "requires_approval")
# Approval globally turned on, but set to different tables and table=on
settings.auth.record_approval = True
settings.auth.record_approval_required_for = ["project_project"]
s3db.configure("org_organisation", requires_approval=True)
self.assertFalse(approval_required("org_organisation"))
s3db.clear_config("org_organisation", "requires_approval")
# Approval globally turned on, set to this table and table=off
settings.auth.record_approval = True
settings.auth.record_approval_required_for = ["org_organisation"]
s3db.configure("org_organisation", requires_approval=False)
self.assertTrue(approval_required("org_organisation"))
s3db.clear_config("org_organisation", "requires_approval")
# Approval globally turned on, set to any table and table=off
settings.auth.record_approval = True
settings.auth.record_approval_required_for = None
s3db.configure("org_organisation", requires_approval=False)
self.assertFalse(approval_required("org_organisation"))
s3db.clear_config("org_organisation", "requires_approval")
# Approval globally turned on, set to any table and no table config
settings.auth.record_approval = True
settings.auth.record_approval_required_for = None
s3db.clear_config("org_organisation", "requires_approval")
self.assertFalse(approval_required("org_organisation"))
s3db.clear_config("org_organisation", "requires_approval")
finally:
settings.auth.record_approval = approval
settings.auth.record_approval_required_for = tables
if org_approval is not None:
s3db.configure("org_organisation",
requires_approval = org_approval)
current.auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testSetDefaultApprover(self):
"""
Test whether default approver is set if current user has
permission to approve records in a table
"""
auth = current.auth
acl = auth.permission
AUTHENTICATED = auth.get_system_roles().AUTHENTICATED
otable = current.s3db.org_organisation
otable.approved_by.default = None
auth.s3_impersonate("normaluser@example.com")
acl.set_default_approver(otable)
self.assertEqual(otable.approved_by.default, None)
# Give user review and approve permissions on this table
acl.update_acl(AUTHENTICATED,
c="org",
uacl=acl.READ|acl.REVIEW|acl.APPROVE,
oacl=acl.READ|acl.UPDATE|acl.REVIEW|acl.APPROVE)
acl.update_acl(AUTHENTICATED,
t="org_organisation",
uacl=acl.READ|acl.CREATE|acl.REVIEW|acl.APPROVE,
oacl=acl.READ|acl.UPDATE|acl.REVIEW|acl.APPROVE)
auth.s3_impersonate("normaluser@example.com")
acl.set_default_approver(otable)
self.assertEqual(otable.approved_by.default, auth.user.id)
auth.s3_impersonate("admin@example.com")
acl.set_default_approver(otable)
self.assertEqual(otable.approved_by.default, auth.user.id)
auth.s3_impersonate(None)
acl.set_default_approver(otable)
self.assertEqual(otable.approved_by.default, None)
# -------------------------------------------------------------------------
def testRecordApprovalWithComponents(self):
""" Test record approval including components """
db = current.db
auth = current.auth
s3db = current.s3db
settings = current.deployment_settings
# Set record approval on
settings.auth.record_approval = True
self.approved_org = None
def org_onapprove_test(record):
self.approved_org = record.id
org_onapprove = s3db.get_config("org_organisation", "onapprove")
otable_requires_approval = s3db.get_config("org_organisation", "requires_approval", False)
s3db.configure("org_organisation",
onapprove=org_onapprove_test,
requires_approval=True)
self.approved_office = None
def office_onapprove_test(record):
self.approved_office = record.id
office_onapprove = s3db.get_config("org_office", "onapprove")
ftable_requires_approval = s3db.get_config("org_office", "requires_approval", False)
s3db.configure("org_office",
onapprove=office_onapprove_test,
requires_approval=True)
try:
# Impersonate as admin
auth.s3_impersonate("admin@example.com")
# Create test record
otable = s3db.org_organisation
otable.approved_by.default = None
org = Storage(name="Test Approval Organisation")
org_id = otable.insert(**org)
self.assertTrue(org_id > 0)
org.update(id=org_id)
s3db.update_super(otable, org)
# Create test component
ftable = s3db.org_office
ftable.approved_by.default = None
office = Storage(name="Test Approval Office",
organisation_id=org_id)
office_id = ftable.insert(**office)
self.assertTrue(office_id > 0)
office.update(id=office_id)
s3db.update_super(ftable, office)
# Check records
row = db(otable.id==org_id).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertEqual(row.approved_by, None)
row = db(ftable.id==office_id).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertEqual(row.approved_by, None)
approved = auth.permission.approved
unapproved = auth.permission.unapproved
# Check approved/unapproved
self.assertFalse(approved(otable, org_id))
self.assertTrue(unapproved(otable, org_id))
self.assertFalse(approved(ftable, office_id))
self.assertTrue(unapproved(ftable, office_id))
# Approve
resource = s3db.resource("org_organisation", id=org_id, unapproved=True)
self.assertTrue(resource.approve(components=["office"]))
# Check record
row = db(otable.id==org_id).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertEqual(row.approved_by, auth.user.id)
row = db(ftable.id==office_id).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertEqual(row.approved_by, auth.user.id)
# Check approved/unapproved
self.assertTrue(approved(otable, org_id))
self.assertFalse(unapproved(otable, org_id))
self.assertTrue(approved(ftable, office_id))
self.assertFalse(unapproved(ftable, office_id))
# Check hooks
self.assertEqual(self.approved_org, org_id)
self.assertEqual(self.approved_office, office_id)
finally:
current.db.rollback()
settings.auth.record_approval = False
auth.s3_impersonate(None)
s3db.configure("org_organisation",
onapprove=org_onapprove,
requires_approval=otable_requires_approval)
s3db.configure("org_office",
onapprove=office_onapprove,
requires_approval=ftable_requires_approval)
# -------------------------------------------------------------------------
    def testRecordApprovalWithoutComponents(self):
        """ Test record approval without components """
db = current.db
auth = current.auth
s3db = current.s3db
settings = current.deployment_settings
# Set record approval on
settings.auth.record_approval = True
otable = s3db.org_organisation
otable_requires_approval = s3db.get_config(otable, "requires_approval", None)
s3db.configure(otable, requires_approval=True)
ftable = s3db.org_office
ftable_requires_approval = s3db.get_config(ftable, "requires_approval", None)
s3db.configure(ftable, requires_approval=True)
try:
# Impersonate as admin
auth.s3_impersonate("admin@example.com")
# Create test record
otable = s3db.org_organisation
otable.approved_by.default = None
org = Storage(name="Test Approval Organisation")
org_id = otable.insert(**org)
self.assertTrue(org_id > 0)
org.update(id=org_id)
s3db.update_super(otable, org)
# Create test component
ftable = s3db.org_office
ftable.approved_by.default = None
office = Storage(name="Test Approval Office",
organisation_id=org_id)
office_id = ftable.insert(**office)
self.assertTrue(office_id > 0)
office.update(id=office_id)
s3db.update_super(ftable, office)
# Check records
row = db(otable.id==org_id).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertEqual(row.approved_by, None)
row = db(ftable.id==office_id).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertEqual(row.approved_by, None)
approved = auth.permission.approved
unapproved = auth.permission.unapproved
# Check approved/unapproved
self.assertFalse(approved(otable, org_id))
self.assertTrue(unapproved(otable, org_id))
self.assertFalse(approved(ftable, office_id))
self.assertTrue(unapproved(ftable, office_id))
# Approve
resource = s3db.resource("org_organisation", id=org_id, unapproved=True)
self.assertTrue(resource.approve(components=None))
# Check record
row = db(otable.id==org_id).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertEqual(row.approved_by, auth.user.id)
row = db(ftable.id==office_id).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertEqual(row.approved_by, None)
# Check approved/unapproved
self.assertTrue(approved(otable, org_id))
self.assertFalse(unapproved(otable, org_id))
self.assertFalse(approved(ftable, office_id))
self.assertTrue(unapproved(ftable, office_id))
finally:
current.db.rollback()
settings.auth.record_approval = False
if otable_requires_approval is not None:
s3db.configure("org_organisation",
requires_approval=otable_requires_approval)
if ftable_requires_approval is not None:
s3db.configure("org_office",
requires_approval=ftable_requires_approval)
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testRecordReject(self):
db = current.db
auth = current.auth
s3db = current.s3db
settings = current.deployment_settings
self.rejected_org = None
def org_onreject_test(record):
self.rejected_org = record.id
org_onreject = s3db.get_config("org_organisation", "onreject")
s3db.configure("org_organisation", onreject=org_onreject_test)
self.rejected_office = None
def office_onreject_test(record):
self.rejected_office = record.id
office_onreject = s3db.get_config("org_office", "onreject")
s3db.configure("org_office", onreject=office_onreject_test)
# Set record approval on
settings.auth.record_approval = True
otable = s3db.org_organisation
otable_requires_approval = s3db.get_config(otable, "requires_approval", None)
otable.approved_by.default = None
ftable = s3db.org_office
ftable_requires_approval = s3db.get_config(ftable, "requires_approval", None)
ftable.approved_by.default = None
try:
# Impersonate as admin
auth.s3_impersonate("admin@example.com")
# Create test record
org = Storage(name="Test Reject Organisation")
org_id = otable.insert(**org)
self.assertTrue(org_id > 0)
org.update(id=org_id)
s3db.update_super(otable, org)
# Create test component
office = Storage(name="Test Reject Office",
organisation_id=org_id)
office_id = ftable.insert(**office)
self.assertTrue(office_id > 0)
office.update(id=office_id)
s3db.update_super(ftable, office)
# Check records
row = db(otable.id==org_id).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertEqual(row.approved_by, None)
row = db(ftable.id==office_id).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertEqual(row.approved_by, None)
# Activate approval for these tables
s3db.configure(otable, requires_approval=True)
s3db.configure(ftable, requires_approval=True)
approved = auth.permission.approved
unapproved = auth.permission.unapproved
# Check approved/unapproved
self.assertFalse(approved(otable, org_id))
self.assertTrue(unapproved(otable, org_id))
self.assertFalse(approved(ftable, office_id))
self.assertTrue(unapproved(ftable, office_id))
# Reject
resource = s3db.resource("org_organisation", id=org_id, unapproved=True)
self.assertTrue(resource.reject())
# Check records
row = db(otable.id==org_id).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertEqual(row.approved_by, None)
self.assertTrue(row.deleted)
row = db(ftable.id==office_id).select(limitby=(0, 1)).first()
self.assertNotEqual(row, None)
self.assertEqual(row.approved_by, None)
self.assertTrue(row.deleted)
# Check hooks
self.assertEqual(self.rejected_org, org_id)
self.assertEqual(self.rejected_office, office_id)
finally:
current.db.rollback()
settings.auth.record_approval = False
auth.s3_impersonate(None)
s3db.configure("org_organisation", onreject=org_onreject)
if otable_requires_approval is not None:
s3db.configure("org_organisation",
requires_approval=otable_requires_approval)
            s3db.configure("org_office", onreject=office_onreject)
            if ftable_requires_approval is not None:
                s3db.configure("org_office",
                               requires_approval=ftable_requires_approval)
# -------------------------------------------------------------------------
def testHasPermissionWithRecordApproval(self):
""" Test has_permission with record approval """
db = current.db
auth = current.auth
acl = auth.permission
s3db = current.s3db
settings = current.deployment_settings
has_permission = auth.s3_has_permission
AUTHENTICATED = auth.get_system_roles().AUTHENTICATED
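        # (Rules exercised below: unapproved records are readable only by
        # their owner; users holding REVIEW/APPROVE permissions may
        # review/approve/reject them, but only while the record is still
        # unapproved.)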
# Store global settings
approval = settings.get_auth_record_approval()
approval_required = settings.get_auth_record_approval_required_for()
# Record approval on, but for no tables
settings.auth.record_approval = True
settings.auth.record_approval_required_for = []
try:
# Impersonate as admin
auth.s3_impersonate("admin@example.com")
# Create test record
otable = s3db.org_organisation
otable.approved_by.default = None
org = Storage(name="Test Approval Organisation")
org_id = otable.insert(**org)
self.assertTrue(org_id > 0)
org.update(id=org_id)
s3db.update_super(otable, org)
            # Normal user can see the unapproved record if approval is not on for this table
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("read", otable, record_id=org_id, c="org", f="organisation")
self.assertTrue(permitted)
permitted = has_permission("update", otable, record_id=org_id, c="org", f="organisation")
self.assertTrue(permitted)
permitted = has_permission("delete", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted) # not allowed as per ACL!
            # They cannot run any of the approval methods without permission, though
permitted = has_permission("review", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
permitted = has_permission("approve", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
permitted = has_permission("reject", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
# Turn on approval for this table
settings.auth.record_approval_required_for = ["org_organisation"]
# Normal user must not see unapproved record
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("read", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
permitted = has_permission("update", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
permitted = has_permission("delete", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
            # Normal user cannot review/approve/reject the record
permitted = has_permission(["read", "review"], otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
permitted = has_permission("review", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
permitted = has_permission("approve", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
permitted = has_permission("reject", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
            # Normal user can see the unapproved record if they own it
db(otable.id==org_id).update(owned_by_user=auth.user.id)
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("read", otable, record_id=org_id, c="org", f="organisation")
self.assertTrue(permitted)
permitted = has_permission("update", otable, record_id=org_id, c="org", f="organisation")
self.assertTrue(permitted)
permitted = has_permission("delete", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted) # not permitted per ACL
            # Normal user cannot review/approve/reject the record even if they own it
permitted = has_permission("review", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
permitted = has_permission("approve", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
permitted = has_permission("reject", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
db(otable.id==org_id).update(owned_by_user=None)
# Give user review and approve permissions on this table
acl.update_acl(AUTHENTICATED,
c="org",
uacl=acl.READ|acl.REVIEW|acl.APPROVE,
oacl=acl.READ|acl.UPDATE|acl.REVIEW|acl.APPROVE)
acl.update_acl(AUTHENTICATED,
t="org_organisation",
uacl=acl.READ|acl.CREATE|acl.REVIEW|acl.APPROVE,
oacl=acl.READ|acl.UPDATE|acl.REVIEW|acl.APPROVE)
            # Normal user still cannot see the unapproved record even with approve permissions
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("read", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
permitted = has_permission("update", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
permitted = has_permission("delete", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
# Normal user can review/approve/reject if they have the approver role
permitted = has_permission(["read", "review"], otable, record_id=org_id, c="org", f="organisation")
self.assertTrue(permitted)
permitted = has_permission("review", otable, record_id=org_id, c="org", f="organisation")
self.assertTrue(permitted)
permitted = has_permission("approve", otable, record_id=org_id, c="org", f="organisation")
self.assertTrue(permitted)
permitted = has_permission("reject", otable, record_id=org_id, c="org", f="organisation")
self.assertTrue(permitted)
# Admin can always see the record
auth.s3_impersonate("admin@example.com")
permitted = has_permission("read", otable, record_id=org_id, c="org", f="organisation")
self.assertTrue(permitted)
# Approve the record
resource = s3db.resource(otable, id=org_id, unapproved=True)
resource.approve()
            # Normal user cannot review/approve/reject once the record is approved
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("review", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
permitted = has_permission("approve", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
permitted = has_permission("reject", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted)
# Normal user can now see the record without having the approver role
auth.s3_impersonate("normaluser@example.com")
permitted = has_permission("read", otable, record_id=org_id, c="org", f="organisation")
self.assertTrue(permitted)
permitted = has_permission("update", otable, record_id=org_id, c="org", f="organisation")
self.assertTrue(permitted)
permitted = has_permission("delete", otable, record_id=org_id, c="org", f="organisation")
self.assertFalse(permitted) # not allowed as per ACL!
finally:
# Restore global settings
settings.auth.record_approval = approval
settings.auth.record_approval_required_for = approval_required
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testAccessibleQueryWithRecordApproval(self):
""" Test accessible_query with record approval """
db = current.db
auth = current.auth
acl = auth.permission
s3db = current.s3db
settings = current.deployment_settings
accessible_query = auth.s3_accessible_query
session = current.session
table = s3db.pr_person
otable = s3db.org_organisation
approval = settings.get_auth_record_approval()
approval_required = settings.get_auth_record_approval_required_for()
# Record approval on, but for no tables
settings.auth.record_approval = True
settings.auth.record_approval_required_for = []
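        # (Query shapes asserted below: "read" filters to approved or
        # personally owned records, "review" to unapproved ones, and
        # requesting both methods together combines the two subsets.)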
try:
AUTHENTICATED = auth.get_system_roles().AUTHENTICATED
# Admin can always see all records
auth.s3_impersonate("admin@example.com")
query = accessible_query("read", table, c="pr", f="person")
self.assertEqual(str(query), "(pr_person.id > 0)")
# User can only see their own records - approved_by not relevant
auth.s3_impersonate("normaluser@example.com")
query = accessible_query("read", table, c="pr", f="person")
self.assertFalse("approved_by" in str(query))
table = s3db.org_organisation
# Approval not required by default
auth.s3_impersonate("normaluser@example.com")
query = accessible_query("read", table, c="org", f="organisation")
self.assertEqual(str(query), "(org_organisation.id > 0)")
settings.auth.record_approval_required_for = ["org_organisation"]
# Admin can see all records
auth.s3_impersonate("admin@example.com")
# See only approved records in read
query = accessible_query("read", table, c="org", f="organisation")
self.assertEqual(str(query), "((org_organisation.approved_by IS NOT NULL) OR " \
"(org_organisation.owned_by_user = %s))" % auth.user.id)
# See only unapproved records in review
query = accessible_query("review", table, c="org", f="organisation")
self.assertEqual(str(query), "(org_organisation.approved_by IS NULL)")
# See all records with both
query = accessible_query(["read", "review"], table, c="org", f="organisation")
self.assertEqual(str(query), "(org_organisation.id > 0)")
# User can only see approved records
auth.s3_impersonate("normaluser@example.com")
# See only approved and personally owned records in read
query = accessible_query("read", table, c="org", f="organisation")
self.assertEqual(str(query), "((org_organisation.approved_by IS NOT NULL) OR " \
"(org_organisation.owned_by_user = %s))" % auth.user.id)
            # See no records in review
query = accessible_query("review", table, c="org", f="organisation")
self.assertEqual(str(query), "(org_organisation.id = 0)")
# See only approved and personally owned records with both
query = accessible_query(["read", "review"], table, c="org", f="organisation")
self.assertEqual(str(query), "((org_organisation.approved_by IS NOT NULL) OR " \
"(org_organisation.owned_by_user = %s))" % auth.user.id)
# Give user review and approve permissions on this table
acl.update_acl(AUTHENTICATED,
c="org",
uacl=acl.READ|acl.REVIEW|acl.APPROVE,
oacl=acl.READ|acl.UPDATE|acl.REVIEW|acl.APPROVE)
acl.update_acl(AUTHENTICATED,
t="org_organisation",
uacl=acl.READ|acl.CREATE|acl.REVIEW|acl.APPROVE,
oacl=acl.READ|acl.UPDATE|acl.REVIEW|acl.APPROVE)
# User can now access unapproved records
auth.s3_impersonate("normaluser@example.com")
# See only approved records in read
query = accessible_query("read", table, c="org", f="organisation")
self.assertTrue("((org_organisation.approved_by IS NOT NULL) OR " \
"(org_organisation.owned_by_user = %s))" % auth.user.id \
in str(query))
# See only unapproved records in review
query = accessible_query("review", table, c="org", f="organisation")
self.assertFalse("(org_organisation.approved_by IS NOT NULL)" in str(query))
self.assertTrue("(org_organisation.approved_by IS NULL)" in str(query))
# See all records with both
query = accessible_query(["read", "approve"], table, c="org", f="organisation")
self.assertTrue("((org_organisation.approved_by IS NOT NULL) OR " \
"(org_organisation.owned_by_user = %s))" % auth.user.id \
in str(query))
self.assertTrue("(org_organisation.approved_by IS NULL)" in str(query))
# Turn off record approval and check the default query
settings.auth.record_approval = False
query = accessible_query("read", table, c="org", f="organisation")
self.assertEqual(str(query), "(org_organisation.id > 0)")
finally:
settings.auth.record_approval = approval
settings.auth.record_approval_required_for = approval_required
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def tearDown(self):
settings = current.deployment_settings
settings.security.policy = self.policy
settings.auth.record_approval = self.approval
settings.auth.record_approval_required_for = self.approval_for
current.auth.s3_impersonate(None)
current.db.rollback()
# =============================================================================
class RealmEntityTests(unittest.TestCase):
""" Test customization hooks for realm entity """
# -------------------------------------------------------------------------
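    # (Hooks under test: a realm_entity hook can be configured per table
    #  via s3db.configure or globally via settings.auth.realm_entity; the
    #  tests below show that the global hook takes precedence over the
    #  table-specific one, and that an explicit entity=... argument to
    #  set_realm_entity bypasses the hooks altogether.)
    # -------------------------------------------------------------------------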
def setUp(self):
db = current.db
s3db = current.s3db
# Create a dummy record
otable = s3db.org_organisation
org = Storage(name="Ownership Test Organisation")
org_id = otable.insert(**org)
org.update(id=org_id)
s3db.update_super(otable, org)
self.org_id = org_id
# Create a dummy record
ftable = s3db.org_office
office = Storage(organisation_id=self.org_id,
name="Ownership Test Office")
office_id = ftable.insert(**office)
office.update(id=office_id)
s3db.update_super(ftable, office)
self.office_id = office_id
# Clear the hooks
tname = "org_organisation"
settings = current.deployment_settings
self.ghook = settings.get_auth_realm_entity()
self.shook = s3db.get_config(tname, "realm_entity")
settings.auth.realm_entity = None
s3db.clear_config(tname, "realm_entity")
self.owned_record = None
# -------------------------------------------------------------------------
def testTableSpecificRealmEntity(self):
""" Test table-specific realm_entity hook """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
otable = s3db.org_organisation
record = otable[self.org_id]
tname = "org_organisation"
s3db.configure(tname, realm_entity = self.realm_entity)
auth.s3_set_record_owner(otable, record, force_update=True)
self.assertEqual(self.owned_record, (tname, record.id))
# -------------------------------------------------------------------------
def testGlobalRealmEntity(self):
""" Test global realm_entity hook """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
otable = s3db.org_organisation
record = otable[self.org_id]
tname = "org_organisation"
settings.auth.realm_entity = self.realm_entity
auth.s3_set_record_owner(otable, record, force_update=True)
self.assertEqual(self.owned_record, (tname, record.id))
# -------------------------------------------------------------------------
def testRealmEntityOverride(self):
""" Check whether global realm_entity hook overrides any table-specific setting """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
otable = s3db.org_organisation
record = otable[self.org_id]
tname = "org_organisation"
s3db.configure(tname, realm_entity = self.realm_entity)
settings.auth.realm_entity = self.realm_entity_override
auth.s3_set_record_owner(otable, record, force_update=True)
self.assertEqual(self.owned_record, "checked")
# -------------------------------------------------------------------------
    def testSetRealmEntityWithRecord(self):
        """ Test that the realm entity can be set for a record """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
otable = s3db.org_organisation
record = otable[self.org_id]
tname = "org_organisation"
settings.auth.realm_entity = self.realm_entity
auth.set_realm_entity(otable, record, force_update=True)
self.assertEqual(self.owned_record, (tname, record.id))
record = otable[self.org_id]
self.assertEqual(record.realm_entity, 5)
# -------------------------------------------------------------------------
def testSetRealmEntityWithRealmComponent(self):
""" Test whether the realm entity of the component updates automatically """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
realm_components = s3db.get_config("org_organisation",
"realm_components", "none")
s3db.configure("org_organisation",
realm_components = ["office"])
try:
otable = s3db.org_organisation
ftable = s3db.org_office
settings.auth.realm_entity = self.realm_entity
record = otable[self.org_id]
record.update_record(realm_entity = None)
record = ftable[self.office_id]
record.update_record(realm_entity = None)
record = otable[self.org_id]
auth.set_realm_entity(otable, record, force_update=True)
tname = "org_organisation"
self.assertEqual(self.owned_record, (tname, record.id))
record = otable[self.org_id]
self.assertEqual(record.realm_entity, 5)
record = ftable[self.office_id]
self.assertEqual(record.realm_entity, 5)
finally:
if realm_components != "none":
s3db.configure("org_organisation",
realm_components=realm_components)
else:
s3db.clear_config("org_organisation", "realm_components")
# -------------------------------------------------------------------------
    def testSetRealmEntityWithRecordID(self):
        """ Test that the realm entity can be set for a record ID """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
otable = s3db.org_organisation
record = otable[self.org_id]
tname = "org_organisation"
settings.auth.realm_entity = self.realm_entity
auth.set_realm_entity(otable, self.org_id, force_update=True)
self.assertEqual(self.owned_record, (tname, record.id))
record = otable[self.org_id]
self.assertEqual(record.realm_entity, 5)
# -------------------------------------------------------------------------
    def testSetRealmEntityWithRecordIDList(self):
        """ Test that the realm entity can be set for a list of record IDs """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
otable = s3db.org_organisation
record = otable[self.org_id]
tname = "org_organisation"
settings.auth.realm_entity = self.realm_entity
auth.set_realm_entity(otable, [self.org_id], force_update=True)
self.assertEqual(self.owned_record, (tname, record.id))
record = otable[self.org_id]
self.assertEqual(record.realm_entity, 5)
# -------------------------------------------------------------------------
    def testSetRealmEntityWithQuery(self):
        """ Test that the realm entity can be set for a query """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
otable = s3db.org_organisation
record = otable[self.org_id]
tname = "org_organisation"
settings.auth.realm_entity = self.realm_entity
query = (otable.id == self.org_id)
auth.set_realm_entity(otable, query, force_update=True)
self.assertEqual(self.owned_record, (tname, record.id))
record = otable[self.org_id]
self.assertEqual(record.realm_entity, 5)
# -------------------------------------------------------------------------
    def testSetRealmEntityWithQueryAndOverride(self):
        """ Test that the realm entity can be overridden by the caller """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
otable = s3db.org_organisation
tname = "org_organisation"
settings.auth.realm_entity = self.realm_entity
query = (otable.id == self.org_id)
auth.set_realm_entity(otable, query, entity=4, force_update=True)
self.assertEqual(self.owned_record, None)
record = otable[self.org_id]
self.assertEqual(record.realm_entity, 4)
# -------------------------------------------------------------------------
    def testSetRealmEntityWithQueryAndOverrideNone(self):
        """ Test that the realm entity can be set to None """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
otable = s3db.org_organisation
tname = "org_organisation"
settings.auth.realm_entity = self.realm_entity
query = (otable.id == self.org_id)
auth.set_realm_entity(otable, query, entity=None, force_update=True)
self.assertEqual(self.owned_record, None)
record = otable[self.org_id]
self.assertEqual(record.realm_entity, None)
# -------------------------------------------------------------------------
def testUpdateSharedFields(self):
""" Test that realm entity gets set in super-entity """
s3db = current.s3db
auth = current.auth
settings = current.deployment_settings
ftable = s3db.org_office
stable = s3db.org_site
row = ftable[self.office_id]
row.update_record(realm_entity=row["pe_id"])
site_id = row["site_id"]
auth.update_shared_fields(ftable, self.office_id, realm_entity=None)
site = stable[site_id]
self.assertEqual(site["realm_entity"], None)
auth.update_shared_fields(ftable, self.office_id, realm_entity=row["realm_entity"])
site = stable[site_id]
self.assertEqual(site["realm_entity"], row["realm_entity"])
# -------------------------------------------------------------------------
def realm_entity(self, table, row):
""" Dummy method for hook testing """
self.owned_record = (table._tablename, row.id)
return 5
# -------------------------------------------------------------------------
def realm_entity_override(self, table, row):
""" Dummy method for hook testing """
self.owned_record = "checked"
return 6
# -------------------------------------------------------------------------
def tearDown(self):
s3db = current.s3db
settings = current.deployment_settings
# Rollback DB
current.db.rollback()
# Restore the hooks
settings.auth.realm_entity = self.ghook
if self.shook is not None:
s3db.configure("org_organisation", realm_entity=self.shook)
# =============================================================================
class LinkToPersonTests(unittest.TestCase):
""" Test s3_link_to_person """
# -------------------------------------------------------------------------
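    # (Behaviour under test: s3_link_to_person matches an existing
    #  pr_person record via its email contact, creates a new person
    #  record otherwise, sets its realm entity to the organisation's
    #  pe_id, and accepts a single user record or a list - returning
    #  one person_id or a list of them accordingly.)
    # -------------------------------------------------------------------------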
def setUp(self):
s3db = current.s3db
# Create organisation
otable = s3db.org_organisation
org = Storage(name="LTPRTestOrg")
org_id = otable.insert(**org)
self.assertTrue(org_id is not None)
org["id"] = org_id
s3db.update_super(otable, org)
self.org_id = org_id
self.org_pe_id = org.pe_id
# Create person record
ptable = s3db.pr_person
person = Storage(first_name="TestLTPR",
last_name="User")
person_id = ptable.insert(**person)
self.assertTrue(person_id is not None)
person["id"] = person_id
s3db.update_super(ptable, person)
self.person_id = person_id
self.pe_id = person.pe_id
# Add email contact
ctable = s3db.pr_contact
contact = Storage(pe_id=self.pe_id,
contact_method="EMAIL",
value="testltpr@example.com")
contact_id = ctable.insert(**contact)
self.assertTrue(contact_id is not None)
# -------------------------------------------------------------------------
def testLinkToNewPerson(self):
""" Test linking a user account to a new person record """
auth = current.auth
s3db = current.s3db
# Create new user record
utable = auth.settings.table_user
user = Storage(first_name="TestLTPR2",
last_name="User",
email="testltpr2@example.com",
password="XYZ")
user_id = utable.insert(**user)
self.assertTrue(user_id is not None)
user["id"] = user_id
# Link to person
person_id = auth.s3_link_to_person(user, self.org_id)
# Check the person_id
self.assertNotEqual(person_id, None)
self.assertFalse(isinstance(person_id, list))
self.assertNotEqual(person_id, self.person_id)
# Get the person record
ptable = s3db.pr_person
person = ptable[person_id]
self.assertNotEqual(person, None)
# Check the owner
self.assertEqual(person.realm_entity, self.org_pe_id)
# Check the link
ltable = s3db.pr_person_user
query = (ltable.user_id == user_id) & \
(ltable.pe_id == person.pe_id)
links = current.db(query).select()
self.assertEqual(len(links), 1)
# -------------------------------------------------------------------------
def testLinkToExistingPerson(self):
""" Test linking a user account to a pre-existing person record """
auth = current.auth
s3db = current.s3db
# Create new user record
utable = auth.settings.table_user
user = Storage(first_name="TestLTPR",
last_name="User",
email="testltpr@example.com",
password="XYZ")
user_id = utable.insert(**user)
self.assertTrue(user_id is not None)
user["id"] = user_id
# Link to person record
person_id = auth.s3_link_to_person(user, self.org_id)
# Check the person_id
self.assertNotEqual(person_id, None)
self.assertFalse(isinstance(person_id, list))
self.assertEqual(person_id, self.person_id)
# Get the person record
ptable = s3db.pr_person
person = ptable[person_id]
self.assertNotEqual(person, None)
# Check the link
ltable = s3db.pr_person_user
query = (ltable.user_id == user_id) & \
(ltable.pe_id == person.pe_id)
links = current.db(query).select()
self.assertEqual(len(links), 1)
# -------------------------------------------------------------------------
def testUpdateLinkedPerson(self):
""" Test update of a pre-linked person record upon user account update """
auth = current.auth
s3db = current.s3db
# Create new user record
utable = auth.settings.table_user
user = Storage(first_name="TestLTPR",
last_name="User",
email="testltpr@example.com",
password="XYZ")
user_id = utable.insert(**user)
self.assertTrue(user_id is not None)
user["id"] = user_id
# Link to person
person_id = auth.s3_link_to_person(user, self.org_id)
# Check the person_id
self.assertNotEqual(person_id, None)
self.assertFalse(isinstance(person_id, list))
self.assertEqual(person_id, self.person_id)
# Update the user record
update = Storage(first_name="TestLTPR2",
last_name="User",
email="testltpr2@example.com")
current.db(utable.id == user_id).update(**update)
update["id"] = user_id
# Link to person record again
update_id = auth.s3_link_to_person(user, self.org_id)
# Check unchanged person_id
self.assertEqual(update_id, person_id)
# Check updated person record
ptable = s3db.pr_person
person = ptable[update_id]
self.assertEqual(person.first_name, update["first_name"])
self.assertEqual(person.last_name, update["last_name"])
# Check updated contact record
ctable = s3db.pr_contact
query = (ctable.pe_id == self.pe_id) & \
(ctable.contact_method == "EMAIL")
contacts = current.db(query).select()
self.assertEqual(len(contacts), 2)
emails = [contact.value for contact in contacts]
self.assertTrue(user.email in emails)
self.assertTrue(update.email in emails)
# -------------------------------------------------------------------------
def testMultipleUserRecords(self):
""" Test s3_link_to_person with multiple user accounts """
auth = current.auth
s3db = current.s3db
# Create new user records
utable = auth.settings.table_user
users = []
user1 = Storage(first_name="TestLTPR1",
last_name="User",
email="testltpr1@example.com",
password="XYZ")
user_id = utable.insert(**user1)
self.assertTrue(user_id is not None)
user1["id"] = user_id
users.append(user1)
user2 = Storage(first_name="TestLTPR2",
last_name="User",
email="testltpr2@example.com",
password="XYZ")
user_id = utable.insert(**user2)
self.assertTrue(user_id is not None)
user2["id"] = user_id
users.append(user2)
user3 = Storage(first_name="TestLTPR3",
last_name="User",
email="testltpr3@example.com",
password="XYZ")
user_id = utable.insert(**user3)
self.assertTrue(user_id is not None)
user3["id"] = user_id
users.append(user3)
person_ids = auth.s3_link_to_person(users, self.org_id)
self.assertTrue(isinstance(person_ids, list))
self.assertEqual(len(person_ids), 3)
auth.s3_impersonate("testltpr2@example.com")
pe_id = auth.user.pe_id
ptable = s3db.pr_person
query = (ptable.pe_id == pe_id)
person2 = current.db(query).select().first()
self.assertNotEqual(person2, None)
self.assertTrue(person2.id in person_ids)
# -------------------------------------------------------------------------
def tearDown(self):
current.auth.s3_impersonate(None)
current.db.rollback()
# =============================================================================
class EntityRoleManagerTests(unittest.TestCase):
""" Test the entity role manager """
# -------------------------------------------------------------------------
def setUp(self):
auth = current.auth
# Test-login as system administrator
auth.s3_impersonate("admin@example.com")
self.rm = S3EntityRoleManager()
self.user_id = auth.s3_get_user_id("normaluser@example.com")
self.org_id = 1
auth.s3_assign_role(self.user_id, "staff_reader", for_pe=self.org_id)
auth.s3_assign_role(self.user_id, "project_editor", for_pe=self.org_id)
# -------------------------------------------------------------------------
def testGetAssignedRoles(self):
""" Test get_assigned_roles """
roles = self.rm.get_assigned_roles(entity_id=self.org_id)
self.assertTrue(self.user_id in roles)
assigned_roles = roles[self.user_id]
self.assertEqual(len(assigned_roles), 2)
self.assertTrue("staff_reader" in assigned_roles)
self.assertTrue("project_editor" in assigned_roles)
roles = self.rm.get_assigned_roles(entity_id=self.org_id,
user_id=self.user_id)
self.assertTrue(self.user_id in roles)
assigned_roles = roles[self.user_id]
self.assertEqual(len(assigned_roles), 2)
self.assertTrue("staff_reader" in assigned_roles)
self.assertTrue("project_editor" in assigned_roles)
assigned_roles = self.rm.get_assigned_roles(user_id=self.user_id)
self.assertTrue(all([r in assigned_roles[self.org_id]
for r in ("staff_reader", "project_editor")]))
self.assertEqual(len(assigned_roles[self.org_id]), 2)
roles = self.rm.get_assigned_roles(user_id=self.user_id)
self.assertTrue(self.org_id in roles)
assigned_roles = roles[self.org_id]
self.assertEqual(len(assigned_roles), 2)
self.assertTrue("staff_reader" in assigned_roles)
self.assertTrue("project_editor" in assigned_roles)
self.assertRaises(RuntimeError, self.rm.get_assigned_roles)
# -------------------------------------------------------------------------
def testUpdateRoles(self):
""" Test that before/after works """
before = ("staff_reader", "project_editor")
after = ("survey_reader",)
# Give the user a new set of roles
self.rm.update_roles(self.user_id,
self.org_id,
before,
after)
assigned_roles = self.rm.get_assigned_roles(user_id=self.user_id)
self.assertTrue(self.org_id in assigned_roles)
self.assertTrue(all([r in assigned_roles[self.org_id]
for r in after]))
self.assertEqual(len(assigned_roles[self.org_id]), len(after))
# Reverse the changes
self.rm.update_roles(self.user_id,
self.org_id,
after,
before)
assigned_roles = self.rm.get_assigned_roles(user_id=self.user_id)
self.assertTrue(self.org_id in assigned_roles)
self.assertTrue(all([r in assigned_roles[self.org_id]
for r in before]))
self.assertEqual(len(assigned_roles[self.org_id]), len(before))
# -------------------------------------------------------------------------
def tearDown(self):
auth = current.auth
auth.s3_impersonate(None)
auth.s3_retract_role(self.user_id, "staff_reader", for_pe=self.org_id)
auth.s3_retract_role(self.user_id, "project_editor", for_pe=self.org_id)
current.db.rollback()
# -------------------------------------------------------------------------
@classmethod
def tearDownClass(cls):
pass
# =============================================================================
def run_suite(*test_classes):
""" Run the test suite """
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
    unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
run_suite(
AuthUtilsTests,
SetRolesTests,
RoleAssignmentTests,
RecordOwnershipTests,
ACLManagementTests,
HasPermissionTests,
AccessibleQueryTests,
DelegationTests,
RecordApprovalTests,
RealmEntityTests,
LinkToPersonTests,
EntityRoleManagerTests,
)
# END ========================================================================
|
flavour/tldrmp
|
modules/unit_tests/s3/s3aaa.py
|
Python
|
mit
| 165,150
|
import requests
import json
import bs4
from bs4 import BeautifulSoup
import urllib2
import lxml
import os
####I will try to get a list of the relevant URLs, then 'for loop' through them so I don't have to write a different script for each debate.####
##Saving home page as text file.##
res = requests.get('http://www.presidency.ucsb.edu/debates.php')
print type(res)
res.raise_for_status()  # raises an exception if the download failed
print len(res.text)
print (res.text[:250])
home = open('DebateTextHomePage.txt', 'wb')
for chunk in res.iter_content(1000000):
home.write(chunk)
home.close()
##Opening newly created home page text file.##
homepage = open('DebateTextHomePage.txt')
homepageHTML = bs4.BeautifulSoup(homepage.read(), "html.parser")
##Defining and executing function to retrieve all of the home page's URLs.##
urllist = []
def geturl(x):
global urllist
for link in x.find_all('a'):
urllist.append(link.get('href'))
geturl(homepageHTML)
print urllist
##Now that I have my URL list, I want to find the specific elements of the list that I want so that I can extract them.##
print len(urllist)
a = urllist.index("http://www.presidency.ucsb.edu/ws/index.php?pid=116995") #Last Democratic debate link. Output is 40.#
b = urllist.index("http://www.presidency.ucsb.edu/ws/index.php?pid=110903") #First Democratic debate link. Output is 48.#
c = urllist.index("http://www.presidency.ucsb.edu/ws/index.php?pid=115148") #Last Republican debate link. Output is 49.#
d = urllist.index("http://www.presidency.ucsb.edu/ws/index.php?pid=110757") #First Republican debate link. Output is 67.#
print a,b,c,d
## Creating Democrat documents for each line
DEMdebates2016list = urllist[a:b+1]
REPUBdebates2016list = urllist[c:d+1]
print DEMdebates2016list
print REPUBdebates2016list
x = 1
for i in DEMdebates2016list:
    soup = BeautifulSoup(urllib2.urlopen(i), "lxml")
    arry = []  # reset per debate so earlier paragraphs are not written twice
    for tag in soup.find_all('p'):
        arry.append(tag)
    for line in arry:
        fname = "dem_%s.txt" % (x)
        outpath = os.path.abspath(fname)
        with open(outpath, 'w') as f:
            f.write(line.text.encode('utf-8') + '\n')
        x += 1
|
pplatzman/RedBlue-Classifier
|
WritingFiles/dem/demparse.py
|
Python
|
mit
| 2,139
|
#!/usr/bin/python
import matplotlib
matplotlib.use('Agg')
import pylab
import tensorflow as tf
import numpy as np
import scipy.fftpack
import numpy.fft
import random
import json
import itertools
# Returns one sequence of n_steps.
def getNextTrainingBatch(data, n_steps, iter):
    # A displacement to take the batch from: deterministic when iter is
    # given (and truthy), random otherwise.
    data_length = data.shape[0]
    if iter:
        disp = iter * n_steps
        if disp > data_length - n_steps - 1:
            disp = 0
    else:
        disp = random.randint(0, data_length - n_steps - 1)
    return data[disp:disp + n_steps]
def getNextTrainingBatchSequence(data, sample_size, iter=None):
    sequence = getNextTrainingBatch(data, sample_size, iter)
    return np.asarray(sequence)
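# A minimal usage sketch, not part of the project: the synthetic array and
# the window sizes below are illustrative assumptions.
if __name__ == '__main__':
    demo_data = np.arange(1000, dtype=np.float32)
    # Deterministic window: the third block of 16 samples.
    print(getNextTrainingBatchSequence(demo_data, 16, iter=2))
    # Random window of 16 samples.
    print(getNextTrainingBatchSequence(demo_data, 16))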
|
keskival/wavenet_synth
|
manage_data.py
|
Python
|
mit
| 771
|
"""engine.SCons.Options.BoolOption
This file defines the option type for SCons implementing true/false values.
Usage example:
opts = Options()
opts.Add(BoolOption('embedded', 'build for an embedded system', 0))
...
if env['embedded'] == 1:
...
"""
#
# Copyright (c) 2001, 2002, 2003, 2004 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "/home/scons/scons/branch.0/baseline/src/engine/SCons/Options/BoolOption.py 0.96.1.D001 2004/08/23 09:55:29 knight"
__all__ = ('BoolOption', 'True', 'False')
import string
import SCons.Errors
__true_strings = ('y', 'yes', 'true', 't', '1', 'on' , 'all' )
__false_strings = ('n', 'no', 'false', 'f', '0', 'off', 'none')
# We need this since SCons should work independent of the Python version.
True, False = 1, 0
def _text2bool(val):
"""
Converts strings to True/False depending on the 'truth' expressed by
the string. If the string can't be converted, the original value
will be returned.
See '__true_strings' and '__false_strings' for values considered
    'true' or 'false' respectively.
This is usable as 'converter' for SCons' Options.
"""
lval = string.lower(val)
if lval in __true_strings: return True
if lval in __false_strings: return False
raise ValueError("Invalid value for boolean option: %s" % val)
def _validator(key, val, env):
"""
    Validates the given value to be either True or False.
This is usable as 'validator' for SCons' Options.
"""
if not env[key] in (True, False):
raise SCons.Errors.UserError(
'Invalid value for boolean option %s: %s' % (key, env[key]))
def BoolOption(key, help, default):
"""
    The input parameters describe a boolean option, thus they are
    returned with the correct converter and validator appended. The
    'help' text will be appended with '(yes|no)' to show the valid
    values. The result is usable as input to opts.Add().
"""
return (key, '%s (yes|no)' % help, default,
_validator, _text2bool)
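# A minimal sketch (not part of SCons): exercising the converter directly,
# with illustrative sample strings; no SCons Environment is needed.
if __name__ == '__main__':
    for s in ('Yes', 'off', 't'):
        print s, '->', _text2bool(s)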
|
bilke/OpenSG-1.8
|
SConsLocal/scons-local-0.96.1/SCons/Options/BoolOption.py
|
Python
|
lgpl-2.1
| 3,057
|
#!/usr/bin/env python3
import cv2
import numpy as np
from vision import camera_message_framework
import itertools
import time
shape = (500, 500, 3)
size = 1
for dim in shape:
size *= dim
def image_of(axes):
im = np.zeros(shape, dtype=np.uint8)
im[:, :, axes] = 255
return im
black = image_of([]), 'black'
red = image_of([2]), 'red'
green = image_of([1]), 'green'
blue = image_of([0]), 'blue'
yellow = image_of([2, 1]), 'yellow'
cyan = image_of([1, 0]), 'cyan'
pink = image_of([0, 2]), 'pink'
white = image_of([0, 1, 2]), 'white'
images = [black, red, green, blue, yellow, cyan, pink, white]
f = camera_message_framework.Creator('forward', size)
def main():
for im, name in itertools.cycle(images):
f.write_frame(im, int(time.time() * 1000))
print('wrote {}'.format(name))
time.sleep(1)
if __name__ == '__main__':
main()
|
cuauv/software
|
vision/utils/image_ordering_test.py
|
Python
|
bsd-3-clause
| 877
|
def excise(conn, qrelname, tid):
with conn.cursor() as cur:
# Assume 'id' column exists and print that for bookkeeping.
#
# TODO: Instead should find unique constraints and print
# those, or try to print all attributes that are not corrupt.
sql = 'DELETE FROM {0} WHERE ctid = %s RETURNING id'.format(qrelname)
params = (tid,)
cur.execute(sql, params)
row = cur.fetchone()
if row:
return row[0]
return None
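# Hedged usage sketch (hypothetical connection, table name, and ctid; a
# psycopg2 connection is assumed, matching the 'with conn.cursor()' usage
# above):
#
#   import psycopg2
#   conn = psycopg2.connect(dbname='corrupt_db')
#   removed_id = excise(conn, '"public"."events"', '(0,3)')
#   conn.commit()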
|
fdr/pg_corrupt
|
pg_corrupt/excise.py
|
Python
|
bsd-2-clause
| 506
|
import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
def plot_buoyancy(cwd=''):
"""
Plotting routine for the cross section of the buoyancy
Args:
cwd (string): current working directory
"""
xx = np.load(cwd + 'data/xaxis.npy')
uend = np.load(cwd + 'data/sdc.npy')
udirk = np.load(cwd + 'data/dirk.npy')
uimex = np.load(cwd + 'data/rkimex.npy')
uref = np.load(cwd + 'data/uref.npy')
usplit = np.load(cwd + 'data/split.npy')
err_split = np.linalg.norm(usplit.flatten() - uref.flatten(), np.inf) / np.linalg.norm(uref.flatten(), np.inf)
err_dirk = np.linalg.norm(udirk.flatten() - uref.flatten(), np.inf) / np.linalg.norm(uref.flatten(), np.inf)
err_imex = np.linalg.norm(uimex.flatten() - uref.flatten(), np.inf) / np.linalg.norm(uref.flatten(), np.inf)
err_sdc = np.linalg.norm(uend.flatten() - uref.flatten(), np.inf) / np.linalg.norm(uref.flatten(), np.inf)
assert err_split < 4.821E-02, 'ERROR: split error is too high, got %s' % err_split
assert err_dirk < 1.495e-01, 'ERROR: dirk error is too high, got %s' % err_dirk
assert err_imex < 1.305e-01, 'ERROR: imex error is too high, got %s' % err_imex
assert err_sdc < 9.548e-02, 'ERROR: sdc error is too high, got %s' % err_sdc
print("Estimated discretisation error split explicit: %5.3e" % err_split)
print("Estimated discretisation error of DIRK: %5.3e" % err_dirk)
print("Estimated discretisation error of RK-IMEX: %5.3e" % err_imex)
print("Estimated discretisation error of SDC: %5.3e" % err_sdc)
fs = 8
rcParams['figure.figsize'] = 5.0, 2.5
plt.figure()
plt.plot(xx[:, 5], udirk[2, :, 5], '--', color='g', markersize=fs - 2, label='DIRK(4)', dashes=(3, 3))
plt.plot(xx[:, 5], uend[2, :, 5], '-', color='b', label='SDC(4)')
plt.plot(xx[:, 5], uimex[2, :, 5], '--', color='r', markersize=fs - 2, label='IMEX(4)', dashes=(3, 3))
plt.legend(loc='lower left', fontsize=fs, prop={'size': fs})
plt.yticks(fontsize=fs)
plt.xticks(fontsize=fs)
plt.xlabel('x [km]', fontsize=fs, labelpad=0)
    plt.ylabel('Buoyancy', fontsize=fs, labelpad=1)
filename = 'data/boussinesq.png'
plt.savefig(filename, bbox_inches='tight')
if __name__ == "__main__":
plot_buoyancy()
|
Parallel-in-Time/pySDC
|
pySDC/projects/FastWaveSlowWave/plotgmrescounter_boussinesq.py
|
Python
|
bsd-2-clause
| 2,298
|
# coding: utf-8
# In[1]:
from __future__ import print_function
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.datasets.data_utils import get_file
import numpy as np
import random, sys
'''
Example script to generate text from Nietzsche's writings.
At least 20 epochs are required before the generated text
starts sounding coherent.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read().lower()
print('corpus length:', len(text))
chars = set(text)
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 20
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i : i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
# In[4]:
print("X.shape: %s, y.shape: %s" % (X.shape, y.shape))
# In[11]:
# build the model: 2 stacked LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(len(chars), 512, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(512, 512, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(512, len(chars)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# In[ ]:
# helper function to sample an index from a probability array
def sample(a, temperature=1.0):
a = np.log(a)/temperature
a = np.exp(a)/np.sum(np.exp(a))
return np.argmax(np.random.multinomial(1,a,1))
# train the model, output generated text after each iteration
history = []
outputs = []
for iteration in range(1, 5):
print()
print('-' * 50)
print('Iteration', iteration)
history.append(model.fit(X, y, batch_size=1000, nb_epoch=1))
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index : start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
        for _ in range(400):  # avoid shadowing the outer 'iteration'
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
outputs.append(generated)
print()
|
napsternxg/haiku_rnn
|
Keras LSTM RNN Text Gen.py
|
Python
|
gpl-2.0
| 3,463
|
import datetime
# Remove seconds from epoch to get a epoch ending by 0 second
def floor_second_epoch(epoch):
date = datetime.datetime.fromtimestamp(epoch / 1000)
return epoch - (date.second * 1000)
# Convert an epoch to a string
def epoch_to_string(epoch):
    date = datetime.datetime.fromtimestamp(epoch / 1000)
    return '%d-%02d-%02d_%02d_%02d_%02d' % (
        date.year, date.month, date.day,
        date.hour, date.minute, date.second)
# Create a dir name from an epoch and a period
def epoch_to_dir_name(epoch, period):
    date = datetime.datetime.fromtimestamp(epoch / 1000 + period)
    return epoch_to_string(epoch) + '-%02d_%02d_%02d' % (
        date.hour, date.minute, date.second)
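# A minimal sketch with an illustrative millisecond epoch; the output is
# local-timezone dependent, since fromtimestamp() is used above.
if __name__ == '__main__':
    epoch_ms = 1500000000000
    print(floor_second_epoch(epoch_ms))
    print(epoch_to_string(epoch_ms))
    print(epoch_to_dir_name(epoch_ms, 60))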
|
Leelow/upper-body-clustering
|
lib/date.py
|
Python
|
mit
| 1,235
|
import deep_architect.contrib.misc.search_spaces.tensorflow.dnn as css_dnn
import deep_architect.contrib.misc.search_spaces.tensorflow.cnn2d as css_cnn2d
from deep_architect.contrib.misc.search_spaces.tensorflow.common import D
import deep_architect.modules as mo
def get_dnn_search_space_fn(num_classes):
return mo.SearchSpaceFactory(
lambda: css_dnn.dnn_net(num_classes)).get_search_space
def get_conv_search_space_fn(num_classes):
def search_space_fn():
h_num_spatial_reductions = D([2, 3, 4])
h_pool_op = D(['max', 'avg'])
return mo.siso_sequential([
css_cnn2d.conv_net(h_num_spatial_reductions),
css_cnn2d.spatial_squeeze(h_pool_op, D([num_classes]))
])
return mo.SearchSpaceFactory(search_space_fn).get_search_space
name_to_search_space_fn = {
'dnn': get_dnn_search_space_fn,
'conv': get_conv_search_space_fn,
}
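# Hypothetical usage sketch: look up and build a search space for a
# 10-class task (the key and class count below are illustrative):
#
#   search_space_fn = name_to_search_space_fn['conv'](10)
#   inputs, outputs = search_space_fn()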
|
negrinho/deep_architect
|
examples/benchmarks/search_spaces.py
|
Python
|
mit
| 910
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy

from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.encoders.adaptivescalar import AdaptiveScalarEncoder
from nupic.encoders.base import EncoderResult
class DeltaEncoder(AdaptiveScalarEncoder):
"""
This is an implementation of a delta encoder. The delta encoder encodes differences between
successive scalar values instead of encoding the actual values. It returns an actual value when
decoding and not a delta.
"""
def __init__(self, w, minval=None, maxval=None, periodic=False, n=0, radius=0,
resolution=0, name=None, verbosity=0, clipInput=True, forced=False):
"""[ScalarEncoder class method override]"""
self._learningEnabled = True
self._stateLock = False
self.width = 0
self.encoders = None
self.description = []
self.name = name
if periodic:
#Delta scalar encoders take non-periodic inputs only
raise Exception('Delta encoder does not encode periodic inputs')
    assert n != 0  # an adaptive encoder can only be initialized using n
self._adaptiveScalarEnc = AdaptiveScalarEncoder(w=w, n=n, minval=minval,
maxval=maxval, clipInput=True, name=name, verbosity=verbosity, forced=forced)
self.width+=self._adaptiveScalarEnc.getWidth()
self.n = self._adaptiveScalarEnc.n
    self._prevAbsolute = None  # the previous absolute input value
self._prevDelta = None
def encodeIntoArray(self, input, output, learn=None):
if learn is None:
learn = self._learningEnabled
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
output[0:self.n] = 0
else:
#make the first delta zero so that the delta ranges are not messed up.
      if self._prevAbsolute is None:
        self._prevAbsolute = input
delta = input - self._prevAbsolute
self._adaptiveScalarEnc.encodeIntoArray(delta, output, learn)
if not self._stateLock:
self._prevAbsolute = input
self._prevDelta = delta
return output
############################################################################
def setStateLock(self, lock):
self._stateLock = lock
############################################################################
def setFieldStats(self, fieldName, fieldStatistics):
pass
############################################################################
def isDelta(self):
return True
############################################################################
def getBucketIndices(self, input, learn=None):
return self._adaptiveScalarEnc.getBucketIndices(input, learn)
############################################################################
def getBucketInfo(self, buckets):
return self._adaptiveScalarEnc.getBucketInfo(buckets)
############################################################################
def topDownCompute(self, encoded):
"""[ScalarEncoder class method override]"""
#Decode to delta scalar
    if self._prevAbsolute is None or self._prevDelta is None:
return [EncoderResult(value=0, scalar=0,
encoding=numpy.zeros(self.n))]
ret = self._adaptiveScalarEnc.topDownCompute(encoded)
    if self._prevAbsolute is not None:
ret = [EncoderResult(value=ret[0].value+self._prevAbsolute,
scalar=ret[0].scalar+self._prevAbsolute,
encoding=ret[0].encoding)]
# ret[0].value+=self._prevAbsolute
# ret[0].scalar+=self._prevAbsolute
return ret
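# Hedged usage sketch (requires a NuPIC install; the parameters and the
# inputs below are illustrative):
#
#   out = numpy.zeros(100, dtype='uint8')
#   enc = DeltaEncoder(w=21, n=100)
#   enc.encodeIntoArray(10.0, out)   # first call encodes a zero delta
#   enc.encodeIntoArray(12.5, out)   # encodes the delta 2.5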
|
0x0all/nupic
|
py/nupic/encoders/delta.py
|
Python
|
gpl-3.0
| 4,460
|
# Copyright 2010-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class AuthProtocol(object):
def __init__(self, app, conf):
self.conf = conf
self.app = app
def __call__(self, env, start_response):
token = self._get_user_token_from_header(env)
user_headers = self._get_info_from_token(token)
self._add_headers(env, user_headers)
return self.app(env, start_response)
def _header_to_env_var(self, key):
"""Convert header to wsgi env variable.
:param key: http header name (ex. 'X-Auth-Token')
        :return: wsgi env variable name (ex. 'HTTP_X_AUTH_TOKEN')
"""
return 'HTTP_%s' % key.replace('-', '_').upper()
def _add_headers(self, env, headers):
"""Add http headers to environment."""
for (k, v) in headers.iteritems():
env_key = self._header_to_env_var(k)
env[env_key] = v
def get_admin_token(self):
return "ABCDEF0123456789"
def _get_info_from_token(self, token):
if token.startswith("admin"):
role = "admin,%s" % token
else:
role = token
return {
'X_IDENTITY_STATUS': 'Confirmed',
'X_TENANT_ID': token,
'X_TENANT_NAME': token,
'X_USER_ID': token,
'X_USER_NAME': token,
'X_ROLE': role,
}
def _get_header(self, env, key, default=None):
# Copied from keystone.
env_key = self._header_to_env_var(key)
return env.get(env_key, default)
def _get_user_token_from_header(self, env):
token = self._get_header(env, 'X-Auth-Token',
self._get_header(env, 'X-Storage-Token'))
if token:
return token
else:
raise RuntimeError('Unable to find token in headers')
def filter_factory(global_conf, **local_conf):
"""Fakes a keystone filter."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return AuthProtocol(app, conf)
return auth_filter
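# Hedged sketch of wiring the fake filter around a trivial WSGI app (the
# app below is illustrative, not part of the test suite):
#
#   def app(env, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return [env['HTTP_X_USER_NAME']]
#
#   wrapped = filter_factory({})(app)
#   # Requests must carry an X-Auth-Token (or X-Storage-Token) header,
#   # otherwise _get_user_token_from_header raises RuntimeError.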
|
CMSS-BCRDB/RDS
|
trove/tests/fakes/keystone.py
|
Python
|
apache-2.0
| 2,659
|
# coding: utf-8
import os
from difflib import SequenceMatcher
from unicodedata import normalize
def remove_diacritics(s):
try:
s = normalize('NFKD', s)
except TypeError:
s = normalize('NFKD', unicode(s, "utf-8"))
finally:
return s.encode('ASCII', 'ignore').decode('ASCII')
def remove_suffixes_and_prefixes(state):
for term in [" PROVINCE", "PROVINCIA DE ", "STATE OF ", " STATE"]:
state = state.replace(term, "")
return state
def remove_non_alpha_characters(s):
    # Remove characters such as commas, periods, parentheses, etc.
return "".join([c for c in s if c.isalpha() or c in [" ", "-"]])
def similarity_ratio(value1, value2):
s = SequenceMatcher(None, value1, value2)
return s.ratio()
def normalize_value(s):
s = remove_diacritics(s)
s = s.upper()
s = remove_non_alpha_characters(s)
return s
class States:
def __init__(self):
self._states = {}
self.load()
def load(self):
with open(os.path.dirname(os.path.realpath(__file__)) + "/assets/states_abbrev.csv") as fp:
for row in fp.readlines():
row = row.strip()
if "," in row:
name, abbrev = row.split(",")
name = remove_diacritics(name)
name = name.upper()
self._states[name] = abbrev
def get_state_abbrev(self, state):
return self._states.get(state)
def get_state_abbrev_by_similarity(self, state):
similar = [
(similarity_ratio(name, state), abbrev)
for name, abbrev in self._states.items()
]
similar = sorted(similar)
if similar[-1][0] > 0.8:
return similar[-1][1]
def normalize(self, state):
state = remove_suffixes_and_prefixes(state)
state_abbrev = (
self.get_state_abbrev(state) or
self.get_state_abbrev_by_similarity(state) or
state
)
return state_abbrev
def is_a_match(original, normalized, states=None):
original = normalize_value(original)
normalized = normalize_value(normalized)
if original == normalized:
return True
if similarity_ratio(original, normalized) > 0.8:
return True
if states and hasattr(states, 'normalize'):
original = states.normalize(original)
normalized = states.normalize(normalized)
if original == normalized:
return True
return False
STATES = States()
def has_conflicts(original_aff, normaff):
if original_aff:
conflicts = []
for label in ["country_iso_3166", "state", "city"]:
original = original_aff.get(label)
normalized = normaff.get(label)
if original and normalized:
states = STATES if label == "state" else None
if is_a_match(original, normalized, states):
continue
conflicts.append((label, original, normalized))
return conflicts
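# Hedged usage sketch (illustrative affiliations; assumes the bundled
# states_abbrev.csv maps 'SAO PAULO' to 'SP'):
#
#   original = {'country_iso_3166': 'BR', 'state': 'São Paulo', 'city': 'Campinas'}
#   normalized = {'country_iso_3166': 'BR', 'state': 'SP', 'city': 'Campinas'}
#   assert has_conflicts(original, normalized) == []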
|
scieloorg/xylose
|
xylose/aff_validator.py
|
Python
|
bsd-2-clause
| 3,052
|
"""
Perceptron
w = w + N * (d(k) - y) * x(k)
Using perceptron network for oil analysis, with Measuring of 3 parameters
that represent chemical characteristics we can classify the oil, in p1 or p2
p1 = -1
p2 = 1
"""
import random
class Perceptron:
def __init__(
self,
sample: list[list[float]],
target: list[int],
learning_rate: float = 0.01,
epoch_number: int = 1000,
bias: float = -1,
) -> None:
"""
Initializes a Perceptron network for oil analysis
:param sample: sample dataset of 3 parameters with shape [30,3]
:param target: variable for classification with two possible states -1 or 1
:param learning_rate: learning rate used in optimizing.
:param epoch_number: number of epochs to train network on.
:param bias: bias value for the network.
>>> p = Perceptron([], (0, 1, 2))
Traceback (most recent call last):
...
ValueError: Sample data can not be empty
>>> p = Perceptron(([0], 1, 2), [])
Traceback (most recent call last):
...
ValueError: Target data can not be empty
>>> p = Perceptron(([0], 1, 2), (0, 1))
Traceback (most recent call last):
...
ValueError: Sample data and Target data do not have matching lengths
"""
self.sample = sample
if len(self.sample) == 0:
raise ValueError("Sample data can not be empty")
self.target = target
if len(self.target) == 0:
raise ValueError("Target data can not be empty")
if len(self.sample) != len(self.target):
raise ValueError("Sample data and Target data do not have matching lengths")
self.learning_rate = learning_rate
self.epoch_number = epoch_number
self.bias = bias
self.number_sample = len(sample)
self.col_sample = len(sample[0]) # number of columns in dataset
self.weight: list = []
def training(self) -> None:
"""
Trains perceptron for epochs <= given number of epochs
:return: None
>>> data = [[2.0149, 0.6192, 10.9263]]
>>> targets = [-1]
>>> perceptron = Perceptron(data,targets)
>>> perceptron.training() # doctest: +ELLIPSIS
('\\nEpoch:\\n', ...)
...
"""
for sample in self.sample:
sample.insert(0, self.bias)
for i in range(self.col_sample):
self.weight.append(random.random())
self.weight.insert(0, self.bias)
epoch_count = 0
while True:
has_misclassified = False
for i in range(self.number_sample):
u = 0
for j in range(self.col_sample + 1):
u = u + self.weight[j] * self.sample[i][j]
y = self.sign(u)
if y != self.target[i]:
for j in range(self.col_sample + 1):
self.weight[j] = (
self.weight[j]
+ self.learning_rate
* (self.target[i] - y)
* self.sample[i][j]
)
has_misclassified = True
# print('Epoch: \n',epoch_count)
            epoch_count = epoch_count + 1
            # stop once a full epoch completes without a misclassification;
            # to also cap training, add: or epoch_count > self.epoch_number
            if not has_misclassified:
                print(("\nEpoch:\n", epoch_count))
                print("------------------------\n")
                break
def sort(self, sample: list[float]) -> None:
"""
:param sample: example row to classify as P1 or P2
:return: None
>>> data = [[2.0149, 0.6192, 10.9263]]
>>> targets = [-1]
>>> perceptron = Perceptron(data,targets)
>>> perceptron.training() # doctest: +ELLIPSIS
('\\nEpoch:\\n', ...)
...
>>> perceptron.sort([-0.6508, 0.1097, 4.0009]) # doctest: +ELLIPSIS
('Sample: ', ...)
classification: P...
"""
if len(self.sample) == 0:
raise ValueError("Sample data can not be empty")
sample.insert(0, self.bias)
u = 0
for i in range(self.col_sample + 1):
u = u + self.weight[i] * sample[i]
y = self.sign(u)
if y == -1:
print(("Sample: ", sample))
print("classification: P1")
else:
print(("Sample: ", sample))
print("classification: P2")
def sign(self, u: float) -> int:
"""
threshold function for classification
:param u: input number
:return: 1 if the input is greater than 0, otherwise -1
>>> data = [[0],[-0.5],[0.5]]
>>> targets = [1,-1,1]
>>> perceptron = Perceptron(data,targets)
>>> perceptron.sign(0)
1
>>> perceptron.sign(-0.5)
-1
>>> perceptron.sign(0.5)
1
"""
return 1 if u >= 0 else -1
samples = [
[-0.6508, 0.1097, 4.0009],
[-1.4492, 0.8896, 4.4005],
[2.0850, 0.6876, 12.0710],
[0.2626, 1.1476, 7.7985],
[0.6418, 1.0234, 7.0427],
[0.2569, 0.6730, 8.3265],
[1.1155, 0.6043, 7.4446],
[0.0914, 0.3399, 7.0677],
[0.0121, 0.5256, 4.6316],
[-0.0429, 0.4660, 5.4323],
[0.4340, 0.6870, 8.2287],
[0.2735, 1.0287, 7.1934],
[0.4839, 0.4851, 7.4850],
[0.4089, -0.1267, 5.5019],
[1.4391, 0.1614, 8.5843],
[-0.9115, -0.1973, 2.1962],
[0.3654, 1.0475, 7.4858],
[0.2144, 0.7515, 7.1699],
[0.2013, 1.0014, 6.5489],
[0.6483, 0.2183, 5.8991],
[-0.1147, 0.2242, 7.2435],
[-0.7970, 0.8795, 3.8762],
[-1.0625, 0.6366, 2.4707],
[0.5307, 0.1285, 5.6883],
[-1.2200, 0.7777, 1.7252],
[0.3957, 0.1076, 5.6623],
[-0.1013, 0.5989, 7.1812],
[2.4482, 0.9455, 11.2095],
[2.0149, 0.6192, 10.9263],
[0.2012, 0.2611, 5.4631],
]
targets = [
-1,
-1,
-1,
1,
1,
-1,
1,
-1,
1,
1,
-1,
1,
-1,
-1,
-1,
-1,
1,
1,
1,
1,
-1,
1,
1,
1,
1,
-1,
-1,
1,
-1,
1,
]
if __name__ == "__main__":
import doctest
doctest.testmod()
network = Perceptron(
        sample=samples, target=targets, learning_rate=0.01, epoch_number=1000, bias=-1
)
network.training()
print("Finished training perceptron")
print("Enter values to predict or q to exit")
    while True:
        sample: list = []
        quitting = False
        for i in range(len(samples[0])):
            user_input = input("value: ").strip()
            if user_input == "q":
                quitting = True
                break
            observation = float(user_input)
            sample.insert(i, observation)
        if quitting:
            break
        network.sort(sample)
|
TheAlgorithms/Python
|
neural_network/perceptron.py
|
Python
|
mit
| 6,890
|
##############################################################################
#
# Copyright (c) 2004 TINY SPRL. (http://tiny.be) All Rights Reserved.
# Fabien Pinckaers <fp@tiny.Be>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from osv import osv, fields
import time
from datetime import datetime, date
class marsans_room(osv.osv):
_inherit='product.product'
#_name = 'marsans.room'
_columns = {
'isroom': fields.boolean('Is a Room'),
'price_night': fields.integer('Price Night'),
'hotel': fields.many2one('marsans.hotel','Hotel', ondelete='cascade' ),
'm2': fields.integer('M2'),
'beds': fields.selection([('1','One Bed'),('2','Two Beds'),('21','Double Bed'),('3','Double Bed and Extra bed')],'Beds'),
'ac': fields.boolean('A/C'),
'bathroom': fields.binary('Bathroom'),
'bed_image':fields.binary('Bed image'),
}
marsans_room()
class marsans_city(osv.osv):
"""(NULL)"""
_name = 'marsans.city'
_columns = {
'name': fields.char('City', size=32, required=True),
'hotels': fields.one2many('marsans.hotel', 'city_id', 'Hotels'),
'image': fields.binary('Image'),
}
marsans_city()
class marsans_hotel(osv.osv):
def name_get (self, cr, uid, ids, context=None):
res=[]
records = self.browse(cr,uid,ids)
for r in records:
res.append((r.id,r.city_id.name+","+r.name))
return res
"""(NULL)"""
_name = 'marsans.hotel'
_columns = {
'name': fields.char('Name', size=32, required=True),
'image': fields.binary('Image'),
#'price': fields.integer('Price', required=True),
'city_id': fields.many2one('marsans.city', 'City', required=True, ondelete='restrict'),
'gallery': fields.one2many('marsans.gallery.hotel', 'hotel_id', 'Gallery'),
'partner': fields.many2one('res.partner', 'Supplier'),
'rooms': fields.one2many('product.product','hotel','Rooms'),
}
marsans_hotel()
class marsans_gallery_hotel(osv.osv):
"""(NULL)"""
_name = 'marsans.gallery.hotel'
_columns = {
'name': fields.char('Name', size=32, required=True),
'hotel_id': fields.many2one('marsans.hotel', 'Hotel', required=True,ondelete='cascade' ),
'image': fields.binary('Image'),
}
marsans_gallery_hotel()
class marsans_travel(osv.osv):
def _compute_total_price(self, cr, uid, ids, field_name, arg, context=None):
result={}
for h in self.browse(cr, uid, ids, context=None):
total=0
for s in h.scales:
total=total+s.total
#print s.total
result[h.id]=total
return result
"""(NULL)"""
_inherit = 'sale.order'
#_name = 'marsans.travel'
_columns = {
#'name': fields.char('Name', size=64, required=True),
#'partner_id': fields.many2one('res.partner', 'Client'),
'scales': fields.one2many('sale.order.line', 'order_id', 'Scales',domain=[('isscale','=',True)]),
'total': fields.function(_compute_total_price, type='float', method=True, string='Total',store=False),
'istravel' : fields.boolean('Is a travel'),
}
marsans_travel()
class marsans_scale(osv.osv):
"""(NULL)"""
def _compute_days(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for h in self.browse(cr, uid, ids, context=None):
if h.isscale == True:
dias = 0
d1 = datetime.strptime(h.end, '%Y-%m-%d %H:%M:%S')
d0 = datetime.strptime(h.date_i, '%Y-%m-%d %H:%M:%S')
dias = abs((d1-d0).days)
result[h.id] = dias
return result
def _compute_price(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for h in self.browse(cr, uid, ids, context=None):
if h.isscale == True:
result[h.id] = h.days * h.price
self.write(cr,uid,h.id,{'price_unit': h.price,'product_uom_qty': h.days})
#print result[h.id]
else:
result[h.id] = 0
return result
def get_hotels(self, cr, uid, ids, city, context=None): #https://doc.openerp.com/6.1/developer/03_modules_3/#onchange-event-link
c=self.pool.get('marsans.city').browse(cr,uid,city)
return {'domain':{'hotel': [('city_id','=',c.id)]}}
def get_rooms(self, cr, uid, ids, hotel, context=None): #https://doc.openerp.com/6.1/developer/03_modules_3/#onchange-event-link
h=self.pool.get('marsans.hotel').browse(cr,uid,hotel)
return {'domain':{'product_id':[('hotel','=',h.id)]}}
def check_datei(self,cr,uid,ids,date_i,order_id,id,context=None):
res={}
escalas=self.pool.get('sale.order').browse(cr,uid,order_id).scales
for i in escalas:
#print i.date_i
d1 = datetime.strptime(i.end, '%Y-%m-%d %H:%M:%S')
d0 = datetime.strptime(i.date_i, '%Y-%m-%d %H:%M:%S')
di = datetime.strptime(date_i, '%Y-%m-%d %H:%M:%S')
if (id != i.id) & (d0 < di) & (d1 > di):
print i.order_n
                res['warning'] = {'title': 'Date error', 'message': 'This day is already part of another scale'}
res['value'] = {'date_i':fields.datetime.now()}
return res
def check_datee(self,cr,uid,ids,end,order_id,id,context=None):
res={}
escalas=self.pool.get('sale.order').browse(cr,uid,order_id).scales
for i in escalas:
#print i.date_i
d1 = datetime.strptime(i.end, '%Y-%m-%d %H:%M:%S')
d0 = datetime.strptime(i.date_i, '%Y-%m-%d %H:%M:%S')
de = datetime.strptime(end, '%Y-%m-%d %H:%M:%S')
if (id != i.id) & (d0 < de) & (d1 > de):
print i.order_n
                res['warning'] = {'title': 'Date error', 'message': 'This day is already part of another scale'}
res['value'] = {'end':fields.datetime.now()}
return res
def check_iend(self, cr, uid, ids, context=None):
for h in self.browse(cr, uid, ids, context=None):
if h.isscale == True:
d1 = datetime.strptime(h.end, '%Y-%m-%d %H:%M:%S')
d0 = datetime.strptime(h.date_i, '%Y-%m-%d %H:%M:%S')
if d0 > d1:
return False
return True
#_name = 'marsans.scale'
_inherit='sale.order.line'
_order = "order_id,order_n"
_columns = {
'isscale': fields.boolean('Is Scale'),
'order_n': fields.integer('Order', required=True),
#'travel_id': fields.many2one('sale.order','Travel',required=True),
'city': fields.many2one('marsans.city','City'),
#'hotel': fields.selection(_def_sel_hotels,string='Hotel'),
'hotel': fields.many2one('marsans.hotel', 'Hotel', required=True, ondelete='restrict' ),
#'room': fields.many2one('product.product', 'Room' , required=True),
'price': fields.related('product_id','price_night',type='integer', string='Price', store=False),
'end': fields.datetime('End Day', required=True),
'date_i': fields.datetime('Start Day', required=True),
'days': fields.function(_compute_days, type='integer', method=True, string='Days',store=True),
'total': fields.function(_compute_price, type='float', method=True, string='Total',store=False),
}
_defaults = {
'name':'Escala',
}
_constraints = [
        (check_iend, 'The start day of a scale cannot be later than its end day', ['date_i', 'end']),
]
marsans_scale()
|
xxjcaxx/openerp-learning-module
|
marsans/marsans.py
|
Python
|
gpl-2.0
| 7,847
|
"""
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from html.parser import HTMLParser
from urllib.request import urlretrieve
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
# %%
# Reuters Dataset related routines
# --------------------------------
#
# The dataset used in this example is Reuters-21578 as provided by the UCI ML
# repository. It will be automatically downloaded and uncompressed on first
# run.
class ReutersParser(HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
sys.stdout.write(
'\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb))
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
sys.stdout.write('\r')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
# %%
# Main
# ----
#
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
alternate_sign=False)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(max_iter=5),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [('{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
# %%
# Plot results
# ------------
#
# The plot represents the learning curve of the classifier: the evolution
# of classification accuracy over the course of the mini-batches. Accuracy is
# measured on the first 1000 samples, held out as a validation set.
#
# To limit the memory consumption, we queue examples up to a fixed amount
# before feeding them to the learner.
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = [stats['total_fit_time']
for cls_name, stats in sorted(cls_stats.items())]
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
plt.setp(plt.xticks()[1], rotation=30)
autolabel(rectangles)
plt.tight_layout()
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.tight_layout()
plt.show()
|
glemaitre/scikit-learn
|
examples/applications/plot_out_of_core_classification.py
|
Python
|
bsd-3-clause
| 13,417
|
suite = {
"name" : "mx",
"libraries" : {
# ------------- Libraries -------------
"JACOCOAGENT" : {
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/jacoco/jacocoagent-0.7.1-1.jar"],
"sha1" : "2f73a645b02e39290e577ce555f00b02004650b0",
},
"JACOCOREPORT" : {
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/jacoco/jacocoreport-0.7.1-2.jar"],
"sha1" : "a630436391832d697a12c8f7daef8655d7a1efd2",
},
"FINDBUGS_DIST" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/findbugs-3.0.0.zip",
"http://sourceforge.net/projects/findbugs/files/findbugs/3.0.0/findbugs-3.0.0.zip/download",
],
"sha1" : "6e56d67f238dbcd60acb88a81655749aa6419c5b",
},
"SIGTEST" : {
"urls" : [
"http://hg.netbeans.org/binaries/A7674A6D78B7FEA58AF76B357DAE6EA5E3FDFBE9-apitest.jar",
],
"sha1" : "a7674a6d78b7fea58af76b357dae6ea5e3fdfbe9",
},
"CODESNIPPET-DOCLET" : {
"urls" : [
"http://repo1.maven.org/maven2/org/apidesign/javadoc/codesnippet-doclet/0.5/codesnippet-doclet-0.5.jar",
],
"sha1" : "e9f37916a0ee0f2f6dc0c1d4ae0ce6e7c7a6e874",
},
"JUNIT" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/junit-4.11.jar",
"https://search.maven.org/remotecontent?filepath=junit/junit/4.11/junit-4.11.jar",
],
"sha1" : "4e031bb61df09069aeb2bffb4019e7a5034a4ee0",
"eclipse.container" : "org.eclipse.jdt.junit.JUNIT_CONTAINER/4",
"sourceUrls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/junit-4.11-sources.jar",
"https://search.maven.org/remotecontent?filepath=junit/junit/4.11/junit-4.11-sources.jar",
],
"sourceSha1" : "28e0ad201304e4a4abf999ca0570b7cffc352c3c",
"dependencies" : ["HAMCREST"],
"licence" : "CPL",
"maven" : {
"groupId" : "junit",
"artifactId" : "junit",
"version" : "4.11",
}
},
"CHECKSTYLE" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/checkstyle-6.0-all.jar",
"jar:http://sourceforge.net/projects/checkstyle/files/checkstyle/6.0/checkstyle-6.0-bin.zip/download!/checkstyle-6.0/checkstyle-6.0-all.jar",
],
"sha1" : "2bedc7feded58b5fd65595323bfaf7b9bb6a3c7a",
"licence" : "LGPLv21",
"maven" : {
"groupId" : "com.puppycrawl.tools",
"artifactId" : "checkstyle",
"version" : "6.0",
}
},
"HAMCREST" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/hamcrest-core-1.3.jar",
"https://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
],
"sha1" : "42a25dc3219429f0e5d060061f71acb49bf010a0",
"sourceUrls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/hamcrest-core-1.3-sources.jar",
"https://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3-sources.jar",
],
"sourceSha1" : "1dc37250fbc78e23a65a67fbbaf71d2e9cbc3c0b",
"licence" : "BSD-new",
"maven" : {
"groupId" : "org.hamcrest",
"artifactId" : "hamcrest-core",
"version" : "1.3",
}
},
},
"licenses" : {
"GPLv2-CPE" : {
"name" : "GNU General Public License, version 2, with the Classpath Exception",
"url" : "http://openjdk.java.net/legal/gplv2+ce.html"
},
"BSD-new" : {
"name" : "New BSD License (3-clause BSD license)",
"url" : "http://opensource.org/licenses/BSD-3-Clause"
},
"CPL" : {
"name" : "Common Public License Version 1.0",
"url" : "http://opensource.org/licenses/cpl1.0.txt"
},
"LGPLv21" : {
"name" : "GNU Lesser General Public License, version 2.1",
"url" : "http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html"
},
},
"projects" : {
"com.oracle.mxtool.junit" : {
"subDir" : "java",
"sourceDirs" : ["src"],
"dependencies" : [
"JUNIT",
],
"javaCompliance" : "1.8",
},
},
}
|
smarr/mxtool
|
mx.mx/suite.py
|
Python
|
gpl-2.0
| 4,144
|
# coding: utf-8
from __future__ import unicode_literals
import progressbar
import sys
import pprint
import os
import django
sys.path.append("..")
sys.path.append(".")
# Initial Django configuration
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "suite.settings")
django.setup()
from escuelas import models
from django.contrib.auth.models import User
password = 'asdasd123'
print "Reiniciando contraseñas..."
bar = progressbar.ProgressBar()
for user in bar(User.objects.order_by('username')):
user.set_password(password)
user.save()
|
Dte-ba/suite-backend
|
scripts/reiniciar_contraseñas.py
|
Python
|
gpl-3.0
| 565
|
"""
The module series exports functions to compute power series solutions with
Newton's method in double, double double, or quad double precision.
"""
def replace_symbol(pol, idx):
"""
In the polynomial pol,
replaces the first symbol by the symbol at place idx.
"""
from phcpy.phcpy2c2 import py2c_syscon_string_of_symbols
sbl = py2c_syscon_string_of_symbols()
var = sbl.split(' ')
result = pol.replace(var[0], var[idx-1])
return result
def substitute_symbol(pols, idx):
"""
Given in pols is a list of polynomials,
replaces the first symbol by the symbol at place idx.
"""
if idx == 1:
return pols
else:
result = []
for pol in pols:
result.append(replace_symbol(pol, idx))
return result
def standard_newton_series(pols, sols, idx=1, maxdeg=4, nbr=4, verbose=True):
r"""
Computes series in standard double precision for the polynomials
in *pols*, where the leading coefficients are the solutions in *sols*.
    On entry are the following six parameters:
*pols*: a list of string representations of polynomials,
*sols*: a list of solutions of the polynomials in *pols*,
*idx*: index of the series parameter, by default equals 1,
*maxdeg*: maximal degree of the series,
*nbr*: number of steps with Newton's method,
*verbose*: whether to write intermediate output to screen or not.
    On return is a list of lists of strings. Each list of strings
represents the series solution for the variables in the list *pols*.
"""
from phcpy.solver import number_of_symbols
from phcpy.interface import store_standard_solutions
from phcpy.interface import store_standard_system, load_standard_system
from phcpy.phcpy2c2 import py2c_standard_Newton_series as newton
from phcpy.phcpy2c2 import py2c_syspool_standard_size as poolsize
from phcpy.phcpy2c2 import py2c_syspool_copy_to_standard_container
from phcpy.phcpy2c2 import py2c_syspool_standard_clear
nbsym = number_of_symbols(pols)
if verbose:
print "the polynomials :"
for pol in pols:
print pol
print "Number of variables :", nbsym
store_standard_system(pols, nbvar=nbsym)
store_standard_solutions(nbsym, sols)
fail = newton(idx, maxdeg, nbr, int(verbose))
size = (-1 if fail else poolsize())
if verbose:
if size == -1:
print "An error occurred in the execution of Newton's method."
else:
print "Computed %d series solutions." % size
result = []
for k in range(1, size+1):
py2c_syspool_copy_to_standard_container(k)
sersol = load_standard_system()
result.append(substitute_symbol(sersol, idx))
py2c_syspool_standard_clear()
return result
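# A minimal usage sketch for standard_newton_series, mirroring the call in
# example4pade() further below (assumes phcpy is installed and that solve()
# from phcpy.solver succeeds on the system):
#
#   pols = ['(x^2 - 1)*(1-s) + (3*x^2 - 3/2)*s;', 's;']
#   sols = solve(pols, verbose=False)
#   sers = standard_newton_series(pols[:1], sols, idx=2)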
def dobldobl_newton_series(pols, sols, idx=1, maxdeg=4, nbr=4, verbose=True):
r"""
Computes series in double double precision for the polynomials
in *pols*, where the leading coefficients are the solutions in *sols*.
    On entry are the following six parameters:
*pols*: a list of string representations of polynomials,
*sols*: a list of solutions of the polynomials in *pols*,
*idx*: index of the series parameter, by default equals 1,
*maxdeg*: maximal degree of the series,
*nbr*: number of steps with Newton's method,
*verbose*: whether to write intermediate output to screen or not.
    On return is a list of lists of strings. Each list of strings
represents the series solution for the variables in the list *pols*.
"""
from phcpy.solver import number_of_symbols
from phcpy.interface import store_dobldobl_solutions
from phcpy.interface import store_dobldobl_system, load_dobldobl_system
from phcpy.phcpy2c2 import py2c_dobldobl_Newton_series as newton
from phcpy.phcpy2c2 import py2c_syspool_dobldobl_size as poolsize
from phcpy.phcpy2c2 import py2c_syspool_copy_to_dobldobl_container
from phcpy.phcpy2c2 import py2c_syspool_dobldobl_clear
nbsym = number_of_symbols(pols)
if verbose:
print "the polynomials :"
for pol in pols:
print pol
print "Number of variables :", nbsym
store_dobldobl_system(pols, nbvar=nbsym)
store_dobldobl_solutions(nbsym, sols)
fail = newton(idx, maxdeg, nbr, int(verbose))
size = (-1 if fail else poolsize())
if verbose:
if size == -1:
print "An error occurred in the execution of Newton's method."
else:
print "Computed %d series solutions." % size
result = []
for k in range(1, size+1):
py2c_syspool_copy_to_dobldobl_container(k)
sersol = load_dobldobl_system()
result.append(substitute_symbol(sersol, idx))
py2c_syspool_dobldobl_clear()
return result
def quaddobl_newton_series(pols, sols, idx=1, maxdeg=4, nbr=4, verbose=True):
r"""
Computes series in quad double precision for the polynomials
in *pols*, where the leading coefficients are the solutions in *sols*.
    On entry are the following six parameters:
*pols*: a list of string representations of polynomials,
*sols*: a list of solutions of the polynomials in *pols*,
*idx*: index of the series parameter, by default equals 1,
*maxdeg*: maximal degree of the series,
*nbr*: number of steps with Newton's method,
*verbose*: whether to write intermediate output to screen or not.
    On return is a list of lists of strings. Each list of strings
represents the series solution for the variables in the list *pols*.
"""
from phcpy.solver import number_of_symbols
from phcpy.interface import store_quaddobl_solutions
from phcpy.interface import store_quaddobl_system, load_quaddobl_system
from phcpy.phcpy2c2 import py2c_quaddobl_Newton_series as newton
from phcpy.phcpy2c2 import py2c_syspool_quaddobl_size as poolsize
from phcpy.phcpy2c2 import py2c_syspool_copy_to_quaddobl_container
from phcpy.phcpy2c2 import py2c_syspool_quaddobl_clear
nbsym = number_of_symbols(pols)
if verbose:
print "the polynomials :"
for pol in pols:
print pol
print "Number of variables :", nbsym
store_quaddobl_system(pols, nbvar=nbsym)
store_quaddobl_solutions(nbsym, sols)
fail = newton(idx, maxdeg, nbr, int(verbose))
size = (-1 if fail else poolsize())
if verbose:
if size == -1:
print "An error occurred in the execution of Newton's method."
else:
print "Computed %d series solutions." % size
result = []
for k in range(1, size+1):
py2c_syspool_copy_to_quaddobl_container(k)
sersol = load_quaddobl_system()
result.append(substitute_symbol(sersol, idx))
py2c_syspool_quaddobl_clear()
return result
def checkin_newton_power_series(nbsym, lser, idx):
"""
Given in nbsym the number of symbols in the polynomial system,
in lser the list of leading terms in the series and
in idx the index of the parameter, returns True
    if nbsym == len(lser) when idx == 0, or otherwise
    if nbsym == len(lser) + 1 when idx != 0.
An error message is written and False is returned
if the above conditions are not satisfied.
"""
if idx == 0:
okay = (nbsym == len(lser))
else:
okay = (nbsym == len(lser) + 1)
if not okay:
if idx == 0:
dim = nbsym
else:
dim = nbsym - 1
print 'Wrong length of list of leading terms, should be', \
str(dim) + '.'
return okay
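# A small sketch of the check above, with hypothetical values: for a system
# in the symbols x, y, t where idx = 3 points at t, nbsym equals 3 and lser
# needs one entry per remaining variable, so nbsym == len(lser) + 1 holds:
#
#   checkin_newton_power_series(3, ['1 + t;', '2*t;'], 3)   # returns True
#   checkin_newton_power_series(3, ['1 + t;'], 3)           # False + message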
def standard_newton_power_series(pols, lser, idx=1, maxdeg=4, nbr=4, \
checkin=True, verbose=True):
r"""
Computes series in standard double precision for the polynomials
in *pols*, where the leading terms are given in the list *lser*.
    On entry are the following seven parameters:
*pols*: a list of string representations of polynomials,
*lser*: a list of polynomials in the series parameter (e.g.: t),
for use as start terms in Newton's method,
*idx*: index of the series parameter, by default equals 1,
*maxdeg*: maximal degree of the power series,
*nbr*: number of steps with Newton's method,
*checkin*: checks whether the number of symbols in pols matches
      the length of the list lser if idx == 0, or is one more than
the length of the list lser if idx != 0. If the conditions are
not satisfied, then an error message is printed and lser is returned.
*verbose*: whether to write intermediate output to screen or not.
    On return is a list of lists of strings. Each list of strings
represents the series solution for the variables in the list *pols*.
"""
from phcpy.solver import number_of_symbols
from phcpy.interface import store_standard_system, load_standard_system
from phcpy.phcpy2c2 import py2c_standard_Newton_power_series as newton
from phcpy.phcpy2c2 import py2c_syspool_standard_init
from phcpy.phcpy2c2 import py2c_syspool_standard_create
from phcpy.phcpy2c2 import py2c_syspool_standard_size as poolsize
from phcpy.phcpy2c2 import py2c_syspool_copy_to_standard_container
from phcpy.phcpy2c2 import py2c_syspool_standard_clear
nbsym = number_of_symbols(pols)
if verbose:
print "the polynomials :"
for pol in pols:
print pol
print "Number of variables :", nbsym
if checkin:
if not checkin_newton_power_series(nbsym, lser, idx):
return lser
store_standard_system(lser, nbvar=1)
    py2c_syspool_standard_init(1)
    py2c_syspool_standard_create(1)
store_standard_system(pols, nbvar=nbsym)
fail = newton(idx, maxdeg, nbr, int(verbose))
size = (-1 if fail else poolsize())
if verbose:
if size == -1:
print "An error occurred in the execution of Newton's method."
else:
print "Computed one series solution."
py2c_syspool_copy_to_standard_container(1)
result = load_standard_system()
result = substitute_symbol(result, idx)
py2c_syspool_standard_clear()
return result
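# A minimal usage sketch for standard_newton_power_series, mirroring the
# call in viviani2() further below:
#
#   pols = ['2*t^2 - x;', 'x^2 + y^2 + z^2 - 4;', '(x-1)^2 + y^2 - 1;']
#   lser = ['2*t^2;', '2*t;', '2;']
#   nser = standard_newton_power_series(pols, lser, maxdeg=12, nbr=8)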
def dobldobl_newton_power_series(pols, lser, idx=1, maxdeg=4, nbr=4, \
checkin=True, verbose=True):
r"""
Computes series in double double precision for the polynomials
in *pols*, where the leading terms are given in the list *lser*.
    On entry are the following seven parameters:
*pols*: a list of string representations of polynomials,
*lser*: a list of polynomials in the series parameter (e.g.: t),
for use as start terms in Newton's method,
*idx*: index of the series parameter, by default equals 1,
*maxdeg*: maximal degree of the series,
*nbr*: number of steps with Newton's method,
*checkin*: checks whether the number of symbols in pols matches
      the length of the list lser if idx == 0, or is one more than
the length of the list lser if idx != 0. If the conditions are
not satisfied, then an error message is printed and lser is returned.
*verbose*: whether to write intermediate output to screen or not.
    On return is a list of lists of strings. Each list of strings
represents the series solution for the variables in the list *pols*.
"""
from phcpy.solver import number_of_symbols
from phcpy.interface import store_dobldobl_system, load_dobldobl_system
from phcpy.phcpy2c2 import py2c_dobldobl_Newton_power_series as newton
from phcpy.phcpy2c2 import py2c_syspool_dobldobl_init
from phcpy.phcpy2c2 import py2c_syspool_dobldobl_create
from phcpy.phcpy2c2 import py2c_syspool_dobldobl_size as poolsize
from phcpy.phcpy2c2 import py2c_syspool_copy_to_dobldobl_container
from phcpy.phcpy2c2 import py2c_syspool_dobldobl_clear
nbsym = number_of_symbols(pols)
if verbose:
print "the polynomials :"
for pol in pols:
print pol
print "Number of variables :", nbsym
if checkin:
if not checkin_newton_power_series(nbsym, lser, idx):
return lser
store_dobldobl_system(lser, nbvar=1)
    py2c_syspool_dobldobl_init(1)
    py2c_syspool_dobldobl_create(1)
store_dobldobl_system(pols, nbvar=nbsym)
fail = newton(idx, maxdeg, nbr, int(verbose))
size = (-1 if fail else poolsize())
if verbose:
if size == -1:
print "An error occurred in the execution of Newton's method."
else:
print "Computed one series solution."
py2c_syspool_copy_to_dobldobl_container(1)
result = load_dobldobl_system()
result = substitute_symbol(result, idx)
py2c_syspool_dobldobl_clear()
return result
def quaddobl_newton_power_series(pols, lser, idx=1, maxdeg=4, nbr=4, \
checkin=True, verbose=True):
r"""
Computes series in quad double precision for the polynomials
in *pols*, where the leading terms are given in the list *lser*.
    On entry are the following seven parameters:
*pols*: a list of string representations of polynomials,
*lser*: a list of polynomials in the series parameter (e.g.: t),
for use as start terms in Newton's method,
*idx*: index of the series parameter, by default equals 1,
*maxdeg*: maximal degree of the series,
*nbr*: number of steps with Newton's method,
*checkin*: checks whether the number of symbols in pols matches
      the length of the list lser if idx == 0, or is one more than
the length of the list lser if idx != 0. If the conditions are
not satisfied, then an error message is printed and lser is returned.
*verbose*: whether to write intermediate output to screen or not.
    On return is a list of lists of strings. Each list of strings
represents the series solution for the variables in the list *pols*.
"""
from phcpy.solver import number_of_symbols
from phcpy.interface import store_quaddobl_system, load_quaddobl_system
from phcpy.phcpy2c2 import py2c_quaddobl_Newton_power_series as newton
from phcpy.phcpy2c2 import py2c_syspool_quaddobl_init
from phcpy.phcpy2c2 import py2c_syspool_quaddobl_create
from phcpy.phcpy2c2 import py2c_syspool_quaddobl_size as poolsize
from phcpy.phcpy2c2 import py2c_syspool_copy_to_quaddobl_container
from phcpy.phcpy2c2 import py2c_syspool_quaddobl_clear
nbsym = number_of_symbols(pols)
if verbose:
print "the polynomials :"
for pol in pols:
print pol
print "Number of variables :", nbsym
if checkin:
if not checkin_newton_power_series(nbsym, lser, idx):
return lser
store_quaddobl_system(lser, nbvar=1)
    py2c_syspool_quaddobl_init(1)
    py2c_syspool_quaddobl_create(1)
store_quaddobl_system(pols, nbvar=nbsym)
fail = newton(idx, maxdeg, nbr, int(verbose))
size = (-1 if fail else poolsize())
if verbose:
if size == -1:
print "An error occurred in the execution of Newton's method."
else:
print "Computed one series solution."
py2c_syspool_copy_to_quaddobl_container(1)
result = load_quaddobl_system()
result = substitute_symbol(result, idx)
py2c_syspool_quaddobl_clear()
return result
def make_fractions(pols):
"""
Given a list of string representations for the numerator and
denominator polynomials in its even and odd numbered indices,
returns a list of string representations for the fractions.
"""
result = []
(nbr, idx) = (len(pols)/2, 0)
for k in range(nbr):
(num, den) = (pols[idx], pols[idx+1])
idx = idx + 2
frac = '(' + num[:-1] + ')/(' + den[:-1] + ')'
result.append(frac)
return result
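# For example (a sketch; note that the trailing ';' of each polynomial
# string is stripped before the fraction is formed):
#
#   make_fractions(['1 + t;', '1 - t;'])  ==  ['(1 + t)/(1 - t)']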
def rational_forms(pols):
"""
Given a list of lists of string representations for the numerators
and denominators, returns the proper rational representations for
the Pade approximants.
"""
result = []
for pol in pols:
result.append(make_fractions(pol))
return result
def standard_pade_approximants(pols, sols, idx=1, numdeg=2, dendeg=2, \
nbr=4, verbose=True):
r"""
Computes Pade approximants based on the series in standard double
precision for the polynomials in *pols*, where the leading
coefficients of the series are the solutions in *sols*.
On entry are the following seven parameters:
*pols*: a list of string representations of polynomials,
*sols*: a list of solutions of the polynomials in *pols*,
*idx*: index of the series parameter, by default equals 1,
*numdeg*: the degree of the numerator,
*dendeg*: the degree of the denominator,
*nbr*: number of steps with Newton's method,
*verbose*: whether to write intermediate output to screen or not.
    On return is a list of lists of strings. Each list of strings
    represents the Pade approximants for the variables in the list *pols*.
"""
from phcpy.solver import number_of_symbols
from phcpy.interface import store_standard_solutions
from phcpy.interface import store_standard_system, load_standard_system
from phcpy.phcpy2c2 \
import py2c_standard_Pade_approximant as Pade_approximants
from phcpy.phcpy2c2 import py2c_syspool_standard_size as poolsize
from phcpy.phcpy2c2 import py2c_syspool_copy_to_standard_container
from phcpy.phcpy2c2 import py2c_syspool_standard_clear
nbsym = number_of_symbols(pols)
if verbose:
print "the polynomials :"
for pol in pols:
print pol
print "Number of variables :", nbsym
store_standard_system(pols, nbvar=nbsym)
store_standard_solutions(nbsym, sols)
fail = Pade_approximants(idx, numdeg, dendeg, nbr, int(verbose))
size = (-1 if fail else poolsize())
if verbose:
if size == -1:
print "An error occurred in the Pade constructor."
else:
print "Computed %d Pade approximants." % size
result = []
for k in range(1, size+1):
py2c_syspool_copy_to_standard_container(k)
sersol = load_standard_system()
substsersol = substitute_symbol(sersol, idx)
result.append(make_fractions(substsersol))
py2c_syspool_standard_clear()
return result
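# A minimal usage sketch for standard_pade_approximants, mirroring the call
# in example4pade() further below, with sols obtained from phcpy's solve():
#
#   pade = standard_pade_approximants(pols[:1], sols, idx=2)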
def dobldobl_pade_approximants(pols, sols, idx=1, numdeg=2, dendeg=2, \
nbr=4, verbose=True):
r"""
Computes Pade approximants based on the series in double double
precision for the polynomials in *pols*, where the leading
coefficients of the series are the solutions in *sols*.
On entry are the following seven parameters:
*pols*: a list of string representations of polynomials,
*sols*: a list of solutions of the polynomials in *pols*,
*idx*: index of the series parameter, by default equals 1,
*numdeg*: the degree of the numerator,
*dendeg*: the degree of the denominator,
*nbr*: number of steps with Newton's method,
*verbose*: whether to write intermediate output to screen or not.
    On return is a list of lists of strings. Each list of strings
    represents the Pade approximants for the variables in the list *pols*.
"""
from phcpy.solver import number_of_symbols
from phcpy.interface import store_dobldobl_solutions
from phcpy.interface import store_dobldobl_system, load_dobldobl_system
from phcpy.phcpy2c2 \
import py2c_dobldobl_Pade_approximant as Pade_approximants
from phcpy.phcpy2c2 import py2c_syspool_dobldobl_size as poolsize
from phcpy.phcpy2c2 import py2c_syspool_copy_to_dobldobl_container
from phcpy.phcpy2c2 import py2c_syspool_dobldobl_clear
nbsym = number_of_symbols(pols)
if verbose:
print "the polynomials :"
for pol in pols:
print pol
print "Number of variables :", nbsym
store_dobldobl_system(pols, nbvar=nbsym)
store_dobldobl_solutions(nbsym, sols)
fail = Pade_approximants(idx, numdeg, dendeg, nbr, int(verbose))
size = (-1 if fail else poolsize())
if verbose:
if size == -1:
print "An error occurred in the Pade constructor."
else:
print "Computed %d Pade approximants." % size
result = []
for k in range(1, size+1):
py2c_syspool_copy_to_dobldobl_container(k)
sersol = load_dobldobl_system()
substsersol = substitute_symbol(sersol, idx)
result.append(make_fractions(substsersol))
py2c_syspool_dobldobl_clear()
return result
def quaddobl_pade_approximants(pols, sols, idx=1, numdeg=2, dendeg=2, \
nbr=4, verbose=True):
r"""
Computes Pade approximants based on the series in quad double
precision for the polynomials in *pols*, where the leading
coefficients of the series are the solutions in *sols*.
On entry are the following seven parameters:
*pols*: a list of string representations of polynomials,
*sols*: a list of solutions of the polynomials in *pols*,
*idx*: index of the series parameter, by default equals 1,
*numdeg*: the degree of the numerator,
*dendeg*: the degree of the denominator,
*nbr*: number of steps with Newton's method,
*verbose*: whether to write intermediate output to screen or not.
    On return is a list of lists of strings. Each list of strings
    represents the Pade approximants for the variables in the list *pols*.
"""
from phcpy.solver import number_of_symbols
from phcpy.interface import store_quaddobl_solutions
from phcpy.interface import store_quaddobl_system, load_quaddobl_system
from phcpy.phcpy2c2 \
import py2c_quaddobl_Pade_approximant as Pade_approximants
from phcpy.phcpy2c2 import py2c_syspool_quaddobl_size as poolsize
from phcpy.phcpy2c2 import py2c_syspool_copy_to_quaddobl_container
from phcpy.phcpy2c2 import py2c_syspool_quaddobl_clear
nbsym = number_of_symbols(pols)
if verbose:
print "the polynomials :"
for pol in pols:
print pol
print "Number of variables :", nbsym
store_quaddobl_system(pols, nbvar=nbsym)
store_quaddobl_solutions(nbsym, sols)
fail = Pade_approximants(idx, numdeg, dendeg, nbr, int(verbose))
size = (-1 if fail else poolsize())
if verbose:
if size == -1:
print "An error occurred in the Pade constructor."
else:
print "Computed %d Pade approximants." % size
result = []
for k in range(1, size+1):
py2c_syspool_copy_to_quaddobl_container(k)
sersol = load_quaddobl_system()
substsersol = substitute_symbol(sersol, idx)
result.append(make_fractions(substsersol))
py2c_syspool_quaddobl_clear()
return result
def viviani(prc='d'):
"""
Returns the system which stores the Viviani curve,
with some solutions intersected with a plane,
    in double ('d'), double double ('dd'), or quad double ('qd') precision.
"""
from phcpy.solver import solve
pols = ['(1-s)*y + s*(y-1);', \
'x^2 + y^2 + z^2 - 4;' , \
'(x-1)^2 + y^2 - 1;', \
's;']
sols = solve(pols, verbose=False, precision=prc)
print "The solutions on the Viviani curve :"
for sol in sols:
print sol
return (pols[:3], sols)
def viviani2(precision='d'):
"""
Computes the power series expansion for the Viviani curve,
    from a natural parameter perspective.
The default precision is double ('d'). Other precisions
are double double ('dd') and quad double ('qd').
"""
pols = [ '2*t^2 - x;', \
'x^2 + y^2 + z^2 - 4;' , \
'(x-1)^2 + y^2 - 1;']
lser = [ '2*t^2;', '2*t;', '2;']
if precision == 'd':
nser = standard_newton_power_series(pols, lser, maxdeg=12, nbr=8)
elif precision == 'dd':
nser = dobldobl_newton_power_series(pols, lser, maxdeg=12, nbr=8)
elif precision == 'qd':
nser = quaddobl_newton_power_series(pols, lser, maxdeg=12, nbr=8)
    else:
        print 'invalid argument for the precision'
        return
    print nser
def apollonius(precision='d'):
"""
Test on computing the power series at a double solution
    for the problem of Apollonius.
The parameter t is the fourth variable, whence we call
Newton's method with idx equal to four.
"""
pols = [ 'x1^2 + 3*x2^2 - r^2 - 2*r - 1;', \
'x1^2 + 3*x2^2 - r^2 - 4*x1 - 2*r + 3;', \
'3*t^2 + x1^2 - 6*t*x2 + 3*x2^2 - r^2 + 6*t - 2*x1 - 6*x2 + 2*r + 3;']
lser1 = ['1;', '1 + 0.536*t;', '1 + 0.904*t;']
lser2 = ['1;', '1 + 7.464*t;', '1 + 11.196*t;']
if precision == 'd':
nser1 = standard_newton_power_series(pols, lser1, idx=4, nbr=7)
nser2 = standard_newton_power_series(pols, lser2, idx=4, nbr=7)
elif precision == 'dd':
nser1 = dobldobl_newton_power_series(pols, lser1, idx=4, nbr=7)
nser2 = dobldobl_newton_power_series(pols, lser2, idx=4, nbr=7)
elif precision == 'qd':
nser1 = quaddobl_newton_power_series(pols, lser1, idx=4, nbr=7)
nser2 = quaddobl_newton_power_series(pols, lser2, idx=4, nbr=7)
    else:
        print 'invalid argument for the precision'
        return
    print nser1
    print nser2
def example4pade(prc='d'):
"""
    The function x(s) = ((1 + 1/2*s)/(1 + 2*s))^(1/2) is
    a solution of (1-s)*(x^2 - 1) + s*(3*x^2 - 3/2) = 0.
"""
pols = ['(x^2 - 1)*(1-s) + (3*x^2 - 3/2)*s;', 's;']
from phcpy.solver import solve
sols = solve(pols, verbose=False, precision=prc)
for sol in sols:
print sol
    if prc == 'd':
        sers = standard_newton_series(pols[:1], sols, idx=2)
    elif prc == 'dd':
        sers = dobldobl_newton_series(pols[:1], sols, idx=2)
    else:
        sers = quaddobl_newton_series(pols[:1], sols, idx=2)
print 'the series solutions :'
for ser in sers:
print ser
if prc == 'd':
pade = standard_pade_approximants(pols[:1], sols, idx=2)
elif prc == 'dd':
pade = dobldobl_pade_approximants(pols[:1], sols, idx=2)
elif prc == 'qd':
pade = quaddobl_pade_approximants(pols[:1], sols, idx=2)
    else:
        print 'wrong value for the precision'
        return
print 'the Pade approximants :'
for pad in pade:
print pad
def test(precision='d'):
"""
Tests the application of Newton's method to compute power
series solutions of a polynomial system.
"""
(pols, sols) = viviani(precision)
if precision == 'd':
sersols = standard_newton_series(pols, sols)
elif precision == 'dd':
sersols = dobldobl_newton_series(pols, sols)
elif precision == 'qd':
sersols = quaddobl_newton_series(pols, sols)
for series in sersols:
print series
if __name__ == "__main__":
#test('d')
#test('dd')
#test('qd')
#viviani2('qd')
#apollonius()
example4pade('dd')
|
janverschelde/PHCpack
|
src/Python/PHCpy2/phcpy/series.py
|
Python
|
gpl-3.0
| 26,702
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2012 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "avanzosc_foreign_vat",
"version": "1.0",
"depends": ["base_vat"],
"author": "AvanzOSC",
"website" : "http://www.avanzosc.com",
"category": "category",
"description": """
       This module provides:
         * The same functionality as before for European countries.
         * For foreign countries, it checks whether the country code is in the ISO list; it does not check whether the CIF is correct.
""",
"init_xml": [],
'update_xml': [],
'demo_xml': [],
'installable': True,
'active': False,
# 'certificate': 'certificate',
}
|
avanzosc/avanzosc6.1
|
avanzosc_foreign_vat/__openerp__.py
|
Python
|
agpl-3.0
| 1,571
|
import numpy as np
def force1(x,y,z,p,t=0):
    ''' force at position x,y,z with velocity p at time t '''
# no force if the speed is more than 5
if np.max(np.abs(p))>5:
return [0,0,0]
return [0,0,0.5]
def force2(x,y,z,p,t=0):
# test force
if t>70:
return (0,0,0)
if z>200 and x<y:
return (4,2,-14)
if z>200 and x>=y:
return (4,2,4)
return (0,0,0.3)
def damperX(x,y,z,p,t=0):
alpha=np.arctan2(x,y)
if alpha>0 and alpha<np.pi*1/3: return(1,1,0.2)
if alpha>1 and alpha<1.4: return(1,1,0.1)
if z>20:
return(0.4,0.4,0)
return (0.99,1.0,0.99)
def damper2(x,y,z,p,t=0):
alpha=np.arctan2(x,y)
if alpha>np.pi*1/3: return(1,1,0.5)
if alpha<-np.pi*1/2: return(1,1,0.01)
return (1,1,1)
# rotate
def force3(x,y,z,p,t=0):
k=0.002
m=0.0005
#if x**2+y**2 >3600:
if x**2+y**2 >100:
return(k*y-m*x,-k*x-m*y,-0.5)
else:
return(k*y,-k*x,-0.2)
# spread out and fall quickly
def force4(x,y,z,p,t=0):
#return(0.,0,-1)
return(0.01*y,0.01*x,-1)
def nodamper(x,y,z,p,t=0):
return (0.9,0.9,1)
def simpleforce(x,y,z,p,t=0):
if z<-20 and z>-50:
return (-0.01*x, -0.01*y,-0.5)
if z<=-70:
return (0.01*np.sin(z*np.pi/20)*x, 0.01*np.sin(z*np.pi/20)*y,-0.1)
return (0,0,-1)
#-------------------------
# mail from 10.07.
# adjusted for negative heights (these definitions override the earlier
# force4 and nodamper above)
def force4(x,y,z,p,t=0):
if z<-10 and z>-30:
return (0.1*x, 0.1*y,-0.1)
if z<=-30 and z>-130:
return (0,0,-0.5)
if z<=-130 and z>-220:
return (-0.1*x,-0.1*y,-0.1)
return (0,0,-1)
def nodamper(x,y,z,p,t=0):
if z<-300:
return (0,0,0)
return (0.9,0.9,1)
force=force4
myforce=force4
mydamper=damper2
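# A minimal sketch of how these callables are consumed (the integration
# loop itself lives in the animation driver, not in this file): each force
# function returns a force vector and each damper returns per-axis damping
# factors applied to the velocity p.
#
#   fx, fy, fz = myforce(x, y, z, p, t)
#   dx, dy, dz = mydamper(x, y, z, p, t)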
|
microelly2/Animation
|
flowlib.py
|
Python
|
gpl-2.0
| 1,625
|
from setuptools import setup
setup(
name = 'convis',
packages = ['convis','convis.filters'],
version = '0.6.4',
install_requires = ["matplotlib", "litus", "numpy", "future"],
description = 'Convolutional Vision Model',
author = 'Jacob Huth',
author_email = 'jahuth@uos.de',
url = 'https://github.com/jahuth/convis',
download_url = 'https://github.com/jahuth/convis/tarball/0.6.4',
keywords = ['vision model', 'retina model'],
classifiers = [],
)
|
jahuth/convis
|
setup.py
|
Python
|
gpl-3.0
| 468
|
#!/usr/bin/env python
import sys
import argparse
from get_alleles import get_nth_allele
def find_var_info(sequences):
'''
Finds the length of the flanking sequences left and right of a variant.
Also returns the length of the minor allele
Input
- sequences: dictionary of sequences
Output
- var_info: dictionary of flanking lengths for each sequence
'''
var_info = {}
for seq_name in sequences:
sequence = sequences[seq_name]
minor_allele = get_nth_allele(sequence,2)
left_bracket_index = sequence.find('[')
right_bracket_index = sequence.find(']')
start = left_bracket_index
right_flank_length = len(sequence[right_bracket_index+1:])
length = len(minor_allele)
stop = length - right_flank_length
var_info[seq_name] = (start,stop,length)
return var_info
def unit_test():
# Set up test
seq_name = "test"
sequence = "AAAA[GA/T]AAAAA"
major_allele = "AAAAGAAAAAA"
real_length = len(major_allele)
real_left = 4
    real_right = 5
sequences = { seq_name : sequence }
var_info = find_var_info(sequences)
flanks = var_info[seq_name]
start = flanks[0]
stop = flanks[1]
length = flanks[2]
    assert(start == real_left)
    assert(stop == real_right)
    assert(length == real_length)
print("Unit tests passed!")
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=False,description=
'''
Author: Sean La. Given a list of sequences through STDIN in the form "SEQ_NAME=W[X/Y]Z" where W,X,Y, and Z\
are nucleotide subsequences, prints a line in the form "SEQ_NAME START STOP LENGTH" where START and STOP\
are the start and stop indices of the variant X and LENGTH is the length of the allele containing X.
''')
parser.add_argument('-h','--help',action='help',default=argparse.SUPPRESS,
help='Show this help message and exit.')
parser.add_argument('-t','--test',action='store_true',help=
"""
Perform unit tests for this script.
""")
parser.add_argument('-i','--input',metavar='INPUT',help=
"""
Path to the input file. If not set, then takes input through STDIN.
""")
parser.add_argument('-o','--output',metavar='OUTPUT',help=
"""
Path to the output file. If not set, outputs through STDOUT.
""")
args = parser.parse_args()
if args.test:
unit_test()
sys.exit(0)
if args.input:
input_stream = open(args.input,'r')
else:
input_stream = sys.stdin
sequences = {}
for line in input_stream:
tokens = line.split("=")
if len(tokens) == 2:
name = tokens[0]
sequence = tokens[1]
sequences[name] = sequence
input_stream.close()
var_info = find_var_info(sequences)
info_lines = []
for seq_name in var_info:
start = var_info[seq_name][0]
stop = var_info[seq_name][1]
length = var_info[seq_name][2]
info_lines.append( "%s %d %d %d" % (seq_name, start, stop, length) )
if args.output:
with open(args.output,'w') as output_stream:
for line in info_lines:
output_stream.write( "%s\n" % (line) )
else:
for line in info_lines:
print(line)
|
NCBI-Hackathons/PSST
|
src/find_var_info.py
|
Python
|
mit
| 2,932
|
"""
This file opens two files, received_data.dat and original_data.dat, and
compare the bits on them. It prints the number of different bits, and
the BER obtained.
"""
import matplotlib.pyplot as plt
import numpy as np
def get_different_bits(byte1, byte2):
xor = byte1 ^ byte2
result = 0
while xor != 0:
result = result + (xor % 2)
xor = xor // 2
return result
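# For example, 0b1010 ^ 0b0110 == 0b1100, which has two set bits, so:
#
#   get_different_bits(0b1010, 0b0110) == 2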
with open("received_data.dat", "rb") as f:
with open("original_data.dat", "rb") as g:
received_byte = int.from_bytes(f.read(1), byteorder='big')
original_byte = int.from_bytes(g.read(1), byteorder='big')
result = 0
i = 0
results = []
while received_byte != 0:
if (original_byte == 0):
g.seek(0)
original_byte = int.from_bytes(g.read(1), byteorder='big')
result = result + get_different_bits(received_byte, original_byte)
if result == 7:
print(received_byte, original_byte)
results.append(result)
received_byte = int.from_bytes(f.read(1), byteorder='big')
original_byte = int.from_bytes(g.read(1), byteorder='big')
i += 1
        # Sometimes the last byte messes everything up and throws an error
        # when there shouldn't be one. With these two lines, we don't use
        # the last byte at all.
del results[-1]
result = results[-1]
print("# different bits:", result)
print("BER:", result / i)
plt.plot(range(0, len(results)), results)
plt.xlabel('# bytes')
plt.ylabel('# Erroneous bits')
plt.title('Error evolution')
plt.grid(True)
plt.show()
|
auburus/GNURadio-guide
|
utils/calculate_BER.py
|
Python
|
gpl-3.0
| 1,636
|
# Copyright (C) 2014-2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014-2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2015 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.base import routers
from . import api
router = routers.DefaultRouter(trailing_slash=False)
router.register(r"feedback", api.FeedbackViewSet, base_name="feedback")
|
bdang2012/taiga-back-casting
|
taiga/feedback/routers.py
|
Python
|
agpl-3.0
| 1,013
|
import json
import uuid
from django import template
from django.template.loader import render_to_string
from django.conf import settings
from django.core.urlresolvers import reverse
register = template.Library()
@register.simple_tag(takes_context=True)
def lazy_tag(context, tag, *args, **kwargs):
"""
Lazily loads a template tag after the page has loaded. Requires jQuery
(for now).
Usage:
{% load lazy_tags %}
{% lazy_tag 'tag_lib.tag_name' arg1 arg2 kw1='test' kw2='hello' %}
Args:
tag (str): the tag library and tag name separated by a period. For a
template tag named `do_thing` in a tag library named `thing_tags`
        the `tag` argument would be `'thing_tags.do_thing'`.
*args: arguments to be passed to the template tag.
**kwargs: keyword arguments to be passed to the template tag.
"""
# Gross hack to pollute the parent context so lazy_tag_data can be accessed
# from the lazy_tags_javascript tag
c = context.dicts[0]
if not c.get('lazy_tag_data'):
c['lazy_tag_data'] = {}
tag_id = str(uuid.uuid4())
c['lazy_tag_data'][tag_id] = {
'tag': tag,
'args': json.dumps(args or []),
'kwargs': json.dumps(kwargs or {}),
}
return render_to_string('lazy_tags/lazy_tag.html', {
'id': tag_id,
'STATIC_URL': settings.STATIC_URL,
})
@register.simple_tag(takes_context=True)
def lazy_tags_js(context):
"""Outputs the necessary JavaScript to load tags over AJAX."""
tag_url = reverse("lazy_tag")
lazy_tag_data = context.get('lazy_tag_data')
error_message = 'An error occurred.'
if hasattr(settings, 'LAZY_TAGS_ERROR_MESSAGE'):
error_message = getattr(settings, 'LAZY_TAGS_ERROR_MESSAGE')
return render_to_string('lazy_tags/lazy_tags_js.html', {
'lazy_tag_data': lazy_tag_data,
'tag_url': tag_url,
'error_message': error_message,
})
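# A template usage sketch (assumes the app's URLconf exposes the AJAX view
# under the name "lazy_tag", as the reverse() call above expects):
#
#   {% load lazy_tags %}
#   {% lazy_tag 'thing_tags.do_thing' arg1 kw1='test' %}
#   ...
#   {% lazy_tags_js %}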
|
janusnic/django-lazy-tags
|
lazy_tags/templatetags/lazy_tags.py
|
Python
|
mit
| 1,966
|
# -*- coding: utf-8 -*-
# © 2015 Elico corp (www.elico-corp.com)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import controllers
|
Elico-Corp/odoo-addons
|
website_redirect_to_shop/__init__.py
|
Python
|
agpl-3.0
| 162
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for type_info module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.py2tf.pyct import anno
from tensorflow.contrib.py2tf.pyct import context
from tensorflow.contrib.py2tf.pyct import parser
from tensorflow.contrib.py2tf.pyct.static_analysis import access
from tensorflow.contrib.py2tf.pyct.static_analysis import live_values
from tensorflow.contrib.py2tf.pyct.static_analysis import type_info
from tensorflow.python.client import session
from tensorflow.python.platform import test
from tensorflow.python.training import training
class ScopeTest(test.TestCase):
def test_basic(self):
scope = type_info.Scope(None)
self.assertFalse(scope.hasval('foo'))
scope.setval('foo', 'bar')
self.assertTrue(scope.hasval('foo'))
self.assertFalse(scope.hasval('baz'))
def test_nesting(self):
scope = type_info.Scope(None)
scope.setval('foo', '')
child = type_info.Scope(scope)
self.assertTrue(child.hasval('foo'))
self.assertTrue(scope.hasval('foo'))
child.setval('bar', '')
self.assertTrue(child.hasval('bar'))
self.assertFalse(scope.hasval('bar'))
class TypeInfoResolverTest(test.TestCase):
def _parse_and_analyze(self, test_fn, namespace, arg_types=None):
node, source = parser.parse_entity(test_fn)
ctx = context.EntityContext(
namer=None,
source_code=source,
source_file=None,
namespace=namespace,
arg_values=None,
arg_types=arg_types,
recursive=True)
node = access.resolve(node, ctx)
node = live_values.resolve(node, ctx, {})
node = type_info.resolve(node, ctx)
node = live_values.resolve(node, ctx, {})
return node
def test_constructor_detection(self):
def test_fn():
opt = training.GradientDescentOptimizer(0.1)
return opt
node = self._parse_and_analyze(test_fn, {'training': training})
call_node = node.body[0].body[0].value
self.assertEquals(training.GradientDescentOptimizer,
anno.getanno(call_node, 'type'))
self.assertEquals((training.__name__, 'GradientDescentOptimizer'),
anno.getanno(call_node, 'type_fqn'))
def test_class_members_of_detected_constructor(self):
def test_fn():
opt = training.GradientDescentOptimizer(0.1)
opt.minimize(0)
node = self._parse_and_analyze(test_fn, {'training': training})
method_call = node.body[0].body[1].value.func
self.assertEquals(training.GradientDescentOptimizer.minimize,
anno.getanno(method_call, 'live_val'))
def test_class_members_in_with_stmt(self):
def test_fn(x):
with session.Session() as sess:
sess.run(x)
node = self._parse_and_analyze(test_fn, {'session': session})
constructor_call = node.body[0].body[0].items[0].context_expr
self.assertEquals(session.Session, anno.getanno(constructor_call, 'type'))
self.assertEquals((session.__name__, 'Session'),
anno.getanno(constructor_call, 'type_fqn'))
method_call = node.body[0].body[0].body[0].value.func
self.assertEquals(session.Session.run, anno.getanno(method_call,
'live_val'))
def test_constructor_data_dependent(self):
def test_fn(x):
if x > 0:
opt = training.GradientDescentOptimizer(0.1)
else:
opt = training.GradientDescentOptimizer(0.01)
opt.minimize(0)
node = self._parse_and_analyze(test_fn, {'training': training})
method_call = node.body[0].body[1].value.func
self.assertFalse(anno.hasanno(method_call, 'live_val'))
def test_parameter_class_members(self):
def test_fn(opt):
opt.minimize(0)
node = self._parse_and_analyze(test_fn, {})
method_call = node.body[0].body[0].value.func
self.assertFalse(anno.hasanno(method_call, 'live_val'))
def test_parameter_class_members_with_value_hints(self):
def test_fn(opt):
opt.minimize(0)
node = self._parse_and_analyze(
test_fn, {'training': training},
arg_types={
'opt': (training.GradientDescentOptimizer.__name__,
training.GradientDescentOptimizer)
})
method_call = node.body[0].body[0].value.func
self.assertEquals(training.GradientDescentOptimizer.minimize,
anno.getanno(method_call, 'live_val'))
def test_function_variables(self):
def bar():
pass
def test_fn():
foo = bar
foo()
node = self._parse_and_analyze(test_fn, {'bar': bar})
method_call = node.body[0].body[1].value.func
self.assertFalse(anno.hasanno(method_call, 'live_val'))
def test_nested_members(self):
def test_fn():
foo = training.GradientDescentOptimizer(0.1)
foo.bar.baz()
node = self._parse_and_analyze(test_fn, {'training': training})
method_call = node.body[0].body[1].value.func
self.assertFalse(anno.hasanno(method_call, 'live_val'))
if __name__ == '__main__':
test.main()
|
rabipanda/tensorflow
|
tensorflow/contrib/py2tf/pyct/static_analysis/type_info_test.py
|
Python
|
apache-2.0
| 5,773
|
### Copyright (C) 2002-2005 Stephen Kennedy <stevek@gnome.org>
### Redistribution and use in source and binary forms, with or without
### modification, are permitted provided that the following conditions
### are met:
###
### 1. Redistributions of source code must retain the above copyright
### notice, this list of conditions and the following disclaimer.
### 2. Redistributions in binary form must reproduce the above copyright
### notice, this list of conditions and the following disclaimer in the
### documentation and/or other materials provided with the distribution.
### THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
### IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
### OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
### IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
### INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
### NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
### DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
### THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
### (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
### THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import subprocess
from gettext import gettext as _
# ignored, unversioned, normal, ignored changes,
# error, placeholder, vc added,
# vc modified, vc conflict, vc removed,
# locally removed, end
STATE_IGNORED, STATE_NONE, STATE_NORMAL, STATE_NOCHANGE, \
STATE_ERROR, STATE_EMPTY, STATE_NEW, \
STATE_MODIFIED, STATE_CONFLICT, STATE_REMOVED, \
STATE_MISSING, STATE_MAX = range(12)
class Entry(object):
# These are the possible states of files. Be sure to get the colons correct.
states = _("Ignored:Unversioned:::Error::Newly added:Modified:<b>Conflict</b>:Removed:Missing").split(":")
assert len(states)==STATE_MAX
def __init__(self, path, name, state):
self.path = path
self.state = state
self.parent, self.name = os.path.split(path.rstrip("/"))
def __str__(self):
return "<%s:%s %s>\n" % (self.__class__, self.name, (self.path, self.state))
def __repr__(self):
return "%s %s\n" % (self.name, (self.path, self.state))
def get_status(self):
return self.states[self.state]
class Dir(Entry):
def __init__(self, path, name, state):
Entry.__init__(self, path, name, state)
self.isdir = 1
self.rev = ""
self.tag = ""
self.options = ""
class File(Entry):
def __init__(self, path, name, state, rev="", tag="", options=""):
assert path[-1] != "/"
Entry.__init__(self, path, name, state)
self.isdir = 0
self.rev = rev
self.tag = tag
self.options = options
class Vc(object):
PATCH_STRIP_NUM = 0
PATCH_INDEX_RE = ''
VC_DIR = None
VC_ROOT_WALK = True
VC_METADATA = None
def __init__(self, location):
if self.VC_ROOT_WALK:
self.root = self.find_repo_root(location)
else:
self.root = self.check_repo_root(location)
def commit_command(self, message):
raise NotImplementedError()
def diff_command(self):
raise NotImplementedError()
def update_command(self):
raise NotImplementedError()
def add_command(self, binary=0):
raise NotImplementedError()
def remove_command(self, force=0):
raise NotImplementedError()
def revert_command(self):
raise NotImplementedError()
def resolved_command(self):
raise NotImplementedError()
def patch_command(self, workdir):
return ["patch","--strip=%i"%self.PATCH_STRIP_NUM,"--reverse","--directory=%s" % workdir]
def check_repo_root(self, location):
if not os.path.isdir(os.path.join(location, self.VC_DIR)):
raise ValueError
return location
def find_repo_root(self, location):
while True:
try:
return self.check_repo_root(location)
except ValueError:
pass
tmp = os.path.dirname(location)
if tmp == location:
break
location = tmp
raise ValueError()
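    # A small sketch of the walk above, under a hypothetical layout: with
    # VC_DIR = ".git" and a tree containing /repo/.git and /repo/src,
    # find_repo_root("/repo/src") returns "/repo"; if no VC_DIR directory
    # is found all the way up to the filesystem root, ValueError is raised.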
def get_working_directory(self, workdir):
return workdir
def cache_inventory(self, topdir):
pass
def uncache_inventory(self):
pass
def get_patch_files(self, patch):
regex = re.compile(self.PATCH_INDEX_RE, re.M)
return [f.strip() for f in regex.findall(patch)]
def listdir_filter(self, entries):
return [f for f in entries if f != self.VC_DIR]
def listdir(self, start):
if start=="": start="."
cfiles = []
cdirs = []
try:
entries = os.listdir(start)
entries.sort()
except OSError:
entries = []
for f in self.listdir_filter(entries):
fname = os.path.join(start, f)
lname = fname
if os.path.isdir(fname):
cdirs.append( (f, lname) )
else:
cfiles.append( (f, lname) )
dirs, files = self.lookup_files(cdirs, cfiles)
return dirs+files
def lookup_files(self, dirs, files):
"Assume all files are in the same dir, files is an array of (name, path) tuples."
directory = self._get_directoryname(files, dirs)
if directory is None:
return [], []
else:
return self._get_dirsandfiles(directory, dirs, files)
    def _get_directoryname(self, files, dirs):
directory = None
if len(files):
directory = os.path.dirname(files[0][1])
elif len(dirs):
directory = os.path.dirname(dirs[0][1])
return directory
def _get_dirsandfiles(self, directory, dirs, files):
raise NotImplementedError()
class CachedVc(Vc):
def __init__(self, location):
super(CachedVc, self).__init__(location)
self._tree_cache = None
def cache_inventory(self, directory):
self._tree_cache = self._lookup_tree_cache(directory)
def uncache_inventory(self):
self._tree_cache = None
def _lookup_tree_cache(self, directory):
raise NotImplementedError()
def _get_tree_cache(self, directory):
if self._tree_cache is None:
self.cache_inventory(directory)
return self._tree_cache
def popen(cmd, cwd=None):
return subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE).stdout
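# For example (a sketch; any command on PATH behaves the same way):
#
#   for line in popen(["ls", "-l"], cwd="/tmp"):
#       ...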
|
babycaseny/meld
|
vc/_vc.py
|
Python
|
gpl-2.0
| 6,611
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test HTTP support.
"""
import random, cgi, base64
try:
from urlparse import (
ParseResult as ParseResultBytes, urlparse, urlunsplit, clear_cache)
except ImportError:
from urllib.parse import (
ParseResultBytes, urlparse, urlunsplit, clear_cache)
from twisted.python.compat import _PY3, iterbytes, networkString, unicode, intToBytes
from twisted.python.failure import Failure
from twisted.trial import unittest
from twisted.trial.unittest import TestCase
from twisted.web import http, http_headers
from twisted.web.http import PotentialDataLoss, _DataLoss
from twisted.web.http import _IdentityTransferDecoder
from twisted.internet.task import Clock
from twisted.internet.error import ConnectionLost
from twisted.protocols import loopback
from twisted.test.proto_helpers import StringTransport
from twisted.test.test_internet import DummyProducer
from twisted.web.test.requesthelper import DummyChannel
class DateTimeTest(unittest.TestCase):
"""Test date parsing functions."""
def testRoundtrip(self):
for i in range(10000):
time = random.randint(0, 2000000000)
timestr = http.datetimeToString(time)
time2 = http.stringToDatetime(timestr)
self.assertEqual(time, time2)
class DummyHTTPHandler(http.Request):
def process(self):
self.content.seek(0, 0)
data = self.content.read()
length = self.getHeader(b'content-length')
if length is None:
length = networkString(str(length))
request = b"'''\n" + length + b"\n" + data + b"'''\n"
self.setResponseCode(200)
self.setHeader(b"Request", self.uri)
self.setHeader(b"Command", self.method)
self.setHeader(b"Version", self.clientproto)
self.setHeader(b"Content-Length", intToBytes(len(request)))
self.write(request)
self.finish()
class LoopbackHTTPClient(http.HTTPClient):
def connectionMade(self):
self.sendCommand(b"GET", b"/foo/bar")
self.sendHeader(b"Content-Length", 10)
self.endHeaders()
self.transport.write(b"0123456789")
class ResponseTestMixin(object):
"""
A mixin that provides a simple means of comparing an actual response string
to an expected response string by performing the minimal parsing.
"""
def assertResponseEquals(self, responses, expected):
"""
Assert that the C{responses} matches the C{expected} responses.
@type responses: C{bytes}
@param responses: The bytes sent in response to one or more requests.
@type expected: C{list} of C{tuple} of C{bytes}
@param expected: The expected values for the responses. Each tuple
element of the list represents one response. Each byte string
element of the tuple is a full header line without delimiter, except
for the last element which gives the full response body.
"""
for response in expected:
expectedHeaders, expectedContent = response[:-1], response[-1]
# Intentionally avoid mutating the inputs here.
expectedStatus = expectedHeaders[0]
expectedHeaders = expectedHeaders[1:]
headers, rest = responses.split(b'\r\n\r\n', 1)
headers = headers.splitlines()
status = headers.pop(0)
self.assertEqual(expectedStatus, status)
self.assertEqual(set(headers), set(expectedHeaders))
content = rest[:len(expectedContent)]
responses = rest[len(expectedContent):]
self.assertEqual(content, expectedContent)
class HTTP1_0TestCase(unittest.TestCase, ResponseTestMixin):
requests = (
b"GET / HTTP/1.0\r\n"
b"\r\n"
b"GET / HTTP/1.1\r\n"
b"Accept: text/html\r\n"
b"\r\n")
expected_response = [
(b"HTTP/1.0 200 OK",
b"Request: /",
b"Command: GET",
b"Version: HTTP/1.0",
b"Content-Length: 13",
b"'''\nNone\n'''\n")]
def test_buffer(self):
"""
Send requests over a channel and check responses match what is expected.
"""
b = StringTransport()
a = http.HTTPChannel()
a.requestFactory = DummyHTTPHandler
a.makeConnection(b)
# one byte at a time, to stress it.
for byte in iterbytes(self.requests):
a.dataReceived(byte)
a.connectionLost(IOError("all one"))
value = b.value()
self.assertResponseEquals(value, self.expected_response)
def test_requestBodyTimeout(self):
"""
L{HTTPChannel} resets its timeout whenever data from a request body is
delivered to it.
"""
clock = Clock()
transport = StringTransport()
protocol = http.HTTPChannel()
protocol.timeOut = 100
protocol.callLater = clock.callLater
protocol.makeConnection(transport)
protocol.dataReceived(b'POST / HTTP/1.0\r\nContent-Length: 2\r\n\r\n')
clock.advance(99)
self.assertFalse(transport.disconnecting)
protocol.dataReceived(b'x')
clock.advance(99)
self.assertFalse(transport.disconnecting)
protocol.dataReceived(b'x')
self.assertEqual(len(protocol.requests), 1)
class HTTP1_1TestCase(HTTP1_0TestCase):
requests = (
b"GET / HTTP/1.1\r\n"
b"Accept: text/html\r\n"
b"\r\n"
b"POST / HTTP/1.1\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"0123456789POST / HTTP/1.1\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"0123456789HEAD / HTTP/1.1\r\n"
b"\r\n")
expected_response = [
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: GET",
b"Version: HTTP/1.1",
b"Content-Length: 13",
b"'''\nNone\n'''\n"),
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: POST",
b"Version: HTTP/1.1",
b"Content-Length: 21",
b"'''\n10\n0123456789'''\n"),
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: POST",
b"Version: HTTP/1.1",
b"Content-Length: 21",
b"'''\n10\n0123456789'''\n"),
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: HEAD",
b"Version: HTTP/1.1",
b"Content-Length: 13",
b"")]
class HTTP1_1_close_TestCase(HTTP1_0TestCase):
requests = (
b"GET / HTTP/1.1\r\n"
b"Accept: text/html\r\n"
b"Connection: close\r\n"
b"\r\n"
b"GET / HTTP/1.0\r\n"
b"\r\n")
expected_response = [
(b"HTTP/1.1 200 OK",
b"Connection: close",
b"Request: /",
b"Command: GET",
b"Version: HTTP/1.1",
b"Content-Length: 13",
b"'''\nNone\n'''\n")]
class HTTP0_9TestCase(HTTP1_0TestCase):
requests = (
b"GET /\r\n")
expected_response = b"HTTP/1.1 400 Bad Request\r\n\r\n"
def assertResponseEquals(self, response, expectedResponse):
self.assertEqual(response, expectedResponse)
class HTTPLoopbackTestCase(unittest.TestCase):
expectedHeaders = {b'request': b'/foo/bar',
b'command': b'GET',
b'version': b'HTTP/1.0',
b'content-length': b'21'}
numHeaders = 0
gotStatus = 0
gotResponse = 0
gotEndHeaders = 0
def _handleStatus(self, version, status, message):
self.gotStatus = 1
self.assertEqual(version, b"HTTP/1.0")
self.assertEqual(status, b"200")
def _handleResponse(self, data):
self.gotResponse = 1
self.assertEqual(data, b"'''\n10\n0123456789'''\n")
def _handleHeader(self, key, value):
self.numHeaders = self.numHeaders + 1
self.assertEqual(self.expectedHeaders[key.lower()], value)
def _handleEndHeaders(self):
self.gotEndHeaders = 1
self.assertEqual(self.numHeaders, 4)
def testLoopback(self):
server = http.HTTPChannel()
server.requestFactory = DummyHTTPHandler
client = LoopbackHTTPClient()
client.handleResponse = self._handleResponse
client.handleHeader = self._handleHeader
client.handleEndHeaders = self._handleEndHeaders
client.handleStatus = self._handleStatus
d = loopback.loopbackAsync(server, client)
d.addCallback(self._cbTestLoopback)
return d
def _cbTestLoopback(self, ignored):
if not (self.gotStatus and self.gotResponse and self.gotEndHeaders):
raise RuntimeError(
"didn't got all callbacks %s"
% [self.gotStatus, self.gotResponse, self.gotEndHeaders])
del self.gotEndHeaders
del self.gotResponse
del self.gotStatus
del self.numHeaders
def _prequest(**headers):
"""
Make a request with the given request headers for the persistence tests.
"""
request = http.Request(DummyChannel(), False)
for headerName, v in headers.items():
request.requestHeaders.setRawHeaders(networkString(headerName), v)
return request
class PersistenceTestCase(unittest.TestCase):
"""
Tests for persistent HTTP connections.
"""
def setUp(self):
self.channel = http.HTTPChannel()
self.request = _prequest()
def test_http09(self):
"""
After being used for an I{HTTP/0.9} request, the L{HTTPChannel} is not
persistent.
"""
persist = self.channel.checkPersistence(self.request, b"HTTP/0.9")
self.assertFalse(persist)
self.assertEqual(
[], list(self.request.responseHeaders.getAllRawHeaders()))
def test_http10(self):
"""
After being used for an I{HTTP/1.0} request, the L{HTTPChannel} is not
persistent.
"""
persist = self.channel.checkPersistence(self.request, b"HTTP/1.0")
self.assertFalse(persist)
self.assertEqual(
[], list(self.request.responseHeaders.getAllRawHeaders()))
def test_http11(self):
"""
After being used for an I{HTTP/1.1} request, the L{HTTPChannel} is
persistent.
"""
persist = self.channel.checkPersistence(self.request, b"HTTP/1.1")
self.assertTrue(persist)
self.assertEqual(
[], list(self.request.responseHeaders.getAllRawHeaders()))
def test_http11Close(self):
"""
After being used for an I{HTTP/1.1} request with a I{Connection: Close}
header, the L{HTTPChannel} is not persistent.
"""
request = _prequest(connection=[b"close"])
persist = self.channel.checkPersistence(request, b"HTTP/1.1")
self.assertFalse(persist)
self.assertEqual(
[(b"Connection", [b"close"])],
list(request.responseHeaders.getAllRawHeaders()))
class IdentityTransferEncodingTests(TestCase):
"""
Tests for L{_IdentityTransferDecoder}.
"""
def setUp(self):
"""
Create an L{_IdentityTransferDecoder} with callbacks hooked up so that
calls to them can be inspected.
"""
self.data = []
self.finish = []
self.contentLength = 10
self.decoder = _IdentityTransferDecoder(
self.contentLength, self.data.append, self.finish.append)
def test_exactAmountReceived(self):
"""
If L{_IdentityTransferDecoder.dataReceived} is called with a byte string
with length equal to the content length passed to
L{_IdentityTransferDecoder}'s initializer, the data callback is invoked
with that string and the finish callback is invoked with a zero-length
string.
"""
self.decoder.dataReceived(b'x' * self.contentLength)
self.assertEqual(self.data, [b'x' * self.contentLength])
self.assertEqual(self.finish, [b''])
def test_shortStrings(self):
"""
If L{_IdentityTransferDecoder.dataReceived} is called multiple times
with byte strings which, when concatenated, are as long as the content
length provided, the data callback is invoked with each string and the
finish callback is invoked only after the second call.
"""
self.decoder.dataReceived(b'x')
self.assertEqual(self.data, [b'x'])
self.assertEqual(self.finish, [])
self.decoder.dataReceived(b'y' * (self.contentLength - 1))
self.assertEqual(self.data, [b'x', b'y' * (self.contentLength - 1)])
self.assertEqual(self.finish, [b''])
def test_longString(self):
"""
If L{_IdentityTransferDecoder.dataReceived} is called with a byte string
with length greater than the provided content length, only the prefix
of that string up to the content length is passed to the data callback
and the remainder is passed to the finish callback.
"""
self.decoder.dataReceived(b'x' * self.contentLength + b'y')
self.assertEqual(self.data, [b'x' * self.contentLength])
self.assertEqual(self.finish, [b'y'])
def test_rejectDataAfterFinished(self):
"""
If data is passed to L{_IdentityTransferDecoder.dataReceived} after the
finish callback has been invoked, C{RuntimeError} is raised.
"""
failures = []
def finish(bytes):
try:
decoder.dataReceived(b'foo')
except:
failures.append(Failure())
decoder = _IdentityTransferDecoder(5, self.data.append, finish)
decoder.dataReceived(b'x' * 4)
self.assertEqual(failures, [])
decoder.dataReceived(b'y')
failures[0].trap(RuntimeError)
self.assertEqual(
str(failures[0].value),
"_IdentityTransferDecoder cannot decode data after finishing")
def test_unknownContentLength(self):
"""
If L{_IdentityTransferDecoder} is constructed with C{None} for the
content length, it passes all data delivered to it through to the data
callback.
"""
data = []
finish = []
decoder = _IdentityTransferDecoder(None, data.append, finish.append)
decoder.dataReceived(b'x')
self.assertEqual(data, [b'x'])
decoder.dataReceived(b'y')
self.assertEqual(data, [b'x', b'y'])
self.assertEqual(finish, [])
def _verifyCallbacksUnreferenced(self, decoder):
"""
        Check the decoder's data and finish callbacks and make sure they are
        None in order to help avoid reference cycles.
"""
self.assertIdentical(decoder.dataCallback, None)
self.assertIdentical(decoder.finishCallback, None)
def test_earlyConnectionLose(self):
"""
L{_IdentityTransferDecoder.noMoreData} raises L{_DataLoss} if it is
called and the content length is known but not enough bytes have been
delivered.
"""
self.decoder.dataReceived(b'x' * (self.contentLength - 1))
self.assertRaises(_DataLoss, self.decoder.noMoreData)
self._verifyCallbacksUnreferenced(self.decoder)
def test_unknownContentLengthConnectionLose(self):
"""
L{_IdentityTransferDecoder.noMoreData} calls the finish callback and
raises L{PotentialDataLoss} if it is called and the content length is
unknown.
"""
body = []
finished = []
decoder = _IdentityTransferDecoder(None, body.append, finished.append)
self.assertRaises(PotentialDataLoss, decoder.noMoreData)
self.assertEqual(body, [])
self.assertEqual(finished, [b''])
self._verifyCallbacksUnreferenced(decoder)
def test_finishedConnectionLose(self):
"""
L{_IdentityTransferDecoder.noMoreData} does not raise any exception if
it is called when the content length is known and that many bytes have
been delivered.
"""
self.decoder.dataReceived(b'x' * self.contentLength)
self.decoder.noMoreData()
self._verifyCallbacksUnreferenced(self.decoder)
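# Illustrative sketch, not part of the original module: _IdentityTransferDecoder
# delivers exactly contentLength bytes to the data callback and hands any
# surplus to the finish callback, as the tests above verify.
def _identityDecoderDemo():
    data, finish = [], []
    decoder = _IdentityTransferDecoder(5, data.append, finish.append)
    decoder.dataReceived(b'12345extra')
    assert data == [b'12345']
    assert finish == [b'extra']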
class ChunkedTransferEncodingTests(unittest.TestCase):
"""
Tests for L{_ChunkedTransferDecoder}, which turns a byte stream encoded
using HTTP I{chunked} C{Transfer-Encoding} back into the original byte
stream.
"""
def test_decoding(self):
"""
L{_ChunkedTransferDecoder.dataReceived} decodes chunked-encoded data
and passes the result to the specified callback.
"""
L = []
p = http._ChunkedTransferDecoder(L.append, None)
p.dataReceived(b'3\r\nabc\r\n5\r\n12345\r\n')
p.dataReceived(b'a\r\n0123456789\r\n')
self.assertEqual(L, [b'abc', b'12345', b'0123456789'])
def test_short(self):
"""
L{_ChunkedTransferDecoder.dataReceived} decodes chunks broken up and
delivered in multiple calls.
"""
L = []
finished = []
p = http._ChunkedTransferDecoder(L.append, finished.append)
for s in iterbytes(b'3\r\nabc\r\n5\r\n12345\r\n0\r\n\r\n'):
p.dataReceived(s)
self.assertEqual(L, [b'a', b'b', b'c', b'1', b'2', b'3', b'4', b'5'])
self.assertEqual(finished, [b''])
def test_newlines(self):
"""
L{_ChunkedTransferDecoder.dataReceived} doesn't treat CR LF pairs
embedded in chunk bodies specially.
"""
L = []
p = http._ChunkedTransferDecoder(L.append, None)
p.dataReceived(b'2\r\n\r\n\r\n')
self.assertEqual(L, [b'\r\n'])
def test_extensions(self):
"""
L{_ChunkedTransferDecoder.dataReceived} disregards chunk-extension
fields.
"""
L = []
p = http._ChunkedTransferDecoder(L.append, None)
p.dataReceived(b'3; x-foo=bar\r\nabc\r\n')
self.assertEqual(L, [b'abc'])
def test_finish(self):
"""
L{_ChunkedTransferDecoder.dataReceived} interprets a zero-length
chunk as the end of the chunked data stream and calls the completion
callback.
"""
finished = []
p = http._ChunkedTransferDecoder(None, finished.append)
p.dataReceived(b'0\r\n\r\n')
self.assertEqual(finished, [b''])
def test_extra(self):
"""
L{_ChunkedTransferDecoder.dataReceived} passes any bytes which come
after the terminating zero-length chunk to the completion callback.
"""
finished = []
p = http._ChunkedTransferDecoder(None, finished.append)
p.dataReceived(b'0\r\n\r\nhello')
self.assertEqual(finished, [b'hello'])
def test_afterFinished(self):
"""
L{_ChunkedTransferDecoder.dataReceived} raises C{RuntimeError} if it
is called after it has seen the last chunk.
"""
p = http._ChunkedTransferDecoder(None, lambda bytes: None)
p.dataReceived(b'0\r\n\r\n')
self.assertRaises(RuntimeError, p.dataReceived, b'hello')
def test_earlyConnectionLose(self):
"""
L{_ChunkedTransferDecoder.noMoreData} raises L{_DataLoss} if it is
called and the end of the last trailer has not yet been received.
"""
parser = http._ChunkedTransferDecoder(None, lambda bytes: None)
parser.dataReceived(b'0\r\n\r')
exc = self.assertRaises(_DataLoss, parser.noMoreData)
self.assertEqual(
str(exc),
"Chunked decoder in 'TRAILER' state, still expecting more data "
"to get to 'FINISHED' state.")
def test_finishedConnectionLose(self):
"""
L{_ChunkedTransferDecoder.noMoreData} does not raise any exception if
it is called after the terminal zero length chunk is received.
"""
parser = http._ChunkedTransferDecoder(None, lambda bytes: None)
parser.dataReceived(b'0\r\n\r\n')
parser.noMoreData()
def test_reentrantFinishedNoMoreData(self):
"""
L{_ChunkedTransferDecoder.noMoreData} can be called from the finished
callback without raising an exception.
"""
errors = []
successes = []
def finished(extra):
try:
parser.noMoreData()
except:
errors.append(Failure())
else:
successes.append(True)
parser = http._ChunkedTransferDecoder(None, finished)
parser.dataReceived(b'0\r\n\r\n')
self.assertEqual(errors, [])
self.assertEqual(successes, [True])
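# Illustrative sketch, not part of the original module: on the wire each chunk
# is "<hex length>\r\n<body>\r\n" and the stream ends with a zero-length
# chunk; _ChunkedTransferDecoder undoes exactly that framing.
def _chunkedDecoderDemo():
    bodies, finished = [], []
    decoder = http._ChunkedTransferDecoder(bodies.append, finished.append)
    decoder.dataReceived(b'5\r\nhello\r\n0\r\n\r\n')
    assert bodies == [b'hello']
    assert finished == [b'']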
class ChunkingTestCase(unittest.TestCase):
strings = [b"abcv", b"", b"fdfsd423", b"Ffasfas\r\n",
b"523523\n\rfsdf", b"4234"]
def testChunks(self):
for s in self.strings:
chunked = b''.join(http.toChunk(s))
self.assertEqual((s, b''), http.fromChunk(chunked))
self.assertRaises(ValueError, http.fromChunk, b'-5\r\nmalformed!\r\n')
def testConcatenatedChunks(self):
chunked = b''.join([b''.join(http.toChunk(t)) for t in self.strings])
result = []
buffer = b""
for c in iterbytes(chunked):
buffer = buffer + c
try:
data, buffer = http.fromChunk(buffer)
result.append(data)
except ValueError:
pass
self.assertEqual(result, self.strings)
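# Illustrative sketch, not part of the original module: http.toChunk produces
# the pieces of a single chunk and http.fromChunk parses one chunk back out of
# a buffer, returning the body and any unconsumed bytes.
def _chunkRoundTripDemo():
    wire = b''.join(http.toChunk(b'payload'))  # b'7\r\npayload\r\n'
    body, rest = http.fromChunk(wire)
    assert (body, rest) == (b'payload', b'')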
class ParsingTestCase(unittest.TestCase):
"""
Tests for protocol parsing in L{HTTPChannel}.
"""
def setUp(self):
self.didRequest = False
def runRequest(self, httpRequest, requestClass, success=1):
httpRequest = httpRequest.replace(b"\n", b"\r\n")
b = StringTransport()
a = http.HTTPChannel()
a.requestFactory = requestClass
a.makeConnection(b)
# one byte at a time, to stress it.
for byte in iterbytes(httpRequest):
if a.transport.disconnecting:
break
a.dataReceived(byte)
a.connectionLost(IOError("all done"))
if success:
self.assertTrue(self.didRequest)
else:
self.assertFalse(self.didRequest)
return a
def test_basicAuth(self):
"""
L{HTTPChannel} provides username and password information supplied in
an I{Authorization} header to the L{Request} which makes it available
via its C{getUser} and C{getPassword} methods.
"""
requests = []
class Request(http.Request):
def process(self):
self.credentials = (self.getUser(), self.getPassword())
requests.append(self)
for u, p in [(b"foo", b"bar"), (b"hello", b"there:z")]:
s = base64.encodestring(b":".join((u, p))).strip()
f = b"GET / HTTP/1.0\nAuthorization: Basic " + s + b"\n\n"
self.runRequest(f, Request, 0)
req = requests.pop()
self.assertEqual((u, p), req.credentials)
def test_headers(self):
"""
Headers received by L{HTTPChannel} in a request are made available to
the L{Request}.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
requestLines = [
b"GET / HTTP/1.0",
b"Foo: bar",
b"baz: Quux",
b"baz: quux",
b"",
b""]
self.runRequest(b'\n'.join(requestLines), MyRequest, 0)
[request] = processed
self.assertEqual(
request.requestHeaders.getRawHeaders(b'foo'), [b'bar'])
self.assertEqual(
request.requestHeaders.getRawHeaders(b'bAz'), [b'Quux', b'quux'])
def test_tooManyHeaders(self):
"""
L{HTTPChannel} enforces a limit of C{HTTPChannel.maxHeaders} on the
number of headers received per request.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
requestLines = [b"GET / HTTP/1.0"]
for i in range(http.HTTPChannel.maxHeaders + 2):
requestLines.append(networkString("%s: foo" % (i,)))
requestLines.extend([b"", b""])
channel = self.runRequest(b"\n".join(requestLines), MyRequest, 0)
self.assertEqual(processed, [])
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
def test_invalidHeaders(self):
"""
If a Content-Length header with a non-integer value is received, a 400
(Bad Request) response is sent to the client and the connection is
closed.
"""
requestLines = [b"GET / HTTP/1.0", b"Content-Length: x", b"", b""]
channel = self.runRequest(b"\n".join(requestLines), http.Request, 0)
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
self.assertTrue(channel.transport.disconnecting)
def test_headerLimitPerRequest(self):
"""
L{HTTPChannel} enforces the limit of C{HTTPChannel.maxHeaders} per
request so that headers received in an earlier request do not count
towards the limit when processing a later request.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
self.patch(http.HTTPChannel, 'maxHeaders', 1)
requestLines = [
b"GET / HTTP/1.1",
b"Foo: bar",
b"",
b"",
b"GET / HTTP/1.1",
b"Bar: baz",
b"",
b""]
channel = self.runRequest(b"\n".join(requestLines), MyRequest, 0)
[first, second] = processed
self.assertEqual(first.getHeader(b'foo'), b'bar')
self.assertEqual(second.getHeader(b'bar'), b'baz')
self.assertEqual(
channel.transport.value(),
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'0\r\n'
b'\r\n'
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'0\r\n'
b'\r\n')
def testCookies(self):
"""
Test cookies parsing and reading.
"""
httpRequest = b'''\
GET / HTTP/1.0
Cookie: rabbit="eat carrot"; ninja=secret; spam="hey 1=1!"
'''
cookies = {}
testcase = self
class MyRequest(http.Request):
def process(self):
for name in [b'rabbit', b'ninja', b'spam']:
cookies[name] = self.getCookie(name)
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
self.assertEqual(
cookies, {
b'rabbit': b'"eat carrot"',
b'ninja': b'secret',
b'spam': b'"hey 1=1!"'})
def testGET(self):
httpRequest = b'''\
GET /?key=value&multiple=two+words&multiple=more%20words&empty= HTTP/1.0
'''
method = []
args = []
testcase = self
class MyRequest(http.Request):
def process(self):
method.append(self.method)
args.extend([
self.args[b"key"],
self.args[b"empty"],
self.args[b"multiple"]])
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
self.assertEqual(method, [b"GET"])
self.assertEqual(
args, [[b"value"], [b""], [b"two words", b"more words"]])
def test_extraQuestionMark(self):
"""
        While only a single '?' is allowed in a URL, several other servers
        allow multiple and pass everything after the first one through as part
        of the query arguments. Test that we emulate this behavior.
"""
httpRequest = b'GET /foo?bar=?&baz=quux HTTP/1.0\n\n'
method = []
path = []
args = []
testcase = self
class MyRequest(http.Request):
def process(self):
method.append(self.method)
path.append(self.path)
args.extend([self.args[b'bar'], self.args[b'baz']])
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
self.assertEqual(method, [b'GET'])
self.assertEqual(path, [b'/foo'])
self.assertEqual(args, [[b'?'], [b'quux']])
def test_formPOSTRequest(self):
"""
The request body of a I{POST} request with a I{Content-Type} header
of I{application/x-www-form-urlencoded} is parsed according to that
content type and made available in the C{args} attribute of the
request object. The original bytes of the request may still be read
from the C{content} attribute.
"""
query = 'key=value&multiple=two+words&multiple=more%20words&empty='
httpRequest = networkString('''\
POST / HTTP/1.0
Content-Length: %d
Content-Type: application/x-www-form-urlencoded
%s''' % (len(query), query))
method = []
args = []
content = []
testcase = self
class MyRequest(http.Request):
def process(self):
method.append(self.method)
args.extend([
self.args[b'key'], self.args[b'empty'],
self.args[b'multiple']])
content.append(self.content.read())
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
self.assertEqual(method, [b"POST"])
self.assertEqual(
args, [[b"value"], [b""], [b"two words", b"more words"]])
# Reading from the content file-like must produce the entire request
# body.
self.assertEqual(content, [networkString(query)])
def testMissingContentDisposition(self):
req = b'''\
POST / HTTP/1.0
Content-Type: multipart/form-data; boundary=AaB03x
Content-Length: 103
--AaB03x
Content-Type: text/plain
Content-Transfer-Encoding: quoted-printable
abasdfg
--AaB03x--
'''
self.runRequest(req, http.Request, success=False)
if _PY3:
testMissingContentDisposition.skip = (
"Cannot parse multipart/form-data on Python 3. "
"See http://bugs.python.org/issue12411 and #5511.")
def test_chunkedEncoding(self):
"""
If a request uses the I{chunked} transfer encoding, the request body is
decoded accordingly before it is made available on the request.
"""
httpRequest = b'''\
GET / HTTP/1.0
Content-Type: text/plain
Transfer-Encoding: chunked
6
Hello,
14
spam,eggs spam spam
0
'''
path = []
method = []
content = []
decoder = []
testcase = self
class MyRequest(http.Request):
def process(self):
content.append(self.content.fileno())
content.append(self.content.read())
method.append(self.method)
path.append(self.path)
decoder.append(self.channel._transferDecoder)
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
# The tempfile API used to create content returns an
# instance of a different type depending on what platform
# we're running on. The point here is to verify that the
# request body is in a file that's on the filesystem.
# Having a fileno method that returns an int is a somewhat
# close approximation of this. -exarkun
self.assertIsInstance(content[0], int)
self.assertEqual(content[1], b'Hello, spam,eggs spam spam')
self.assertEqual(method, [b'GET'])
self.assertEqual(path, [b'/'])
self.assertEqual(decoder, [None])
def test_malformedChunkedEncoding(self):
"""
If a request uses the I{chunked} transfer encoding, but provides an
invalid chunk length value, the request fails with a 400 error.
"""
# See test_chunkedEncoding for the correct form of this request.
httpRequest = b'''\
GET / HTTP/1.1
Content-Type: text/plain
Transfer-Encoding: chunked
MALFORMED_LINE_THIS_SHOULD_BE_'6'
Hello,
14
spam,eggs spam spam
0
'''
didRequest = []
class MyRequest(http.Request):
def process(self):
# This request should fail, so this should never be called.
didRequest.append(True)
channel = self.runRequest(httpRequest, MyRequest, success=False)
self.assertFalse(didRequest, "Request.process called")
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
self.assertTrue(channel.transport.disconnecting)
class QueryArgumentsTestCase(unittest.TestCase):
def testParseqs(self):
self.assertEqual(
cgi.parse_qs(b"a=b&d=c;+=f"),
http.parse_qs(b"a=b&d=c;+=f"))
self.assertRaises(
ValueError, http.parse_qs, b"blah", strict_parsing=True)
self.assertEqual(
cgi.parse_qs(b"a=&b=c", keep_blank_values=1),
http.parse_qs(b"a=&b=c", keep_blank_values=1))
self.assertEqual(
cgi.parse_qs(b"a=&b=c"),
http.parse_qs(b"a=&b=c"))
def test_urlparse(self):
"""
For a given URL, L{http.urlparse} should behave the same as L{urlparse},
except it should always return C{bytes}, never text.
"""
def urls():
for scheme in (b'http', b'https'):
for host in (b'example.com',):
for port in (None, 100):
for path in (b'', b'path'):
if port is not None:
host = host + b':' + networkString(str(port))
yield urlunsplit((scheme, host, path, b'', b''))
def assertSameParsing(url, decode):
"""
Verify that C{url} is parsed into the same objects by both
L{http.urlparse} and L{urlparse}.
"""
urlToStandardImplementation = url
if decode:
urlToStandardImplementation = url.decode('ascii')
# stdlib urlparse will give back whatever type we give it. To be
# able to compare the values meaningfully, if it gives back unicode,
# convert all the values to bytes.
standardResult = urlparse(urlToStandardImplementation)
if isinstance(standardResult.scheme, unicode):
# The choice of encoding is basically irrelevant. The values
# are all in ASCII. UTF-8 is, of course, the correct choice.
expected = (standardResult.scheme.encode('utf-8'),
standardResult.netloc.encode('utf-8'),
standardResult.path.encode('utf-8'),
standardResult.params.encode('utf-8'),
standardResult.query.encode('utf-8'),
standardResult.fragment.encode('utf-8'))
else:
expected = (standardResult.scheme,
standardResult.netloc,
standardResult.path,
standardResult.params,
standardResult.query,
standardResult.fragment)
scheme, netloc, path, params, query, fragment = http.urlparse(url)
self.assertEqual(
(scheme, netloc, path, params, query, fragment), expected)
self.assertIsInstance(scheme, bytes)
self.assertIsInstance(netloc, bytes)
self.assertIsInstance(path, bytes)
self.assertIsInstance(params, bytes)
self.assertIsInstance(query, bytes)
self.assertIsInstance(fragment, bytes)
# With caching, unicode then str
clear_cache()
for url in urls():
assertSameParsing(url, True)
assertSameParsing(url, False)
# With caching, str then unicode
clear_cache()
for url in urls():
assertSameParsing(url, False)
assertSameParsing(url, True)
# Without caching
for url in urls():
clear_cache()
assertSameParsing(url, True)
clear_cache()
assertSameParsing(url, False)
def test_urlparseRejectsUnicode(self):
"""
L{http.urlparse} should reject unicode input early.
"""
self.assertRaises(TypeError, http.urlparse, u'http://example.org/path')
class ClientDriver(http.HTTPClient):
def handleStatus(self, version, status, message):
self.version = version
self.status = status
self.message = message
class ClientStatusParsing(unittest.TestCase):
def testBaseline(self):
c = ClientDriver()
c.lineReceived(b'HTTP/1.0 201 foo')
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
self.assertEqual(c.message, b'foo')
def testNoMessage(self):
c = ClientDriver()
c.lineReceived(b'HTTP/1.0 201')
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
self.assertEqual(c.message, b'')
def testNoMessage_trailingSpace(self):
c = ClientDriver()
c.lineReceived(b'HTTP/1.0 201 ')
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
self.assertEqual(c.message, b'')
class RequestTests(unittest.TestCase, ResponseTestMixin):
"""
Tests for L{http.Request}
"""
def _compatHeadersTest(self, oldName, newName):
"""
Verify that each of two different attributes which are associated with
the same state properly reflect changes made through the other.
This is used to test that the C{headers}/C{responseHeaders} and
C{received_headers}/C{requestHeaders} pairs interact properly.
"""
req = http.Request(DummyChannel(), False)
getattr(req, newName).setRawHeaders(b"test", [b"lemur"])
self.assertEqual(getattr(req, oldName)[b"test"], b"lemur")
setattr(req, oldName, {b"foo": b"bar"})
self.assertEqual(
list(getattr(req, newName).getAllRawHeaders()),
[(b"Foo", [b"bar"])])
setattr(req, newName, http_headers.Headers())
self.assertEqual(getattr(req, oldName), {})
def test_received_headers(self):
"""
L{Request.received_headers} is a backwards compatible API which
accesses and allows mutation of the state at L{Request.requestHeaders}.
"""
self._compatHeadersTest('received_headers', 'requestHeaders')
def test_headers(self):
"""
L{Request.headers} is a backwards compatible API which accesses and
allows mutation of the state at L{Request.responseHeaders}.
"""
self._compatHeadersTest('headers', 'responseHeaders')
def test_getHeader(self):
"""
L{http.Request.getHeader} returns the value of the named request
header.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(b"test", [b"lemur"])
self.assertEqual(req.getHeader(b"test"), b"lemur")
def test_getHeaderReceivedMultiples(self):
"""
When there are multiple values for a single request header,
L{http.Request.getHeader} returns the last value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(b"test", [b"lemur", b"panda"])
self.assertEqual(req.getHeader(b"test"), b"panda")
def test_getHeaderNotFound(self):
"""
L{http.Request.getHeader} returns C{None} when asked for the value of a
request header which is not present.
"""
req = http.Request(DummyChannel(), False)
self.assertEqual(req.getHeader(b"test"), None)
def test_getAllHeaders(self):
"""
        L{http.Request.getAllHeaders} returns a C{dict} mapping all request
header names to their corresponding values.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(b"test", [b"lemur"])
self.assertEqual(req.getAllHeaders(), {b"test": b"lemur"})
def test_getAllHeadersNoHeaders(self):
"""
L{http.Request.getAllHeaders} returns an empty C{dict} if there are no
request headers.
"""
req = http.Request(DummyChannel(), False)
self.assertEqual(req.getAllHeaders(), {})
def test_getAllHeadersMultipleHeaders(self):
"""
When there are multiple values for a single request header,
L{http.Request.getAllHeaders} returns only the last value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(b"test", [b"lemur", b"panda"])
self.assertEqual(req.getAllHeaders(), {b"test": b"panda"})
def test_setResponseCode(self):
"""
L{http.Request.setResponseCode} takes a status code and causes it to be
used as the response status.
"""
channel = DummyChannel()
req = http.Request(channel, False)
req.setResponseCode(201)
req.write(b'')
self.assertEqual(
channel.transport.written.getvalue().splitlines()[0],
b"(no clientproto yet) 201 Created")
def test_setResponseCodeAndMessage(self):
"""
L{http.Request.setResponseCode} takes a status code and a message and
causes them to be used as the response status.
"""
channel = DummyChannel()
req = http.Request(channel, False)
req.setResponseCode(202, "happily accepted")
req.write(b'')
self.assertEqual(
channel.transport.written.getvalue().splitlines()[0],
b'(no clientproto yet) 202 happily accepted')
def test_setResponseCodeAcceptsIntegers(self):
"""
L{http.Request.setResponseCode} accepts C{int} for the code parameter
and raises L{TypeError} if passed anything else.
"""
req = http.Request(DummyChannel(), False)
req.setResponseCode(1)
self.assertRaises(TypeError, req.setResponseCode, "1")
def test_setResponseCodeAcceptsLongIntegers(self):
"""
L{http.Request.setResponseCode} accepts C{long} for the code
parameter.
"""
req = http.Request(DummyChannel(), False)
req.setResponseCode(long(1))
if _PY3:
test_setResponseCodeAcceptsLongIntegers.skip = (
"Python 3 has no separate long integer type.")
def test_setHost(self):
"""
L{http.Request.setHost} sets the value of the host request header.
The port should not be added because it is the default.
"""
req = http.Request(DummyChannel(), False)
req.setHost(b"example.com", 80)
self.assertEqual(
req.requestHeaders.getRawHeaders(b"host"), [b"example.com"])
def test_setHostSSL(self):
"""
L{http.Request.setHost} sets the value of the host request header.
The port should not be added because it is the default.
"""
d = DummyChannel()
d.transport = DummyChannel.SSL()
req = http.Request(d, False)
req.setHost(b"example.com", 443)
self.assertEqual(
req.requestHeaders.getRawHeaders(b"host"), [b"example.com"])
def test_setHostNonDefaultPort(self):
"""
L{http.Request.setHost} sets the value of the host request header.
The port should be added because it is not the default.
"""
req = http.Request(DummyChannel(), False)
req.setHost(b"example.com", 81)
self.assertEqual(
req.requestHeaders.getRawHeaders(b"host"), [b"example.com:81"])
def test_setHostSSLNonDefaultPort(self):
"""
L{http.Request.setHost} sets the value of the host request header.
The port should be added because it is not the default.
"""
d = DummyChannel()
d.transport = DummyChannel.SSL()
req = http.Request(d, False)
req.setHost(b"example.com", 81)
self.assertEqual(
req.requestHeaders.getRawHeaders(b"host"), [b"example.com:81"])
def test_setHeader(self):
"""
L{http.Request.setHeader} sets the value of the given response header.
"""
req = http.Request(DummyChannel(), False)
req.setHeader(b"test", b"lemur")
self.assertEqual(req.responseHeaders.getRawHeaders(b"test"), [b"lemur"])
def test_firstWrite(self):
"""
For an HTTP 1.0 request, L{http.Request.write} sends an HTTP 1.0
Response-Line and whatever response headers are set.
"""
req = http.Request(DummyChannel(), False)
trans = StringTransport()
req.transport = trans
req.setResponseCode(200)
req.clientproto = b"HTTP/1.0"
req.responseHeaders.setRawHeaders(b"test", [b"lemur"])
req.write(b'Hello')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.0 200 OK",
b"Test: lemur",
b"Hello")])
def test_nonByteHeaderValue(self):
"""
        L{http.Request.write} casts non-bytes header values to bytes
transparently.
"""
req = http.Request(DummyChannel(), False)
trans = StringTransport()
req.transport = trans
req.setResponseCode(200)
req.clientproto = b"HTTP/1.0"
req.responseHeaders.setRawHeaders(b"test", [10])
req.write(b'Hello')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.0 200 OK",
b"Test: 10",
b"Hello")])
warnings = self.flushWarnings(
offendingFunctions=[self.test_nonByteHeaderValue])
self.assertEqual(1, len(warnings))
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"Passing non-bytes header values is deprecated since "
"Twisted 12.3. Pass only bytes instead.")
def test_firstWriteHTTP11Chunked(self):
"""
For an HTTP 1.1 request, L{http.Request.write} sends an HTTP 1.1
Response-Line, whatever response headers are set, and uses chunked
encoding for the response body.
"""
req = http.Request(DummyChannel(), False)
trans = StringTransport()
req.transport = trans
req.setResponseCode(200)
req.clientproto = b"HTTP/1.1"
req.responseHeaders.setRawHeaders(b"test", [b"lemur"])
req.write(b'Hello')
req.write(b'World!')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.1 200 OK",
b"Test: lemur",
b"Transfer-Encoding: chunked",
b"5\r\nHello\r\n6\r\nWorld!\r\n")])
def test_firstWriteLastModified(self):
"""
For an HTTP 1.0 request for a resource with a known last modified time,
L{http.Request.write} sends an HTTP Response-Line, whatever response
headers are set, and a last-modified header with that time.
"""
req = http.Request(DummyChannel(), False)
trans = StringTransport()
req.transport = trans
req.setResponseCode(200)
req.clientproto = b"HTTP/1.0"
req.lastModified = 0
req.responseHeaders.setRawHeaders(b"test", [b"lemur"])
req.write(b'Hello')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.0 200 OK",
b"Test: lemur",
b"Last-Modified: Thu, 01 Jan 1970 00:00:00 GMT",
b"Hello")])
def test_receivedCookiesDefault(self):
"""
L{http.Request.received_cookies} defaults to an empty L{dict}.
"""
req = http.Request(DummyChannel(), False)
self.assertEqual(req.received_cookies, {})
def test_parseCookies(self):
"""
L{http.Request.parseCookies} extracts cookies from C{requestHeaders}
and adds them to C{received_cookies}.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'test="lemur"; test2="panda"'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b"test": b'"lemur"', b"test2": b'"panda"'})
def test_parseCookiesMultipleHeaders(self):
"""
L{http.Request.parseCookies} can extract cookies from multiple Cookie
headers.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'test="lemur"', b'test2="panda"'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b"test": b'"lemur"', b"test2": b'"panda"'})
def test_parseCookiesNoCookie(self):
"""
L{http.Request.parseCookies} can be called on a request without a
cookie header.
"""
req = http.Request(DummyChannel(), False)
req.parseCookies()
self.assertEqual(req.received_cookies, {})
def test_parseCookiesEmptyCookie(self):
"""
L{http.Request.parseCookies} can be called on a request with an
empty cookie header.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [])
req.parseCookies()
self.assertEqual(req.received_cookies, {})
def test_parseCookiesIgnoreValueless(self):
"""
L{http.Request.parseCookies} ignores cookies which don't have a
value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'foo; bar; baz;'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {})
def test_parseCookiesEmptyValue(self):
"""
L{http.Request.parseCookies} parses cookies with an empty value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'foo='])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b'foo': b''})
def test_parseCookiesRetainRightSpace(self):
"""
L{http.Request.parseCookies} leaves trailing whitespace in the
cookie value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'foo=bar '])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b'foo': b'bar '})
def test_parseCookiesStripLeftSpace(self):
"""
L{http.Request.parseCookies} strips leading whitespace in the
cookie key.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b' foo=bar'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b'foo': b'bar'})
def test_parseCookiesContinueAfterMalformedCookie(self):
"""
L{http.Request.parseCookies} parses valid cookies set before or
after malformed cookies.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'12345; test="lemur"; 12345; test2="panda"; 12345'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b"test": b'"lemur"', b"test2": b'"panda"'})
def test_connectionLost(self):
"""
L{http.Request.connectionLost} closes L{Request.content} and drops the
reference to the L{HTTPChannel} to assist with garbage collection.
"""
req = http.Request(DummyChannel(), False)
# Cause Request.content to be created at all.
req.gotLength(10)
# Grab a reference to content in case the Request drops it later on.
content = req.content
# Put some bytes into it
req.handleContentChunk(b"hello")
# Then something goes wrong and content should get closed.
req.connectionLost(Failure(ConnectionLost("Finished")))
self.assertTrue(content.closed)
self.assertIdentical(req.channel, None)
def test_registerProducerTwiceFails(self):
"""
Calling L{Request.registerProducer} when a producer is already
registered raises ValueError.
"""
req = http.Request(DummyChannel(), False)
req.registerProducer(DummyProducer(), True)
self.assertRaises(
ValueError, req.registerProducer, DummyProducer(), True)
def test_registerProducerWhenQueuedPausesPushProducer(self):
"""
Calling L{Request.registerProducer} with an IPushProducer when the
request is queued pauses the producer.
"""
req = http.Request(DummyChannel(), True)
producer = DummyProducer()
req.registerProducer(producer, True)
self.assertEqual(['pause'], producer.events)
def test_registerProducerWhenQueuedDoesntPausePullProducer(self):
"""
Calling L{Request.registerProducer} with an IPullProducer when the
request is queued does not pause the producer, because it doesn't make
sense to pause a pull producer.
"""
req = http.Request(DummyChannel(), True)
producer = DummyProducer()
req.registerProducer(producer, False)
self.assertEqual([], producer.events)
def test_registerProducerWhenQueuedDoesntRegisterPushProducer(self):
"""
Calling L{Request.registerProducer} with an IPushProducer when the
request is queued does not register the producer on the request's
transport.
"""
self.assertIdentical(
None, getattr(http.StringTransport, 'registerProducer', None),
"StringTransport cannot implement registerProducer for this test "
"to be valid.")
req = http.Request(DummyChannel(), True)
producer = DummyProducer()
req.registerProducer(producer, True)
# This is a roundabout assertion: http.StringTransport doesn't
# implement registerProducer, so Request.registerProducer can't have
# tried to call registerProducer on the transport.
self.assertIsInstance(req.transport, http.StringTransport)
def test_registerProducerWhenQueuedDoesntRegisterPullProducer(self):
"""
Calling L{Request.registerProducer} with an IPullProducer when the
request is queued does not register the producer on the request's
transport.
"""
self.assertIdentical(
None, getattr(http.StringTransport, 'registerProducer', None),
"StringTransport cannot implement registerProducer for this test "
"to be valid.")
req = http.Request(DummyChannel(), True)
producer = DummyProducer()
req.registerProducer(producer, False)
# This is a roundabout assertion: http.StringTransport doesn't
# implement registerProducer, so Request.registerProducer can't have
# tried to call registerProducer on the transport.
self.assertIsInstance(req.transport, http.StringTransport)
def test_registerProducerWhenNotQueuedRegistersPushProducer(self):
"""
Calling L{Request.registerProducer} with an IPushProducer when the
request is not queued registers the producer as a push producer on the
request's transport.
"""
req = http.Request(DummyChannel(), False)
producer = DummyProducer()
req.registerProducer(producer, True)
self.assertEqual([(producer, True)], req.transport.producers)
def test_registerProducerWhenNotQueuedRegistersPullProducer(self):
"""
Calling L{Request.registerProducer} with an IPullProducer when the
request is not queued registers the producer as a pull producer on the
request's transport.
"""
req = http.Request(DummyChannel(), False)
producer = DummyProducer()
req.registerProducer(producer, False)
self.assertEqual([(producer, False)], req.transport.producers)
def test_connectionLostNotification(self):
"""
L{Request.connectionLost} triggers all finish notification Deferreds
and cleans up per-request state.
"""
d = DummyChannel()
request = http.Request(d, True)
finished = request.notifyFinish()
request.connectionLost(Failure(ConnectionLost("Connection done")))
self.assertIdentical(request.channel, None)
return self.assertFailure(finished, ConnectionLost)
def test_finishNotification(self):
"""
L{Request.finish} triggers all finish notification Deferreds.
"""
request = http.Request(DummyChannel(), False)
finished = request.notifyFinish()
# Force the request to have a non-None content attribute. This is
# probably a bug in Request.
request.gotLength(1)
request.finish()
return finished
def test_writeAfterFinish(self):
"""
Calling L{Request.write} after L{Request.finish} has been called results
in a L{RuntimeError} being raised.
"""
request = http.Request(DummyChannel(), False)
finished = request.notifyFinish()
# Force the request to have a non-None content attribute. This is
# probably a bug in Request.
request.gotLength(1)
request.write(b'foobar')
request.finish()
self.assertRaises(RuntimeError, request.write, b'foobar')
return finished
def test_finishAfterConnectionLost(self):
"""
Calling L{Request.finish} after L{Request.connectionLost} has been
called results in a L{RuntimeError} being raised.
"""
channel = DummyChannel()
req = http.Request(channel, False)
req.connectionLost(Failure(ConnectionLost("The end.")))
self.assertRaises(RuntimeError, req.finish)
def test_reprUninitialized(self):
"""
L{Request.__repr__} returns the class name, object address, and
        dummy placeholder values when used on a L{Request} which has not yet
been initialized.
"""
request = http.Request(DummyChannel(), False)
self.assertEqual(
repr(request),
'<Request at 0x%x method=(no method yet) uri=(no uri yet) '
'clientproto=(no clientproto yet)>' % (id(request),))
def test_reprInitialized(self):
"""
L{Request.__repr__} returns, as a L{str}, the class name, object
address, and the method, uri, and client protocol of the HTTP request
it represents. The string is in the form::
<Request at ADDRESS method=METHOD uri=URI clientproto=PROTOCOL>
"""
request = http.Request(DummyChannel(), False)
request.clientproto = b'HTTP/1.0'
request.method = b'GET'
request.uri = b'/foo/bar'
self.assertEqual(
repr(request),
'<Request at 0x%x method=GET uri=/foo/bar '
'clientproto=HTTP/1.0>' % (id(request),))
def test_reprSubclass(self):
"""
Subclasses of L{Request} inherit a C{__repr__} implementation which
includes the subclass's name in place of the string C{"Request"}.
"""
class Otherwise(http.Request):
pass
request = Otherwise(DummyChannel(), False)
self.assertEqual(
repr(request),
'<Otherwise at 0x%x method=(no method yet) uri=(no uri yet) '
'clientproto=(no clientproto yet)>' % (id(request),))
def test_unregisterNonQueuedNonStreamingProducer(self):
"""
L{Request.unregisterProducer} unregisters a non-queued non-streaming
producer from the request and the request's transport.
"""
req = http.Request(DummyChannel(), False)
req.transport = StringTransport()
req.registerProducer(DummyProducer(), False)
req.unregisterProducer()
self.assertEqual((None, None), (req.producer, req.transport.producer))
def test_unregisterNonQueuedStreamingProducer(self):
"""
L{Request.unregisterProducer} unregisters a non-queued streaming
producer from the request and the request's transport.
"""
req = http.Request(DummyChannel(), False)
req.transport = StringTransport()
req.registerProducer(DummyProducer(), True)
req.unregisterProducer()
self.assertEqual((None, None), (req.producer, req.transport.producer))
def test_unregisterQueuedNonStreamingProducer(self):
"""
L{Request.unregisterProducer} unregisters a queued non-streaming
producer from the request but not from the transport.
"""
existing = DummyProducer()
channel = DummyChannel()
transport = StringTransport()
channel.transport = transport
transport.registerProducer(existing, True)
req = http.Request(channel, True)
req.registerProducer(DummyProducer(), False)
req.unregisterProducer()
self.assertEqual((None, existing), (req.producer, transport.producer))
def test_unregisterQueuedStreamingProducer(self):
"""
L{Request.unregisterProducer} unregisters a queued streaming producer
from the request but not from the transport.
"""
existing = DummyProducer()
channel = DummyChannel()
transport = StringTransport()
channel.transport = transport
transport.registerProducer(existing, True)
req = http.Request(channel, True)
req.registerProducer(DummyProducer(), True)
req.unregisterProducer()
self.assertEqual((None, existing), (req.producer, transport.producer))
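# Illustrative note, not part of the original module: the producer tests above
# pin down the queueing contract -- a push (streaming) producer registered on
# a queued request is paused until the request reaches the front of the
# pipeline, while an unqueued request registers the producer directly on the
# transport.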
class MultilineHeadersTestCase(unittest.TestCase):
"""
Tests to exercise handling of multiline headers by L{HTTPClient}. RFCs 1945
(HTTP 1.0) and 2616 (HTTP 1.1) state that HTTP message header fields can
span multiple lines if each extra line is preceded by at least one space or
horizontal tab.
"""
def setUp(self):
"""
Initialize variables used to verify that the header-processing functions
are getting called.
"""
self.handleHeaderCalled = False
self.handleEndHeadersCalled = False
# Dictionary of sample complete HTTP header key/value pairs, including
# multiline headers.
expectedHeaders = {b'Content-Length': b'10',
b'X-Multiline' : b'line-0\tline-1',
b'X-Multiline2' : b'line-2 line-3'}
def ourHandleHeader(self, key, val):
"""
Dummy implementation of L{HTTPClient.handleHeader}.
"""
self.handleHeaderCalled = True
self.assertEqual(val, self.expectedHeaders[key])
def ourHandleEndHeaders(self):
"""
Dummy implementation of L{HTTPClient.handleEndHeaders}.
"""
self.handleEndHeadersCalled = True
def test_extractHeader(self):
"""
A header isn't processed by L{HTTPClient.extractHeader} until it is
confirmed in L{HTTPClient.lineReceived} that the header has been
received completely.
"""
c = ClientDriver()
c.handleHeader = self.ourHandleHeader
c.handleEndHeaders = self.ourHandleEndHeaders
c.lineReceived(b'HTTP/1.0 201')
c.lineReceived(b'Content-Length: 10')
self.assertIdentical(c.length, None)
self.assertFalse(self.handleHeaderCalled)
self.assertFalse(self.handleEndHeadersCalled)
# Signal end of headers.
c.lineReceived(b'')
self.assertTrue(self.handleHeaderCalled)
self.assertTrue(self.handleEndHeadersCalled)
self.assertEqual(c.length, 10)
def test_noHeaders(self):
"""
        An HTTP response with no headers will not cause any calls to
L{handleHeader} but will cause L{handleEndHeaders} to be called on
L{HTTPClient} subclasses.
"""
c = ClientDriver()
c.handleHeader = self.ourHandleHeader
c.handleEndHeaders = self.ourHandleEndHeaders
c.lineReceived(b'HTTP/1.0 201')
# Signal end of headers.
c.lineReceived(b'')
self.assertFalse(self.handleHeaderCalled)
self.assertTrue(self.handleEndHeadersCalled)
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
def test_multilineHeaders(self):
"""
L{HTTPClient} parses multiline headers by buffering header lines until
an empty line or a line that does not start with whitespace hits
lineReceived, confirming that the header has been received completely.
"""
c = ClientDriver()
c.handleHeader = self.ourHandleHeader
c.handleEndHeaders = self.ourHandleEndHeaders
c.lineReceived(b'HTTP/1.0 201')
c.lineReceived(b'X-Multiline: line-0')
self.assertFalse(self.handleHeaderCalled)
# Start continuing line with a tab.
c.lineReceived(b'\tline-1')
c.lineReceived(b'X-Multiline2: line-2')
# The previous header must be complete, so now it can be processed.
self.assertTrue(self.handleHeaderCalled)
# Start continuing line with a space.
c.lineReceived(b' line-3')
c.lineReceived(b'Content-Length: 10')
# Signal end of headers.
c.lineReceived(b'')
self.assertTrue(self.handleEndHeadersCalled)
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
self.assertEqual(c.length, 10)
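# Illustrative note, not part of the original module: a folded header on the
# wire,
#
#     X-Multiline: line-0\r\n
#     \tline-1\r\n
#
# reaches handleHeader as the single value b'line-0\tline-1', which is why the
# expectedHeaders dictionary above joins the continuation lines with their
# leading whitespace character.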
class Expect100ContinueServerTests(unittest.TestCase, ResponseTestMixin):
"""
Test that the HTTP server handles 'Expect: 100-continue' header correctly.
The tests in this class all assume a simplistic behavior where user code
cannot choose to deny a request. Once ticket #288 is implemented and user
    code can run before the body of a POST is processed, this should be
extended to support overriding this behavior.
"""
def test_HTTP10(self):
"""
HTTP/1.0 requests do not get 100-continue returned, even if 'Expect:
100-continue' is included (RFC 2616 10.1.1).
"""
transport = StringTransport()
channel = http.HTTPChannel()
channel.requestFactory = DummyHTTPHandler
channel.makeConnection(transport)
channel.dataReceived(b"GET / HTTP/1.0\r\n")
channel.dataReceived(b"Host: www.example.com\r\n")
channel.dataReceived(b"Content-Length: 3\r\n")
channel.dataReceived(b"Expect: 100-continue\r\n")
channel.dataReceived(b"\r\n")
self.assertEqual(transport.value(), b"")
channel.dataReceived(b"abc")
self.assertResponseEquals(
transport.value(),
[(b"HTTP/1.0 200 OK",
b"Command: GET",
b"Content-Length: 13",
b"Version: HTTP/1.0",
b"Request: /",
b"'''\n3\nabc'''\n")])
def test_expect100ContinueHeader(self):
"""
        If an HTTP/1.1 client sends an 'Expect: 100-continue' header, the
        server
responds with a 100 response code before handling the request body, if
any. The normal resource rendering code will then be called, which
will send an additional response code.
"""
transport = StringTransport()
channel = http.HTTPChannel()
channel.requestFactory = DummyHTTPHandler
channel.makeConnection(transport)
channel.dataReceived(b"GET / HTTP/1.1\r\n")
channel.dataReceived(b"Host: www.example.com\r\n")
channel.dataReceived(b"Expect: 100-continue\r\n")
channel.dataReceived(b"Content-Length: 3\r\n")
# The 100 continue response is not sent until all headers are
# received:
self.assertEqual(transport.value(), b"")
channel.dataReceived(b"\r\n")
# The 100 continue response is sent *before* the body is even
# received:
self.assertEqual(transport.value(), b"HTTP/1.1 100 Continue\r\n\r\n")
channel.dataReceived(b"abc")
response = transport.value()
self.assertTrue(
response.startswith(b"HTTP/1.1 100 Continue\r\n\r\n"))
response = response[len(b"HTTP/1.1 100 Continue\r\n\r\n"):]
self.assertResponseEquals(
response,
[(b"HTTP/1.1 200 OK",
b"Command: GET",
b"Content-Length: 13",
b"Version: HTTP/1.1",
b"Request: /",
b"'''\n3\nabc'''\n")])
def test_expect100ContinueWithPipelining(self):
"""
        If an HTTP/1.1 client sends an 'Expect: 100-continue' header, followed
by another pipelined request, the 100 response does not interfere with
the response to the second request.
"""
transport = StringTransport()
channel = http.HTTPChannel()
channel.requestFactory = DummyHTTPHandler
channel.makeConnection(transport)
channel.dataReceived(
b"GET / HTTP/1.1\r\n"
b"Host: www.example.com\r\n"
b"Expect: 100-continue\r\n"
b"Content-Length: 3\r\n"
b"\r\nabc"
b"POST /foo HTTP/1.1\r\n"
b"Host: www.example.com\r\n"
b"Content-Length: 4\r\n"
b"\r\ndefg")
response = transport.value()
self.assertTrue(
response.startswith(b"HTTP/1.1 100 Continue\r\n\r\n"))
response = response[len(b"HTTP/1.1 100 Continue\r\n\r\n"):]
self.assertResponseEquals(
response,
[(b"HTTP/1.1 200 OK",
b"Command: GET",
b"Content-Length: 13",
b"Version: HTTP/1.1",
b"Request: /",
b"'''\n3\nabc'''\n"),
(b"HTTP/1.1 200 OK",
b"Command: POST",
b"Content-Length: 14",
b"Version: HTTP/1.1",
b"Request: /foo",
b"'''\n4\ndefg'''\n")])
def sub(keys, d):
"""
Create a new dict containing only a subset of the items of an existing
dict.
@param keys: An iterable of the keys which will be added (with values from
C{d}) to the result.
@param d: The existing L{dict} from which to copy items.
@return: The new L{dict} with keys given by C{keys} and values given by the
corresponding values in C{d}.
@rtype: L{dict}
"""
return dict([(k, d[k]) for k in keys])
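# Illustrative usage, not part of the original module:
#
#     sub(["category"], {"category": DeprecationWarning, "filename": "x.py"})
#     # => {"category": DeprecationWarning}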
class DeprecatedRequestAttributesTests(unittest.TestCase):
"""
Tests for deprecated attributes of L{twisted.web.http.Request}.
"""
def test_readHeaders(self):
"""
Reading from the C{headers} attribute is deprecated in favor of use of
the C{responseHeaders} attribute.
"""
request = http.Request(DummyChannel(), True)
request.headers
warnings = self.flushWarnings(
offendingFunctions=[self.test_readHeaders])
self.assertEqual({
"category": DeprecationWarning,
"message": (
"twisted.web.http.Request.headers was deprecated in "
"Twisted 13.2.0: Please use twisted.web.http.Request."
"responseHeaders instead.")},
sub(["category", "message"], warnings[0]))
def test_writeHeaders(self):
"""
Writing to the C{headers} attribute is deprecated in favor of use of
the C{responseHeaders} attribute.
"""
request = http.Request(DummyChannel(), True)
request.headers = {b"foo": b"bar"}
warnings = self.flushWarnings(
offendingFunctions=[self.test_writeHeaders])
self.assertEqual({
"category": DeprecationWarning,
"message": (
"twisted.web.http.Request.headers was deprecated in "
"Twisted 13.2.0: Please use twisted.web.http.Request."
"responseHeaders instead.")},
sub(["category", "message"], warnings[0]))
def test_readReceivedHeaders(self):
"""
Reading from the C{received_headers} attribute is deprecated in favor
of use of the C{requestHeaders} attribute.
"""
request = http.Request(DummyChannel(), True)
request.received_headers
warnings = self.flushWarnings(
offendingFunctions=[self.test_readReceivedHeaders])
self.assertEqual({
"category": DeprecationWarning,
"message": (
"twisted.web.http.Request.received_headers was deprecated "
"in Twisted 13.2.0: Please use twisted.web.http.Request."
"requestHeaders instead.")},
sub(["category", "message"], warnings[0]))
def test_writeReceivedHeaders(self):
"""
Writing to the C{received_headers} attribute is deprecated in favor of use of
the C{requestHeaders} attribute.
"""
request = http.Request(DummyChannel(), True)
request.received_headers = {b"foo": b"bar"}
warnings = self.flushWarnings(
offendingFunctions=[self.test_writeReceivedHeaders])
self.assertEqual({
"category": DeprecationWarning,
"message": (
"twisted.web.http.Request.received_headers was deprecated "
"in Twisted 13.2.0: Please use twisted.web.http.Request."
"requestHeaders instead.")},
sub(["category", "message"], warnings[0]))
|
skycucumber/Messaging-Gateway
|
webapp/venv/lib/python2.7/site-packages/twisted/web/test/test_http.py
|
Python
|
gpl-2.0
| 74,776
|
import unittest
import os
from test.aiml_tests.client import TestClient
from programy.config.file.factory import ConfigurationFactory
from programy.config.client.client import ClientConfiguration
class NowAskMeTrainTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_configuration(self, arguments):
self.configuration = ClientConfiguration()
ConfigurationFactory.load_configuration_from_file(self.configuration, os.path.dirname(__file__)+"/testconfig.yaml")
class TrainAIMLTests(unittest.TestCase):
    def setUp(self):
        TrainAIMLTests.test_client = NowAskMeTrainTestClient()
def test_now_ask_me(self):
TrainAIMLTests.test_client.bot.brain.dump_tree()
response = TrainAIMLTests.test_client.bot.ask_question("test", "daddy is great")
self.assertIsNotNone(response)
self.assertEqual('Now you can ask me: "Who IS GREAT?" and "What does my DADDY be?"', response)
|
dkamotsky/program-y
|
src/test/aiml_tests/train_tests/test_now_ask_me.py
|
Python
|
mit
| 966
|
"""
WSGI config for project_manager project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_manager.settings")
application = get_wsgi_application()
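# Illustrative usage, not part of the original file: any WSGI server can serve
# this callable, for example (assuming gunicorn is installed):
#
#     gunicorn project_manager.wsgi:application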
|
maboily/project_manager
|
project_manager/wsgi.py
|
Python
|
apache-2.0
| 408
|
# This is adapted from the examples of the hangups application.
import asyncio
import sys

import hangups
import messaging.authentication as authentication
from config import ConfigurationReader
def connect_to_hangouts():
    """Obtain hangups authentication cookies, exiting if login fails."""
    try:
        return hangups.auth.get_auth_stdin(ConfigurationReader._refresh_token)
    except hangups.GoogleAuthError as e:
        sys.exit('Login failed ({})'.format(e))
def send_alert(message):
# Obtain hangups authentication cookies.
cookies = authentication.get_auth(ConfigurationReader._refresh_token)
# Instantiate hangups Client instance.
client = hangups.Client(cookies)
# Add an observer to the on_connect event to run the send_message when hangups has finished connecting.
client.on_connect.add_observer(lambda: asyncio.async(send_message(client, message)))
# Start an asyncio event loop by running Client.connect. This will not
# return until Client.disconnect is called, or hangups becomes
# disconnected.
loop = asyncio.get_event_loop()
loop.run_until_complete(client.connect())
@asyncio.coroutine
def send_message(client, message):
"""Send message using connected hangups. Client instance."""
# Instantiate a SendChatMessageRequest Protocol Buffer message describing the request.
request = hangups.hangouts_pb2.SendChatMessageRequest(
request_header=client.get_request_header(),
event_request_header=hangups.hangouts_pb2.EventRequestHeader(
conversation_id=hangups.hangouts_pb2.ConversationId(
id=ConfigurationReader._conversation_id
),
client_generated_id=client.get_client_generated_id(),
),
message_content=hangups.hangouts_pb2.MessageContent(
segment=[hangups.ChatMessageSegment(message).serialize()],
),
)
try:
# Make the request to the Hangouts API.
print("Sending message '", message, "'.")
yield from client.send_chat_message(request)
finally:
# Disconnect the hangups Client to make client.connect return.
yield from client.disconnect()
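# Illustrative usage, not part of the original module, assuming
# ConfigurationReader has been populated with _refresh_token and
# _conversation_id:
#
#     send_alert("Disk usage above 90% on the monitored host")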
|
jorgehortelano/RedOwl
|
BlackGecko/messaging/alert_client.py
|
Python
|
gpl-3.0
| 1,929
|
from __future__ import absolute_import
import os
import shutil
import tempfile
import numpy as np
from numpy.testing import assert_array_equal
from pandas import Series
from shapely.geometry import (Polygon, Point, LineString,
MultiPoint, MultiLineString, MultiPolygon)
from shapely.geometry.base import BaseGeometry
from geopandas import GeoSeries
from .util import unittest, geom_equals, geom_almost_equals
class TestSeries(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.t1 = Polygon([(0, 0), (1, 0), (1, 1)])
self.t2 = Polygon([(0, 0), (1, 1), (0, 1)])
self.sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
self.g1 = GeoSeries([self.t1, self.sq])
self.g2 = GeoSeries([self.sq, self.t1])
self.g3 = GeoSeries([self.t1, self.t2])
self.g3.crs = {'init': 'epsg:4326', 'no_defs': True}
self.g4 = GeoSeries([self.t2, self.t1])
self.na = GeoSeries([self.t1, self.t2, Polygon()])
self.na_none = GeoSeries([self.t1, self.t2, None])
self.a1 = self.g1.copy()
self.a1.index = ['A', 'B']
self.a2 = self.g2.copy()
self.a2.index = ['B', 'C']
self.esb = Point(-73.9847, 40.7484)
self.sol = Point(-74.0446, 40.6893)
self.landmarks = GeoSeries([self.esb, self.sol],
crs={'init': 'epsg:4326', 'no_defs': True})
self.l1 = LineString([(0, 0), (0, 1), (1, 1)])
self.l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1)])
self.g5 = GeoSeries([self.l1, self.l2])
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_single_geom_constructor(self):
p = Point(1,2)
line = LineString([(2, 3), (4, 5), (5, 6)])
poly = Polygon([(0, 0), (1, 0), (1, 1)],
[[(.1, .1), (.9, .1), (.9, .9)]])
mp = MultiPoint([(1, 2), (3, 4), (5, 6)])
mline = MultiLineString([[(1, 2), (3, 4), (5, 6)], [(7, 8), (9, 10)]])
poly2 = Polygon([(1, 1), (1, -1), (-1, -1), (-1, 1)],
[[(.5, .5), (.5, -.5), (-.5, -.5), (-.5, .5)]])
mpoly = MultiPolygon([poly, poly2])
geoms = [p, line, poly, mp, mline, mpoly]
index = ['a', 'b', 'c', 'd']
for g in geoms:
gs = GeoSeries(g)
self.assert_(len(gs) == 1)
self.assert_(gs.iloc[0] is g)
gs = GeoSeries(g, index=index)
self.assert_(len(gs) == len(index))
for x in gs:
self.assert_(x is g)
def test_copy(self):
gc = self.g3.copy()
self.assertTrue(type(gc) is GeoSeries)
self.assertEqual(self.g3.name, gc.name)
self.assertEqual(self.g3.crs, gc.crs)
def test_in(self):
self.assertTrue(self.t1 in self.g1)
self.assertTrue(self.sq in self.g1)
self.assertTrue(self.t1 in self.a1)
self.assertTrue(self.t2 in self.g3)
self.assertTrue(self.sq not in self.g3)
self.assertTrue(5 not in self.g3)
def test_geom_equals(self):
self.assertTrue(np.alltrue(self.g1.geom_equals(self.g1)))
assert_array_equal(self.g1.geom_equals(self.sq), [False, True])
def test_geom_equals_align(self):
a = self.a1.geom_equals(self.a2)
self.assertFalse(a['A'])
self.assertTrue(a['B'])
self.assertFalse(a['C'])
def test_align(self):
a1, a2 = self.a1.align(self.a2)
self.assertTrue(a2['A'].is_empty)
self.assertTrue(a1['B'].equals(a2['B']))
self.assertTrue(a1['C'].is_empty)
def test_geom_almost_equals(self):
# TODO: test decimal parameter
self.assertTrue(np.alltrue(self.g1.geom_almost_equals(self.g1)))
assert_array_equal(self.g1.geom_almost_equals(self.sq), [False, True])
def test_geom_equals_exact(self):
# TODO: test tolerance parameter
self.assertTrue(np.alltrue(self.g1.geom_equals_exact(self.g1, 0.001)))
assert_array_equal(self.g1.geom_equals_exact(self.sq, 0.001), [False, True])
def test_to_file(self):
""" Test to_file and from_file """
tempfilename = os.path.join(self.tempdir, 'test.shp')
self.g3.to_file(tempfilename)
# Read layer back in?
s = GeoSeries.from_file(tempfilename)
self.assertTrue(all(self.g3.geom_equals(s)))
# TODO: compare crs
def test_representative_point(self):
        self.assertTrue(np.all(self.g1.contains(self.g1.representative_point())))
        self.assertTrue(np.all(self.g2.contains(self.g2.representative_point())))
        self.assertTrue(np.all(self.g3.contains(self.g3.representative_point())))
        self.assertTrue(np.all(self.g4.contains(self.g4.representative_point())))
def test_transform(self):
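        # Round-trip: project the lon/lat landmarks to UTM zone 18N
        # (EPSG:26918) and back, which should recover the original
        # coordinates up to floating-point error.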
utm18n = self.landmarks.to_crs(epsg=26918)
lonlat = utm18n.to_crs(epsg=4326)
        self.assertTrue(np.all(self.landmarks.geom_almost_equals(lonlat)))
with self.assertRaises(ValueError):
self.g1.to_crs(epsg=4326)
with self.assertRaises(TypeError):
self.landmarks.to_crs(crs=None, epsg=None)
def test_fillna(self):
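        # Filling the None entry with an empty Point keeps every element a
        # geometry while leaving the non-missing rows untouched.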
na = self.na_none.fillna(Point())
self.assertTrue(isinstance(na[2], BaseGeometry))
self.assertTrue(na[2].is_empty)
self.assertTrue(geom_equals(self.na_none[:2], na[:2]))
# XXX: method works inconsistently for different pandas versions
#self.na_none.fillna(method='backfill')
def test_coord_slice(self):
""" Test CoordinateSlicer """
# need some better test cases
self.assertTrue(geom_equals(self.g3, self.g3.cx[:, :]))
self.assertTrue(geom_equals(self.g3[[True, False]], self.g3.cx[0.9:, :0.1]))
self.assertTrue(geom_equals(self.g3[[False, True]], self.g3.cx[0:0.1, 0.9:1.0]))
def test_geoseries_geointerface(self):
self.assertEqual(self.g1.__geo_interface__['type'], 'FeatureCollection')
self.assertEqual(len(self.g1.__geo_interface__['features']),
self.g1.shape[0])
if __name__ == '__main__':
unittest.main()
| scw/geopandas | tests/test_geoseries.py | Python | bsd-3-clause | 6,170 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
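    # Make sure django-filer's initial migration has run first, so that
    # orm['filer.Image'] is available to the ForeignKey below.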
depends_on = (
("filer", "0001_initial"),
)
def forwards(self, orm):
# Adding field 'Post.key_visual'
db.add_column(u'aldryn_blog_post', 'key_visual',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.Image'], null=True, blank=True),
keep_default=False)
# Adding field 'Post.lead_in'
db.add_column(u'aldryn_blog_post', 'lead_in',
self.gf('djangocms_text_ckeditor.fields.HTMLField')(default=''),
keep_default=False)
def backwards(self, orm):
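        # ForeignKey fields are stored in a column with an '_id' suffix,
        # hence 'key_visual_id' rather than 'key_visual' here.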
# Deleting field 'Post.key_visual'
db.delete_column(u'aldryn_blog_post', 'key_visual_id')
# Deleting field 'Post.lead_in'
db.delete_column(u'aldryn_blog_post', 'lead_in')
models = {
u'aldryn_blog.latestentriesplugin': {
'Meta': {'object_name': 'LatestEntriesPlugin', 'db_table': "u'cmsplugin_latestentriesplugin'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'latest_entries': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['taggit.Tag']", 'symmetrical': 'False', 'blank': 'True'})
},
u'aldryn_blog.post': {
'Meta': {'ordering': "['-publication_date']", 'object_name': 'Post'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key_visual': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'lead_in': ('djangocms_text_ckeditor.fields.HTMLField', [], {'default': "''"}),
'publication_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['aldryn_blog']
| aldryn/aldryn-blog | aldryn_blog/migrations/0002_auto__add_field_post_key_visual__add_field_post_lead_in.py | Python | bsd-3-clause | 13,055 |
# Quick plot on when I gathered the data
import matplotlib.dates as dates
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import pandas as pd

from tweetloader import TweetLoader
def count_and_plot(raw, ax, start='2016-1-1', end='2016-6-24', freq='D', color='blue'):
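    """Count tweets per `freq`-sized interval between `start` and `end`
    and draw the counts as bars on `ax`.

    `raw` must provide 'created_at' and 'id' columns; returns the
    datetime-indexed, sorted copy used for counting.
    """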
df = raw.copy()
df.index = pd.DatetimeIndex(df['created_at'])
df = df.sort_index()
rng = pd.date_range(start, end, freq=freq)
    counts = []
    for i in range(len(rng) - 1):
        # Use a half-open interval: label-based datetime slicing is inclusive
        # at both ends and would double-count tweets on a bin boundary.
        in_bin = (df.index >= rng[i]) & (df.index < rng[i + 1])
        counts.append(df.loc[in_bin, 'id'].count())
ax.bar(rng[1:], counts, color=color, lw=0, alpha=0.6)
return df
# h = TweetLoader('HillaryClinton')
# t = TweetLoader('realDonaldTrump')
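# Load the frozen 2016-07-13 snapshots from data/backup/ rather than pulling
# live timelines, so the plot is reproducible without API access.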
h = TweetLoader('', path='data/backup/', filename='hillary_2016-07-13.json')
t = TweetLoader('', path='data/backup/', filename='trump_2016-07-13.json')
h.load()
t.load()
# bs = TweetLoader('BernieSanders', filename='sanders.json', track_location=False)
# bs.load()
# s = TweetLoader(filename='search.json', track_location=True)
s = TweetLoader(filename='search_2016-07-13.json', track_location=True, path='data/backup/')
s.load()
# Prepare plot
fig, ax = plt.subplots()
# Chart tweets with time
startdate = '2016-4-30'
enddate = '2016-7-15'
freq = 'D'
count_and_plot(h.tweets, ax, start=startdate, end=enddate, freq=freq, color='blue')
count_and_plot(t.tweets, ax, start=startdate, end=enddate, freq=freq, color='red')
count_and_plot(s.tweets, ax, start=startdate, end=enddate, freq=freq, color='black')
# Plot formatting
ax.set_ylabel('Number of Tweets')
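# Minor ticks: day-of-month labels every 15 days; major ticks: month starts,
# labelled with month and year on separate lines.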
ax.xaxis.set_minor_locator(dates.DayLocator(interval=15))
ax.xaxis.set_minor_formatter(dates.DateFormatter('%d'))
ax.xaxis.grid(True, which="major")
ax.xaxis.set_major_locator(dates.MonthLocator())
ax.xaxis.set_major_formatter(dates.DateFormatter('\n%b\n%Y'))
# ax.set_yscale("log")
plt.tight_layout()
# Make legend
red = mpatches.Patch(color='red', label='Donald Trump', alpha=0.6)
blue = mpatches.Patch(color='blue', label='Hillary Clinton', alpha=0.6)
black = mpatches.Patch(color='black', label='Search Results', alpha=0.6)
plt.legend(handles=[red, blue, black], loc='best')
plt.savefig('figures/tweets_time.png')
| dr-rodriguez/The-Divided-States-of-America | scripts/twitter_sampling.py | Python | mit | 2,187 |