repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
ojengwa/oh-mainline | vendor/packages/docutils/test/test_writers/test_docutils_xml.py | 16 | 5871 | #!/usr/bin/env python
# $Id: test_docutils_xml.py 7315 2012-01-18 10:16:20Z milde $
# Author: Lea Wiemann <LeWiemann@gmail.com>
# Copyright: This module has been placed in the public domain.
"""
Test for docutils XML writer.
.. Attention::
While the tests compare the output on the string-level, no guarantee
is given against changes to identical XML representations like
``<empty></empty>`` vs. ``<empty/>``. The sample strings in this test
module mirror the current behaviour of the docutils_xml writer.
"""
from StringIO import StringIO
from __init__ import DocutilsTestSupport # must be imported before docutils
import docutils
import docutils.core
# sample strings
# --------------
source = u"""\
Test
----------
Test. \xe4\xf6\xfc\u20ac"""
xmldecl = u"""<?xml version="1.0" encoding="iso-8859-1"?>
"""
doctypedecl = u"""\
<!DOCTYPE document PUBLIC "+//IDN docutils.sourceforge.net\
//DTD Docutils Generic//EN//XML"\
"http://docutils.sourceforge.net/docs/ref/docutils.dtd">
"""
generatedby = u'<!-- Generated by Docutils %s -->\n' % docutils.__version__
bodynormal = u"""\
<document source="&lt;string&gt;"><paragraph>Test</paragraph>\
<transition></transition><paragraph>Test. \xe4\xf6\xfc&#8364;</paragraph>\
</document>"""
bodynewlines = u"""\
<document source="&lt;string&gt;">
<paragraph>Test</paragraph>
<transition>
</transition>
<paragraph>Test. \xe4\xf6\xfc&#8364;</paragraph>
</document>
"""
bodyindents = u"""\
<document source="&lt;string&gt;">
<paragraph>Test</paragraph>
<transition>
</transition>
<paragraph>Test. \xe4\xf6\xfc&#8364;</paragraph>
</document>
"""
# raw XML
# """""""
raw_xml_source = u"""\
.. raw:: xml
<root>
<child>Test \xe4\xf6\xfc\u20ac</child>
&gt;
&lt;
</root>
.. role:: xml(raw)
:format: xml
:xml:`<test>inline raw XML</test>`.
"""
raw_xml = u"""\
<document source="&lt;string&gt;">
<raw format="xml" xml:space="preserve"><root>
<child>Test \xe4\xf6\xfc&#8364;</child>
&gt;
&lt;
</root></raw>
<paragraph><raw classes="xml" format="xml" xml:space="preserve">\
<test>inline raw XML</test></raw>.</paragraph>
</document>
"""
invalid_raw_xml_source = u"""\
.. raw:: xml
<root>
<child>Test \xe4\xf6\xfc\u20ac</child>
</mismatch>
.. role:: xml(raw)
:format: xml
:xml:`<test>inline raw XML</test>`.
"""
invalid_raw_xml = u"""\
<document source="&lt;string&gt;">
<raw format="xml" xml:space="preserve"><root>
<child>Test \xe4\xf6\xfc\u20ac</child>
</mismatch></raw>
<paragraph><raw classes="xml" format="xml" xml:space="preserve">\
<test>inline raw XML</test></raw>.</paragraph>
</document>
"""
def publish_xml(settings, source):
return docutils.core.publish_string(source=source.encode('utf8'),
reader_name='standalone',
writer_name='docutils_xml',
settings_overrides=settings)
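# Illustrative sketch (not part of the original test module): publish_string
# can be called the same way outside the test harness. The settings values
# below are assumptions chosen to mirror this module's defaults.
#
# xml_output = docutils.core.publish_string(
#     source=u'Test'.encode('utf8'),
#     reader_name='standalone',
#     writer_name='docutils_xml',
#     settings_overrides={'output_encoding': 'iso-8859-1',
#                         '_disable_config': True})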
# XML Test Case
# -------------
class DocutilsXMLTestCase(DocutilsTestSupport.StandardTestCase):
settings = {'input_encoding': 'utf8',
'output_encoding': 'iso-8859-1',
'_disable_config': True,
'indents': False,
'newlines': True,
'xml_declaration': False,
'doctype_declaration': False,
}
def test_publish(self):
settings = self.settings.copy()
settings['newlines'] = False
for settings['xml_declaration'] in True, False:
for settings['doctype_declaration'] in True, False:
expected = u''
if settings['xml_declaration']:
expected += xmldecl
if settings['doctype_declaration']:
expected += doctypedecl
expected += generatedby
expected += bodynormal
result = publish_xml(settings, source)
self.assertEqual(result, expected.encode('latin1'))
def test_publish_indents(self):
settings = self.settings.copy()
settings['indents'] = True
result = publish_xml(settings, source)
expected = (generatedby + bodyindents).encode('latin1')
self.assertEqual(result, expected)
def test_publish_newlines(self):
settings = self.settings.copy()
result = publish_xml(settings, source)
expected = (generatedby + bodynewlines).encode('latin1')
self.assertEqual(result, expected)
def test_raw_xml(self):
result = publish_xml(self.settings, raw_xml_source)
expected = (generatedby
+ raw_xml).encode('latin1', 'xmlcharrefreplace')
self.assertEqual(result, expected)
def test_invalid_raw_xml(self):
warnings = StringIO()
settings = self.settings.copy()
settings['warning_stream'] = warnings
result = publish_xml(settings, invalid_raw_xml_source)
expected = (generatedby
+ invalid_raw_xml).encode('latin1', 'xmlcharrefreplace')
self.assertEqual(result, expected)
warnings.seek(0)
self.assertEqual(warnings.readlines(),
[u'<string>:5: '
u'(WARNING/2) Invalid raw XML in column 2, line offset 3:\n',
u'<root>\n',
u' <child>Test \xe4\xf6\xfc\u20ac</child>\n',
u'</mismatch>\n',
u'<string>:10: '
u'(WARNING/2) Invalid raw XML in column 30, line offset 1:\n',
u'<test>inline raw XML</test>\n'])
# abort with SystemMessage if halt_level is "info":
settings['halt_level'] = 2
settings['warning_stream'] = ''
self.assertRaises(docutils.utils.SystemMessage,
publish_xml, settings, invalid_raw_xml_source)
if __name__ == '__main__':
import unittest
unittest.main()
| agpl-3.0 |
acsone/social | mail_follower_custom_notification/models/mail_message.py | 2 | 1082 | # -*- coding: utf-8 -*-
# © 2015 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, models
class MailMessage(models.Model):
_inherit = 'mail.message'
@api.multi
def _notify(self, force_send=False, user_signature=True):
"""notify author if she's a follower and turned on force_own"""
self.ensure_one()
if self.subtype_id and self.model and self.res_id:
author_follower = self.env['mail.followers'].search([
('res_model', '=', self.model),
('res_id', '=', self.res_id),
('partner_id', '=', self.author_id.id),
('force_own_subtype_ids', '=', self.subtype_id.id),
])
self.env['mail.notification']._notify(
self.id, partners_to_notify=author_follower.partner_id.ids,
force_send=force_send, user_signature=user_signature)
return super(MailMessage, self)._notify(
self.id, force_send=force_send, user_signature=user_signature)
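# Illustrative sketch (hypothetical model and ids): for a message posted on
# crm.lead 42 whose author follows that record and enabled force_own for the
# message's subtype, the search above matches one mail.followers record, so
# the author is explicitly added to partners_to_notify even though standard
# notification logic would otherwise skip the message author.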
| agpl-3.0 |
Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2015_01_01/models/_models_py3.py | 1 | 2786 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import List, Optional, Union
import msrest.serialization
from ._management_lock_client_enums import *
class ManagementLockListResult(msrest.serialization.Model):
"""List of management locks.
:param value: The list of locks.
:type value: list[~azure.mgmt.resource.locks.v2015_01_01.models.ManagementLockObject]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ManagementLockObject]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ManagementLockObject"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(ManagementLockListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ManagementLockObject(msrest.serialization.Model):
"""Management lock information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The Id of the lock.
:vartype id: str
:ivar type: The type of the lock.
:vartype type: str
:param name: The name of the lock.
:type name: str
:param level: The lock level of the management lock. Possible values include: "NotSpecified",
"CanNotDelete", "ReadOnly".
:type level: str or ~azure.mgmt.resource.locks.v2015_01_01.models.LockLevel
:param notes: The notes of the management lock.
:type notes: str
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'level': {'key': 'properties.level', 'type': 'str'},
'notes': {'key': 'properties.notes', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
level: Optional[Union[str, "LockLevel"]] = None,
notes: Optional[str] = None,
**kwargs
):
super(ManagementLockObject, self).__init__(**kwargs)
self.id = None
self.type = None
self.name = name
self.level = level
self.notes = notes
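# Illustrative usage sketch (hypothetical name and notes; not part of the
# generated code):
#
# lock = ManagementLockObject(name='protect-prod', level='CanNotDelete',
#                             notes='Managed by the platform team.')
# # 'id' and 'type' are read-only and stay None until populated by the server.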
| mit |
atosatto/ansible | lib/ansible/plugins/action/set_fact.py | 53 | 1987 | # Copyright 2013 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.constants import mk_boolean as boolean
from ansible.module_utils.six import iteritems, string_types
from ansible.plugins.action import ActionBase
from ansible.utils.vars import isidentifier
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
facts = dict()
if self._task.args:
for (k, v) in iteritems(self._task.args):
k = self._templar.template(k)
if not isidentifier(k):
result['failed'] = True
result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
"letters, numbers and underscores." % k)
return result
if isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
v = boolean(v)
facts[k] = v
result['changed'] = False
result['ansible_facts'] = facts
return result
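# Illustrative playbook sketch (hypothetical variable names) showing the
# coercion performed above: quoted 'yes'/'no'/'true'/'false' strings are
# stored as real booleans, while everything else is kept as-is.
#
# - set_fact:
#     deploy_enabled: 'yes'    # stored as boolean True
#     build_label: 'nightly'   # stored as the string 'nightly'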
| gpl-3.0 |
davvid/git-cola | cola/widgets/toolbar.py | 3 | 15971 | from __future__ import division, absolute_import, unicode_literals
from functools import partial
from qtpy import QtGui
from qtpy.QtCore import Qt
from qtpy import QtWidgets
from ..i18n import N_
from ..widgets import standard
from ..qtutils import get
from .. import icons
from .. import qtutils
from .toolbarcmds import COMMANDS
from . import defs
TREE_LAYOUT = {
'Others': ['Others::LaunchEditor', 'Others::RevertUnstagedEdits'],
'File': [
'File::NewRepo',
'File::OpenRepo',
'File::OpenRepoNewWindow',
'File::Refresh',
'File::EditRemotes',
'File::RecentModified',
'File::SaveAsTarZip',
'File::ApplyPatches',
'File::ExportPatches',
],
'Actions': [
'Actions::Fetch',
'Actions::Pull',
'Actions::Push',
'Actions::Stash',
'Actions::CreateTag',
'Actions::CherryPick',
'Actions::Merge',
'Actions::AbortMerge',
'Actions::UpdateSubmodules',
'Actions::Grep',
'Actions::Search',
],
'Commit@@verb': [
'Commit::Stage',
'Commit::AmendLast',
'Commit::UndoLastCommit',
'Commit::StageAll',
'Commit::UnstageAll',
'Commit::Unstage',
'Commit::LoadCommitMessage',
'Commit::GetCommitMessageTemplate',
],
'Diff': ['Diff::Difftool', 'Diff::Expression', 'Diff::Branches', 'Diff::Diffstat'],
'Branch': [
'Branch::Review',
'Branch::Create',
'Branch::Checkout',
'Branch::Delete',
'Branch::DeleteRemote',
'Branch::Rename',
'Branch::BrowseCurrent',
'Branch::BrowseOther',
'Branch::VisualizeCurrent',
'Branch::VisualizeAll',
],
'Reset': [
'Commit::UndoLastCommit',
'Commit::UnstageAll',
'Actions::ResetSoft',
'Actions::ResetMixed',
'Actions::RestoreWorktree',
'Actions::ResetKeep',
'Actions::ResetHard',
],
'View': ['View::DAG', 'View::FileBrowser'],
}
def configure(toolbar, parent=None):
"""Launches the Toolbar configure dialog"""
if not parent:
parent = qtutils.active_window()
view = ToolbarView(toolbar, parent)
view.show()
return view
def get_toolbars(widget):
return widget.findChildren(ToolBar)
def add_toolbar(context, widget):
toolbars = get_toolbars(widget)
name = 'ToolBar%d' % (len(toolbars) + 1)
toolbar = ToolBar.create(context, name)
widget.addToolBar(toolbar)
configure(toolbar)
class ToolBarState(object):
"""export_state() and apply_state() providers for toolbars"""
def __init__(self, context, widget):
"""widget must be a QMainWindow for toolBarArea(), etc."""
self.context = context
self.widget = widget
def apply_state(self, toolbars):
context = self.context
widget = self.widget
for data in toolbars:
toolbar = ToolBar.create(context, data['name'])
toolbar.load_items(data['items'])
toolbar.set_show_icons(data['show_icons'])
toolbar.setVisible(data['visible'])
toolbar_area = decode_toolbar_area(data['area'])
if data['break']:
widget.addToolBarBreak(toolbar_area)
widget.addToolBar(toolbar_area, toolbar)
# floating toolbars must be set after added
if data['float']:
toolbar.setWindowFlags(Qt.Tool | Qt.FramelessWindowHint)
toolbar.move(data['x'], data['y'])
# TODO: handle changed width when more than one toolbar exists in
# an area
def export_state(self):
result = []
widget = self.widget
toolbars = widget.findChildren(ToolBar)
for toolbar in toolbars:
toolbar_area = widget.toolBarArea(toolbar)
if toolbar_area == Qt.NoToolBarArea:
continue # filter out removed toolbars
items = [x.data() for x in toolbar.actions()]
result.append(
{
'name': toolbar.windowTitle(),
'area': encode_toolbar_area(toolbar_area),
'break': widget.toolBarBreak(toolbar),
'float': toolbar.isFloating(),
'x': toolbar.pos().x(),
'y': toolbar.pos().y(),
'width': toolbar.width(),
'height': toolbar.height(),
'show_icons': toolbar.show_icons(),
'visible': toolbar.isVisible(),
'items': items,
}
)
return result
class ToolBar(QtWidgets.QToolBar):
SEPARATOR = 'Separator'
@staticmethod
def create(context, name):
return ToolBar(context, name, TREE_LAYOUT, COMMANDS)
def __init__(self, context, title, tree_layout, toolbar_commands):
QtWidgets.QToolBar.__init__(self)
self.setWindowTitle(title)
self.setObjectName(title)
self.context = context
self.tree_layout = tree_layout
self.commands = toolbar_commands
def set_show_icons(self, show_icons):
if show_icons:
self.setToolButtonStyle(Qt.ToolButtonIconOnly)
else:
self.setToolButtonStyle(Qt.ToolButtonTextOnly)
def show_icons(self):
return self.toolButtonStyle() == Qt.ToolButtonIconOnly
def load_items(self, items):
for data in items:
self.add_action_from_data(data)
def add_action_from_data(self, data):
parent = data['parent']
child = data['child']
if child == self.SEPARATOR:
toolbar_action = self.addSeparator()
toolbar_action.setData(data)
return
tree_items = self.tree_layout.get(parent, [])
if child in tree_items and child in self.commands:
command = self.commands[child]
title = N_(command['title'])
callback = partial(command['action'], self.context)
icon = None
command_icon = command.get('icon', None)
if command_icon:
icon = getattr(icons, command_icon, None)
if callable(icon):
icon = icon()
if icon:
toolbar_action = self.addAction(icon, title, callback)
else:
toolbar_action = self.addAction(title, callback)
toolbar_action.setData(data)
tooltip = command.get('tooltip', None)
if tooltip:
toolbar_action.setToolTip('%s\n%s' % (title, tooltip))
def delete_toolbar(self):
self.parent().removeToolBar(self)
def contextMenuEvent(self, event):
menu = QtWidgets.QMenu()
menu.addAction(N_('Configure toolbar'), partial(configure, self))
menu.addAction(N_('Delete toolbar'), self.delete_toolbar)
menu.exec_(event.globalPos())
def encode_toolbar_area(toolbar_area):
"""Encode a Qt::ToolBarArea as a string"""
if toolbar_area == Qt.LeftToolBarArea:
result = 'left'
elif toolbar_area == Qt.RightToolBarArea:
result = 'right'
elif toolbar_area == Qt.TopToolBarArea:
result = 'top'
elif toolbar_area == Qt.BottomToolBarArea:
result = 'bottom'
else: # fallback to "bottom"
result = 'bottom'
return result
def decode_toolbar_area(string):
"""Decode an encoded toolbar area string into a Qt::ToolBarArea"""
if string == 'left':
result = Qt.LeftToolBarArea
elif string == 'right':
result = Qt.RightToolBarArea
elif string == 'top':
result = Qt.TopToolBarArea
elif string == 'bottom':
result = Qt.BottomToolBarArea
else:
result = Qt.BottomToolBarArea
return result
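# Round-trip sketch: the two helpers above invert each other for every
# supported area, and unrecognized strings fall back to the bottom area.
#
# assert decode_toolbar_area(encode_toolbar_area(Qt.LeftToolBarArea)) == \
#     Qt.LeftToolBarArea
# assert decode_toolbar_area('no-such-area') == Qt.BottomToolBarArea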
class ToolbarView(standard.Dialog):
"""Provides the git-cola 'ToolBar' configure dialog"""
SEPARATOR_TEXT = '----------------------------'
def __init__(self, toolbar, parent=None):
standard.Dialog.__init__(self, parent)
self.setWindowTitle(N_('Configure toolbar'))
self.toolbar = toolbar
self.left_list = ToolbarTreeWidget(self)
self.right_list = DraggableListWidget(self)
self.text_toolbar_name = QtWidgets.QLabel()
self.text_toolbar_name.setText(N_('Name'))
self.toolbar_name = QtWidgets.QLineEdit()
self.toolbar_name.setText(toolbar.windowTitle())
self.add_separator = qtutils.create_button(N_('Add Separator'))
self.remove_item = qtutils.create_button(N_('Remove Element'))
checked = toolbar.show_icons()
checkbox_text = N_('Show icon? (if available)')
self.show_icon = qtutils.checkbox(checkbox_text, checkbox_text, checked)
self.apply_button = qtutils.ok_button(N_('Apply'))
self.close_button = qtutils.close_button()
self.close_button.setDefault(True)
self.right_actions = qtutils.hbox(
defs.no_margin, defs.spacing, self.add_separator, self.remove_item
)
self.name_layout = qtutils.hbox(
defs.no_margin, defs.spacing, self.text_toolbar_name, self.toolbar_name
)
self.left_layout = qtutils.vbox(defs.no_margin, defs.spacing, self.left_list)
self.right_layout = qtutils.vbox(
defs.no_margin, defs.spacing, self.right_list, self.right_actions
)
self.top_layout = qtutils.hbox(
defs.no_margin, defs.spacing, self.left_layout, self.right_layout
)
self.actions_layout = qtutils.hbox(
defs.no_margin,
defs.spacing,
self.show_icon,
qtutils.STRETCH,
self.close_button,
self.apply_button,
)
self.main_layout = qtutils.vbox(
defs.margin,
defs.spacing,
self.name_layout,
self.top_layout,
self.actions_layout,
)
self.setLayout(self.main_layout)
qtutils.connect_button(self.add_separator, self.add_separator_action)
qtutils.connect_button(self.remove_item, self.remove_item_action)
qtutils.connect_button(self.apply_button, self.apply_action)
qtutils.connect_button(self.close_button, self.accept)
self.load_right_items()
self.load_left_items()
self.init_size(parent=parent)
def load_right_items(self):
commands = self.toolbar.commands
for action in self.toolbar.actions():
data = action.data()
if data['child'] == self.toolbar.SEPARATOR:
self.add_separator_action()
else:
try:
child_data = data['child']
command = commands[child_data]
except KeyError:
continue
title = command['title']
icon = command.get('icon', None)
tooltip = command.get('tooltip', None)
self.right_list.add_item(title, tooltip, data, icon)
def load_left_items(self):
commands = self.toolbar.commands
for parent in self.toolbar.tree_layout:
top = self.left_list.insert_top(parent)
for item in self.toolbar.tree_layout[parent]:
try:
command = commands[item]
except KeyError:
continue
icon = command.get('icon', None)
tooltip = command.get('tooltip', None)
child = create_child(parent, item, command['title'], tooltip, icon)
top.appendRow(child)
top.sortChildren(0, Qt.AscendingOrder)
def add_separator_action(self):
data = {'parent': None, 'child': self.toolbar.SEPARATOR}
self.right_list.add_separator(self.SEPARATOR_TEXT, data)
def remove_item_action(self):
items = self.right_list.selectedItems()
for item in items:
self.right_list.takeItem(self.right_list.row(item))
def apply_action(self):
self.toolbar.clear()
self.toolbar.set_show_icons(get(self.show_icon))
self.toolbar.setWindowTitle(self.toolbar_name.text())
for item in self.right_list.get_items():
data = item.data(Qt.UserRole)
self.toolbar.add_action_from_data(data)
class DraggableListMixin(object):
items = []
def __init__(self, widget, Base):
self.widget = widget
self.Base = Base
widget.setAcceptDrops(True)
widget.setSelectionMode(widget.SingleSelection)
widget.setDragEnabled(True)
widget.setDropIndicatorShown(True)
def dragEnterEvent(self, event):
widget = self.widget
self.Base.dragEnterEvent(widget, event)
def dragMoveEvent(self, event):
widget = self.widget
self.Base.dragMoveEvent(widget, event)
def dragLeaveEvent(self, event):
widget = self.widget
self.Base.dragLeaveEvent(widget, event)
def dropEvent(self, event):
widget = self.widget
event.setDropAction(Qt.MoveAction)
self.Base.dropEvent(widget, event)
def get_items(self):
widget = self.widget
base = self.Base
items = [base.item(widget, i) for i in range(base.count(widget))]
return items
# pylint: disable=too-many-ancestors
class DraggableListWidget(QtWidgets.QListWidget):
Mixin = DraggableListMixin
def __init__(self, parent=None):
QtWidgets.QListWidget.__init__(self, parent)
self.setAcceptDrops(True)
self.setSelectionMode(self.SingleSelection)
self.setDragEnabled(True)
self.setDropIndicatorShown(True)
self._mixin = self.Mixin(self, QtWidgets.QListWidget)
def dragEnterEvent(self, event):
return self._mixin.dragEnterEvent(event)
def dragMoveEvent(self, event):
return self._mixin.dragMoveEvent(event)
def dropEvent(self, event):
return self._mixin.dropEvent(event)
def add_separator(self, title, data):
item = QtWidgets.QListWidgetItem()
item.setText(title)
item.setData(Qt.UserRole, data)
self.addItem(item)
def add_item(self, title, tooltip, data, icon):
item = QtWidgets.QListWidgetItem()
item.setText(N_(title))
item.setData(Qt.UserRole, data)
if tooltip:
item.setToolTip(tooltip)
if icon:
icon_func = getattr(icons, icon)
item.setIcon(icon_func())
self.addItem(item)
def get_items(self):
return self._mixin.get_items()
# pylint: disable=too-many-ancestors
class ToolbarTreeWidget(standard.TreeView):
def __init__(self, parent):
standard.TreeView.__init__(self, parent)
self.setDragEnabled(True)
self.setDragDropMode(QtWidgets.QAbstractItemView.DragOnly)
self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.setDropIndicatorShown(True)
self.setRootIsDecorated(True)
self.setHeaderHidden(True)
self.setAlternatingRowColors(False)
self.setSortingEnabled(False)
self.setModel(QtGui.QStandardItemModel())
def insert_top(self, title):
item = create_item(title, title)
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
self.model().insertRow(0, item)
self.model().sort(0)
return item
def create_child(parent, child, title, tooltip, icon):
data = {'parent': parent, 'child': child}
item = create_item(title, data)
if tooltip:
item.setToolTip(tooltip)
if icon:
icon_func = getattr(icons, icon, None)
item.setIcon(icon_func())
return item
def create_item(name, data):
item = QtGui.QStandardItem()
item.setEditable(False)
item.setDragEnabled(True)
item.setText(N_(name))
item.setData(data, Qt.UserRole)
return item
| gpl-2.0 |
asposebarcode/Aspose_BarCode_Cloud | Examples/Python/generating-saving/cloud-storage/generate-barcode-and-save-asposecloudstorage.py | 4 | 1910 | import asposebarcodecloud
from asposebarcodecloud.BarcodeApi import BarcodeApi
from asposebarcodecloud.BarcodeApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
import ConfigParser
config = ConfigParser.ConfigParser()
config.readfp(open(r'../../data/config.properties'))
apiKey = config.get('AppConfig', 'api_key')
appSid = config.get('AppConfig', 'app_sid')
out_folder = config.get('AppConfig', 'out_folder')
data_folder = "../../data/" #resource data folder
#ExStart:1
#Instantiate Aspose.Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose.Barcode API SDK
api_client = asposebarcodecloud.ApiClient.ApiClient(apiKey, appSid, True)
barcodeApi = BarcodeApi(api_client);
#Set the barcode file name created on server
name = "sample-barcode"
#Set Text to encode inside barcode.
text = "Aspose.BarCode"
#Set Barcode Symbology
type = "Code128"
#Set Generated Barcode Image Format
format = "jpeg"
try:
#invoke Aspose.BarCode Cloud SDK API to create barcode and put in cloud storage
response = barcodeApi.PutBarcodeGenerateFile(name, file=None, text=text, type=type, format=format)
if response.Status == "OK":
#download generated barcode from cloud storage
response = storageApi.GetDownload(Path=name)
outfilename = out_folder + name + "." + format
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
#ExEnd:1 | mit |
zhangjunli177/sahara | sahara/utils/tempfiles.py | 18 | 1154 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import shutil
import tempfile
from sahara import exceptions as ex
from sahara.i18n import _
@contextlib.contextmanager
def tempdir(**kwargs):
argdict = kwargs.copy()
if 'dir' not in argdict:
argdict['dir'] = '/tmp/'
tmpdir = tempfile.mkdtemp(**argdict)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
raise ex.SystemError(
_("Failed to delete temp dir %(dir)s (reason: %(reason)s)") %
{'dir': tmpdir, 'reason': e})
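# Illustrative usage sketch (hypothetical caller code): the directory is
# removed on exit even if the block raises; cleanup failures surface as
# SystemError.
#
# with tempdir(prefix='sahara-') as path:
#     do_work_in(path)  # hypothetical helper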
| apache-2.0 |
MeanEYE/Sunflower | sunflower/plugins/file_list/plugin.py | 1 | 1465 | from __future__ import absolute_import
from .file_list import FileList
from .trash_list import TrashList
from .gio_extension import SambaExtension, FtpExtension, DavExtension, SftpExtension
from .gio_provider import NetworkProvider, TrashProvider, DavProvider, DavsProvider, Gphoto2Provider, MtpProvider
from .gio_provider import SambaProvider, FtpProvider, SftpProvider, ArchiveProvider
from .local_provider import LocalProvider
def register_plugin(application):
"""Register plugin classes with application."""
application.register_class('file_list', _('Local file list'), FileList)
application.register_class('trash_list', _('Trash can'), TrashList)
# register providers
application.register_provider(LocalProvider)
application.register_provider(SambaProvider)
application.register_provider(FtpProvider)
application.register_provider(SftpProvider)
application.register_provider(NetworkProvider)
application.register_provider(TrashProvider)
application.register_provider(DavProvider)
application.register_provider(DavsProvider)
application.register_provider(Gphoto2Provider)
application.register_provider(MtpProvider)
application.register_provider(ArchiveProvider)
# register mount manager extension
application.register_mount_manager_extension(SambaExtension)
application.register_mount_manager_extension(FtpExtension)
application.register_mount_manager_extension(SftpExtension)
application.register_mount_manager_extension(DavExtension)
| gpl-3.0 |
kivatu/kivy-bak | examples/canvas/lines_extended.py | 17 | 3590 | from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.widget import Widget
from kivy.lang import Builder
Builder.load_string('''
<LineEllipse1>:
canvas:
Color:
rgba: 1, .1, .1, .9
Line:
width: 2.
ellipse: (self.x, self.y, self.width, self.height)
Label:
center: root.center
text: 'Ellipse'
<LineEllipse2>:
canvas:
Color:
rgba: 1, .1, .1, .9
Line:
width: 2.
ellipse: (self.x, self.y, self.width, self.height, 90, 180)
Label:
center: root.center
text: 'Ellipse from 90 to 180'
# fun result with low segments!
<LineEllipse3>:
canvas:
Color:
rgba: 1, .1, .1, .9
Line:
width: 2.
ellipse: (self.x, self.y, self.width, self.height, 90, 720, 10)
Label:
center: root.center
text: 'Ellipse from 90 to 720\\n10 segments'
halign: 'center'
<LineCircle1>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle: (self.center_x, self.center_y, min(self.width, self.height) / 2)
Label:
center: root.center
text: 'Circle'
<LineCircle2>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle: (self.center_x, self.center_y, min(self.width, self.height) / 2, 90, 180)
Label:
center: root.center
text: 'Circle from 90 to 180'
<LineCircle3>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle: (self.center_x, self.center_y, min(self.width, self.height) / 2, 90, 180, 10)
Label:
center: root.center
text: 'Circle from 90 to 180\\n10 segments'
halign: 'center'
<LineCircle4>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle: (self.center_x, self.center_y, min(self.width, self.height) / 2, 0, 360)
Label:
center: root.center
text: 'Circle from 0 to 360'
halign: 'center'
<LineRectangle>:
canvas:
Color:
rgba: .1, .1, 1, .9
Line:
width: 2.
rectangle: (self.x, self.y, self.width, self.height)
Label:
center: root.center
text: 'Rectangle'
<LineBezier>:
canvas:
Color:
rgba: .1, .1, 1, .9
Line:
width: 2.
bezier: (self.x, self.y, self.center_x - 40, self.y + 100, self.center_x + 40, self.y - 100, self.right, self.y)
Label:
center: root.center
text: 'Bezier'
''')
class LineEllipse1(Widget):
pass
class LineEllipse2(Widget):
pass
class LineEllipse3(Widget):
pass
class LineCircle1(Widget):
pass
class LineCircle2(Widget):
pass
class LineCircle3(Widget):
pass
class LineCircle4(Widget):
pass
class LineRectangle(Widget):
pass
class LineBezier(Widget):
pass
class LineExtendedApp(App):
def build(self):
root = GridLayout(cols=2, padding=50, spacing=50)
root.add_widget(LineEllipse1())
root.add_widget(LineEllipse2())
root.add_widget(LineEllipse3())
root.add_widget(LineCircle1())
root.add_widget(LineCircle2())
root.add_widget(LineCircle3())
root.add_widget(LineCircle4())
root.add_widget(LineRectangle())
root.add_widget(LineBezier())
return root
if __name__ == '__main__':
LineExtendedApp().run()
| mit |
jnhwkim/cuda-convnet2 | initw.py | 183 | 2020 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.gpumodel import *
import numpy as n
import numpy.random as nr
def get_src(filename):
src = IGPUModel.load_checkpoint(filename)
return src['model_state']['layers']
# Initialize weight matrix by copying weight matrix of given layer
def makew(name, idx, shape, params):
src = get_src(params[0])
return src[name]['weights'][idx]
# Initialize bias vector by copying bias vector of given layer
def makeb(name, shape, params):
src = get_src(params[0])
return src[name]['biases']
def concat(shape, src, src_layers, src_func):
mat = n.empty(shape, dtype=n.single, order='F')
start = 0
for s in src_layers:
m = src_func(src[s])
mat[:,start:start+m.shape[1]] = m
start += m.shape[1]
return mat
# Initialize weight matrix by concatenating weight matrices of given layers
def makewcat(name, idx, shape, params):
src, src_layers = get_src(params[0]), params[1:]
return concat(shape, src, src_layers, lambda x: x['weights'][idx])
# Initialize bias vector by concatenating bias vectors of given layers
def makebcat(name, shape, params):
src, src_layers = get_src(params[0]), params[1:]
return concat(shape, src, src_layers, lambda x: x['biases'])
# Initialize bias vector from tuple input
def makeb_vec(name, shape, params):
return n.array([n.single(x) for x in params], dtype=n.single).reshape((1, len(params)))
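# Illustrative sketch (hypothetical checkpoint path and layer names):
# makebcat('fc_out', (1, 100), ['/path/to/checkpoint', 'fc1', 'fc2']) loads
# the checkpoint and returns the biases of fc1 and fc2 concatenated
# column-wise; makewcat does the same for weight matrices.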
| apache-2.0 |
GuneetAtwal/kernel_m8 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
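# For example (hypothetical input): the 8 bytes 01 00 00 00 ff 00 00 00
# decode as two little-endian u32 values, so the script prints "0=1 1=255".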
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
davygeek/vitess | doc/vtctl_go_reference.py | 15 | 36144 | #!/usr/bin/python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
import json
import os
import optparse
import re
# TODO: Handle angle brackets that appear in command definitions --
# e.g. ChangeSlaveType
# Traverse directory to get list of all files in the directory.
def get_all_files(directory, filenames):
os.chdir(directory)
for path, dirs, files in os.walk('.'):
for filename in files:
filenames[os.path.join(path, filename)] = True
return filenames
# This needs to produce the same anchor ID as the Markdown processor.
def anchor_id(heading):
return heading.lower().replace(' ', '-').replace(',', '')
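# For example, following the rules above (hypothetical group name):
# anchor_id('Generic Commands') returns 'generic-commands', matching the
# group links emitted by write_header below.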
def write_header(doc, commands):
doc.write('This reference guide explains the commands that the ' +
'<b>vtctl</b> tool supports. **vtctl** is a command-line tool ' +
'used to administer a Vitess cluster, and it allows a human ' +
'or application to easily interact with a Vitess ' +
'implementation.\n\nCommands are listed in the ' +
'following groups:\n\n')
for group in sorted(commands):
group_link = anchor_id(group)
doc.write('* [' + group + '](#' + group_link + ')\n')
doc.write('\n\n')
def write_footer(doc):
doc.write(' </body>\n')
doc.write('</html>\n')
def create_reference_doc(root_directory, commands, arg_definitions):
doc = open(root_directory + '/doc/vtctlReference.md', 'w')
write_header(doc, commands)
not_found_arguments = {}
for group in sorted(commands):
doc.write('## ' + group + '\n\n');
for command in sorted(commands[group]):
if ('definition' in commands[group][command] and
commands[group][command]['definition'] != '' and
re.search(r'HIDDEN', commands[group][command]['definition'])):
print '\n\n****** ' + command + ' is hidden *******\n\n'
continue
command_link = anchor_id(command)
doc.write('* [' + command + '](#' + command_link + ')\n')
doc.write('\n')
for command in sorted(commands[group]):
if ('definition' in commands[group][command] and
commands[group][command]['definition'] != '' and
re.search(r'HIDDEN', commands[group][command]['definition'])):
continue
doc.write('### ' + command + '\n\n');
if ('definition' in commands[group][command] and
commands[group][command]['definition'] != ''):
commands[group][command]['definition'] = (
commands[group][command]['definition'].replace('<', '&lt;'))
commands[group][command]['definition'] = (
commands[group][command]['definition'].replace('>', '&gt;'))
commands[group][command]['definition'] = (
commands[group][command]['definition'].replace('&lt;/a&gt;',
'</a>'))
commands[group][command]['definition'] = (
commands[group][command]['definition'].replace('&lt;a href',
'<a href'))
commands[group][command]['definition'] = (
commands[group][command]['definition'].replace('"&gt;', '">'))
commands[group][command]['definition'] = (
commands[group][command]['definition'].replace('"&lt;br&gt;',
'<br>'))
commands[group][command]['definition'] = (
commands[group][command]['definition'].replace('\\n', '<br><br>'))
doc.write(commands[group][command]['definition'] + '\n\n')
if ('arguments' in commands[group][command] and
commands[group][command]['arguments']):
doc.write('#### Example\n\n')
arguments = commands[group][command]['arguments'].strip().strip(
'"').replace('<', '<')
arguments = arguments.strip().replace('>', '>')
doc.write('<pre class="command-example">%s %s</pre>\n\n' %
(command, arguments))
if ('argument_list' in commands[group][command] and
'flags' in commands[group][command]['argument_list']):
flag_text = ''
for command_flag in sorted(
commands[group][command]['argument_list']['flags']):
flag = (
commands[group][command]['argument_list']['flags'][command_flag])
flag_text += ('| %s | %s | %s |\n' % (command_flag, flag['type'],
flag['definition']))
if flag_text:
#flag_text = '<a name="' + command + '-flags"></a>\n\n' +
flag_text = ('| Name | Type | Definition |\n' +
'| :-------- | :--------- | :--------- |\n' +
flag_text + '\n')
doc.write('#### Flags\n\n' + flag_text + '\n')
if ('argument_list' in commands[group][command] and
'args' in commands[group][command]['argument_list']):
arg_text = ''
for arg in commands[group][command]['argument_list']['args']:
if 'name' in arg:
arg_name = arg['name']
new_arg_name = arg['name'].replace('<', '').replace('>', '')
if (new_arg_name[0:len(new_arg_name) - 1] in arg_definitions and
re.search(r'\d', new_arg_name[-1])):
arg_name = '<' + new_arg_name[0:len(new_arg_name) - 1] + '>'
arg_name = arg_name.strip().replace('<', 'START_CODE_TAG&lt;')
arg_name = arg_name.strip().replace('>', '&gt;END_CODE_TAG')
arg_text += '* ' + arg_name + ' – '
if 'required' in arg and arg['required']:
arg_text += 'Required.'
else:
arg_text += 'Optional.'
arg_values = None
temp_name = arg['name'].replace('<', '').replace('>', '')
if temp_name in arg_definitions:
arg_text += ' ' + arg_definitions[temp_name]['description']
if 'list_items' in arg_definitions[temp_name]:
arg_values = arg_definitions[temp_name]['list_items']
# Check if the argument name ends in a digit to catch things like
# keyspace1 being used to identify the first in a list of keyspaces.
elif (temp_name[0:len(temp_name) - 1] in arg_definitions and
re.search(r'\d', temp_name[-1])):
arg_length = len(temp_name) - 1
arg_text += (' ' +
arg_definitions[temp_name[0:arg_length]]['description'])
if 'list_items' in arg_definitions[temp_name[0:arg_length]]:
arg_values = (
arg_definitions[temp_name[0:arg_length]]['list_items'])
else:
not_found_arguments[arg['name']] = True
if arg_values:
arg_text += '\n\n'
for arg_value in sorted(arg_values, key=lambda k: k['value']):
arg_text += (' * <code>' + arg_value['value'] + '</code> ' +
'– ' + arg_value['definition'] + '\n')
arg_text += '\n\n'
if 'multiple' in arg:
separation = 'space'
if 'hasComma' in arg:
separation = 'comma'
arg_text += (' To specify multiple values for this argument, ' +
'separate individual values with a ' +
separation + '.')
arg_text += '\n'
if arg_text:
arg_text = arg_text.replace('START_CODE_TAG', '<code>')
arg_text = arg_text.replace('END_CODE_TAG', '</code>')
doc.write('#### Arguments\n\n' +
arg_text +
'\n')
if 'errors' in commands[group][command]:
errors_text = ''
if 'ARG_COUNT' in commands[group][command]['errors']:
error = commands[group][command]['errors']['ARG_COUNT']
message = re.sub(str(command), '<' + command + '>', error['message'])
#print 'message here'
#print message
#message = error['message'].replace(command, '<' + command + '>')
message = message.replace('<', 'START_CODE_TAG&lt;')
message = message.replace('>', '&gt;END_CODE_TAG')
errors_text += '* ' + message + ' '
if (error['exact_count'] and
len(error['exact_count']) == 1 and
error['exact_count'][0] == '1'):
errors_text += ('This error occurs if the command is not called ' +
'with exactly one argument.')
elif error['exact_count'] and len(error['exact_count']) == 1:
errors_text += ('This error occurs if the command is not called ' +
'with exactly ' + error['exact_count'][0] + ' ' +
'arguments.')
elif error['exact_count']:
allowed_error_counts = ' or '.join(error['exact_count'])
errors_text += ('This error occurs if the command is not called ' +
'with exactly ' + allowed_error_counts + ' ' +
'arguments.')
elif error['min_count'] and error['max_count']:
errors_text += ('This error occurs if the command is not called ' +
'with between ' + error['min_count'] + ' and ' +
error['max_count'] + ' arguments.')
elif error['min_count'] and error['min_count'] == '1':
errors_text += ('This error occurs if the command is not called ' +
'with at least one argument.')
elif error['min_count']:
errors_text += ('This error occurs if the command is not called ' +
'with at least ' + error['min_count'] + ' ' +
'arguments.')
elif error['max_count']:
errors_text += ('This error occurs if the command is not called ' +
'with more than ' + error['max_count'] + ' ' +
'arguments.')
errors_text += '\n'
if 'other' in commands[group][command]['errors']:
#print 'other errors'
#print commands[group][command]['errors']
for error in commands[group][command]['errors']['other']:
error = re.sub(str(command), '<' + command + '>', error)
if ('argument_list' in commands[group][command] and
'flags' in commands[group][command]['argument_list']):
for command_flag in sorted(
commands[group][command]['argument_list']['flags']):
error = re.sub(str(command_flag), '<' + command_flag + '>',
error)
error = error.replace('<', 'START_CODE_TAG&lt;')
error = error.replace('>', '&gt;END_CODE_TAG')
errors_text += '* ' + error + '\n'
if errors_text:
errors_text = errors_text.replace('START_CODE_TAG', '<code>')
errors_text = errors_text.replace('END_CODE_TAG', '</code>')
doc.write('#### Errors\n\n' + errors_text)
doc.write('\n\n')
#write_footer(doc)
doc.close()
#print json.dumps(not_found_arguments, sort_keys=True, indent=4)
return
def parse_arg_list(arguments, current_command):
last_char = ''
find_closing_square_bracket = False
has_comma = False
has_multiple = False
is_optional_argument = False
is_required_argument = False
current_argument = ''
new_arg_list = []
arg_count = 0
char_count = 1
for char in arguments:
if (last_char == '' or last_char == ' ') and char == '[':
find_closing_square_bracket = True
elif (last_char == '[' and
find_closing_square_bracket and
char == '<'):
is_optional_argument = True
current_argument += char
elif find_closing_square_bracket:
if char == ']':
if is_optional_argument:
new_arg_list.append({'name': current_argument,
'has_comma': has_comma,
'has_multiple': has_multiple,
'required': False})
arg_count += 1
find_closing_square_bracket = False
current_argument = ''
has_comma = False
has_multiple = False
elif is_optional_argument:
if char == ',':
has_comma = True
elif last_char == '.' and char == '.':
has_multiple = True
elif not has_comma:
current_argument += char
elif char == '<' and (last_char == '' or last_char == ' '):
is_required_argument = True
current_argument += char
elif char == ',':
has_comma = True
if last_char == '>':
new_arg_list[arg_count - 1]['hasComma'] = True
has_comma = False
elif is_required_argument:
current_argument += char
if char == '>' and current_argument:
if char_count == len(arguments[0]):
new_arg_list.append({'name': current_argument,
'required': True})
arg_count += 1
is_required_argument = False
current_argument = ''
else:
next_char = 'x'
if current_command == 'Resolve':
if char_count < len(arguments[0]):
next_char = arguments[0][char_count:char_count + 1]
if next_char and not next_char == '.' and not next_char == ':':
new_arg_list.append({'name': current_argument,
'required': True})
arg_count += 1
is_required_argument = False
current_argument = ''
elif (arg_count > 0 and
current_argument == '' and
last_char == '.' and
'multiple' in new_arg_list[arg_count - 1] and
char == ' '):
current_argument = ''
elif (arg_count > 0 and
current_argument == '' and
last_char == '.' and
char == '.'):
new_arg_list[arg_count - 1]['multiple'] = True
char_count += 1
last_char = char
return new_arg_list
def get_group_name_from_variable(file_path, variable_name):
vtctl_go_file = open(file_path, 'rU')
vtctl_go_data = vtctl_go_file.readlines()
vtctl_go_file.close()
for line in vtctl_go_data:
regex = r'const\s*' + re.escape(variable_name) + r'\s*=\s*\"([^\"]+)\"'
if re.search(regex, line):
return line.split('=')[1].strip().strip('"')
return variable_name
def main(root_directory):
arg_definitions = {}
commands = {}
command_groups = {}
error_counts = {}
functions = {}
# Read the .go files in the /vitess/go/vt/vtctl/ directory
vtctl_dir_path = root_directory + '/go/vt/vtctl/'
go_files = next(os.walk(vtctl_dir_path))[2]
for path in go_files:
if not path.endswith('.go'):
continue
vtctl_go_path = vtctl_dir_path + path
vtctl_go_file = open(vtctl_go_path, 'rU')
vtctl_go_data = vtctl_go_file.readlines()
vtctl_go_file.close()
add_command_syntax = False
get_commands = False
get_argument_definitions = False
get_wrong_arg_count_error = False
get_group_name = False
current_command_argument = ''
current_command_argument_value = ''
current_command = ''
current_function = ''
is_func_init = False
is_flag_section = False
# treat func init() same as var commands
# treat addCommand("Group Name"... same as command {... in vtctl.go group
# Reformat Generic Help command to same format as commands in backup.go
# and reparent.go.
# Add logic to capture command data from those commands.
for line in vtctl_go_data:
# skip comments and empty lines
if line.strip() == '' or line.strip().startswith('//'):
continue
if is_func_init and not is_flag_section and re.search(r'^if .+ {', line.strip()):
is_flag_section = True
elif is_func_init and not is_flag_section and line.strip() == 'servenv.OnRun(func() {':
pass
elif is_func_init and is_flag_section and line.strip() == 'return':
pass
elif is_func_init and is_flag_section and line.strip() == '}':
is_flag_section = False
elif is_func_init and (line.strip() == '}' or line.strip() == '})'):
is_func_init = False
elif get_commands:
# This line precedes a command group's name, e.g. "Tablets" or "Shards."
# Capture the group name on the next line.
if line.strip() == '{':
get_group_name = True
# Capture the name of a command group.
elif get_group_name:
# Regex to identify the group name. Line in code looks like:
# "Tablets", []command{
find_group = re.findall(r'\"([^\"]+)\", \[\]\s*command\s*\{', line)
if find_group:
current_group = find_group[0]
if current_group not in commands:
commands[current_group] = {}
get_group_name = False
# First line of a command in addCommand syntax. This contains the
# name of the group that the command is in. Line in code looks like:
# addCommand{"Shards", command{
elif re.search(r'^addCommand\(', line.strip()):
command_data = re.findall(r'addCommand\s*\(\s*([^\,]+),\s*command\{',
line)
if command_data:
current_group = command_data[0]
current_group_strip_quotes = current_group.strip('"')
if current_group == current_group_strip_quotes:
current_group = get_group_name_from_variable(vtctl_go_path,
current_group)
else:
current_group = current_group_strip_quotes
if current_group not in commands:
commands[current_group] = {}
add_command_syntax = True
elif add_command_syntax and is_func_init:
if not current_command:
current_command = line.strip().strip(',').strip('"')
commands[current_group][current_command] = {
'definition': '',
'argument_list': {
'flags': {},
'args': []
},
'errors': {'other': []}}
command_groups[current_command] = current_group
elif 'function' not in commands[current_group][current_command]:
function = line.strip().strip(',')
commands[current_group][current_command]['function'] = function
functions[function] = current_command
elif 'arguments' not in commands[current_group][current_command]:
arguments = line.strip().strip(',')
commands[current_group][current_command]['arguments'] = arguments
if arguments:
new_arg_list = parse_arg_list(arguments, current_command)
commands[current_group][current_command]['argument_list']['args'] = new_arg_list
else:
definition_list = line.strip().split(' +')
for definition_part in definition_list:
definition = definition_part.strip().strip('})')
definition = definition.replace('}},', '')
definition = definition.replace('"', '')
commands[current_group][current_command]['definition'] += (
definition)
if line.strip().endswith('})'):
current_command = ''
# Command definition ends with line ending in "},".
elif line.strip().endswith('})'):
current_command = ''
add_command_syntax = False
# First line of a command. This contains the command name and the
# function used to process the command. Line in code looks like:
# command{"ScrapTablet", commandScrapTablet,
elif re.search(r'^\{\s*\"[^\"]+\",\s*command[^\,]+\,', line.strip()):
command_data = re.findall(r'\{\s*\"([^\"]+)\",\s*([^\,]+)\,',
line)
if command_data:
# Capture the command name and associate it with its function.
# Create a data structure to contain information about the command
# and its processing rules.
current_command = command_data[0][0]
function = command_data[0][1]
commands[current_group][current_command] = {
'definition': '',
'argument_list': {
'flags': {},
'args': []
},
'errors': {'other': []}}
# Associate the function with the command and vice versa.
# Associate the command with its group and vice versa.
commands[current_group][current_command]['function'] = function
command_groups[current_command] = current_group
functions[function] = current_command
# If code has identified a command name but has not identified
# arguments for that command, capture the next line and store it
# as the command arguments.
elif (current_command and
'arguments' not in commands[current_group][current_command]):
arguments = re.findall(r'\"([^\"]+)\"', line)
if arguments:
commands[current_group][current_command]['arguments'] = arguments[0]
last_char = ''
find_closing_square_bracket = False
has_comma = False
has_multiple = False
is_optional_argument = False
is_required_argument = False
current_argument = ''
new_arg_list = []
arg_count = 0
char_count = 1
for char in arguments[0]:
if (last_char == '' or last_char == ' ') and char == '[':
find_closing_square_bracket = True
elif (last_char == '[' and
find_closing_square_bracket and
char == '<'):
is_optional_argument = True
current_argument += char
elif find_closing_square_bracket:
if char == ']':
if is_optional_argument:
new_arg_list.append({'name': current_argument,
'has_comma': has_comma,
'has_multiple': has_multiple,
'required': False})
arg_count += 1
find_closing_square_bracket = False
current_argument = ''
has_comma = False
has_multiple = False
elif is_optional_argument:
if char == ',':
has_comma = True
elif last_char == '.' and char == '.':
has_multiple = True
elif not has_comma:
current_argument += char
elif char == '<' and (last_char == '' or last_char == ' '):
is_required_argument = True
current_argument += char
elif char == ',':
has_comma = True
if last_char == '>':
new_arg_list[arg_count - 1]['hasComma'] = True
has_comma = False
elif is_required_argument:
current_argument += char
if char == '>' and current_argument:
if char_count == len(arguments[0]):
new_arg_list.append({'name': current_argument,
'required': True})
arg_count += 1
is_required_argument = False
current_argument = ''
else:
next_char = 'x'
if current_command == 'Resolve':
#print len(arguments[0])
#print char_count
if char_count < len(arguments[0]):
next_char = arguments[0][char_count:char_count + 1]
#print next_char
#if char_count < (len(arguments[0]) - 1):
# print current_argument
# next_char = arguments[0][char_count + 1:char_count + 2]
# print next_char
if next_char and not next_char == '.' and not next_char == ':':
new_arg_list.append({'name': current_argument,
'required': True})
arg_count += 1
is_required_argument = False
current_argument = ''
elif (arg_count > 0 and
current_argument == '' and
last_char == '.' and
'multiple' in new_arg_list[arg_count - 1] and
char == ' '):
current_argument = ''
elif (arg_count > 0 and
current_argument == '' and
last_char == '.' and
char == '.'):
new_arg_list[arg_count - 1]['multiple'] = True
#elif (arg_count > 0 and
# current_argument == '' and
# last_char == '.'):
char_count += 1
last_char = char
commands[current_group][current_command]['argument_list']['args'] = (
new_arg_list)
else:
commands[current_group][current_command]['arguments'] = ""
# If code has identified a command and arguments, capture remaining lines
# as the command description. Assume the description ends at the line
# of code ending with "},".
elif current_command:
definition_list = line.rstrip('},').split(' +')
for definition_part in definition_list:
definition = definition_part.strip().strip('"')
#definition = definition.replace('\\n', '<br><br>')
definition = definition.replace('"},', '')
commands[current_group][current_command]['definition'] += definition
if line.strip().endswith('},'):
current_command = ''
# Command definition ends with line ending in "},".
elif line.strip().endswith('},'):
current_command = ''
# Capture information about a function that processes a command.
# Here, identify the function name.
elif re.search(r'^func ([^\)]+)\(', line.strip()):
current_function = ''
function_data = re.findall(r'func ([^\)]+)\(', line.strip())
if function_data:
current_function = function_data[0]
elif current_function:
# Lines that contain this:
# = subFlags....
# generally seem to contain descriptions of flags for the function.
# Capture the content type of the argument, its name, default value,
# and description. Associate these with the command that calls this
# function.
if re.search(r'\=\s*subFlags\.([^\(]+)\(\s*\"([^\"]+)\"\,' +
'([^\,]+)\,\s*\"([^\"]+)\"', line):
argument_data = re.findall(r'\=\s*subFlags\.([^\(]+)' +
'\(\s*\"([^\"]+)\"\,([^\,]+)\,\s*\"([^\"]+)\"', line)
if argument_data and current_function in functions:
[arg_type, arg_name, arg_default, arg_definition] = argument_data[0]
if arg_type == 'Bool':
arg_type = 'Boolean'
if arg_type == 'String' or arg_type == 'int':
arg_type = arg_type.lower()
arg_default = arg_default.strip().strip('"')
fcommand = functions[current_function]
fgroup = command_groups[fcommand]
commands[fgroup][fcommand]['argument_list']['flags'][arg_name] = {
'type': arg_type,
'default': arg_default,
'definition': arg_definition
}
elif re.search(r'\s*subFlags\.Var\(\s*([^\,]+)\,\s*\"([^\"]+)\"\,' +
'\s*\"([^\"]+)\"', line):
#print 'found subFlags.Var'
argument_data = re.findall(r'\s*subFlags\.Var\(\s*([^\,]+)\,' +
'\s*\"([^\"]+)\"\,\s*\"([^\"]+)\"', line)
#print argument_data
if argument_data and current_function in functions:
[arg_type, arg_name, arg_definition] = argument_data[0]
arg_type = 'string' # Var?
fcommand = functions[current_function]
fgroup = command_groups[fcommand]
commands[fgroup][fcommand]['argument_list']['flags'][arg_name] = {
'type': arg_type,
'definition': arg_definition
}
# Capture information for errors that indicate that the command
# was called with the incorrect number of arguments. Use the
# code to determine whether the code is looking for an exact number
# of arguments, a minimum number, a maximum number, etc.
elif re.search(r'if subFlags.NArg', line):
wrong_arg_data = re.findall(
r'subFlags.NArg\(\)\s*([\<\>\!\=]+)\s*(\d+)', line)
error_counts[current_function] = {'min': None,
'max': None,
'exact': []}
if wrong_arg_data:
get_wrong_arg_count_error = True
for wrong_arg_info in wrong_arg_data:
if wrong_arg_info[0] == '!=':
error_counts[current_function]['exact'].append(
wrong_arg_info[1])
elif wrong_arg_info[0] == '<':
error_counts[current_function]['min'] = wrong_arg_info[1]
elif wrong_arg_info[0] == '>':
error_counts[current_function]['max'] = wrong_arg_info[1]
elif wrong_arg_info[0] == '==' and wrong_arg_info[1] == '0':
error_counts[current_function]['min'] = '1'
# Capture data about other errors that the command might yield.
# TODO: Capture other errors from other files, such as
# //depot/google3/third_party/golang/vitess/go/vt/topo/tablet.go
elif get_wrong_arg_count_error and re.search(r'fmt.Errorf', line):
get_wrong_arg_count_error = False
error_data = re.findall(r'fmt\.Errorf\(\"([^\"]+)\"\)', line)
if error_data and current_function in functions:
fcommand = functions[current_function]
fgroup = command_groups[fcommand]
commands[fgroup][fcommand]['errors']['ARG_COUNT'] = {
'exact_count': error_counts[current_function]['exact'],
'min_count': error_counts[current_function]['min'],
'max_count': error_counts[current_function]['max'],
'message': error_data[0]
}
elif line.strip().endswith('}') or line.strip().endswith('{'):
get_wrong_arg_count_error = False
elif current_function in functions and re.search(r'fmt.Errorf', line):
error_data = re.findall(r'fmt\.Errorf\(\"([^\"]+)\"', line)
if error_data:
fcommand = functions[current_function]
fgroup = command_groups[fcommand]
if (error_data[0] not in
commands[fgroup][fcommand]['errors']['other']):
if ('ARG_COUNT' in commands[fgroup][fcommand]['errors'] and
error_data[0] !=
commands[fgroup][fcommand]['errors']['ARG_COUNT']['message']):
commands[fgroup][fcommand]['errors']['other'].append(
error_data[0])
elif 'ARG_COUNT' not in commands[fgroup][fcommand]['errors']:
commands[fgroup][fcommand]['errors']['other'].append(
error_data[0])
    # This line indicates that the command definitions are starting. Nothing
    # before this point needs to be captured.
elif re.search(r'^var commands =', line.strip()):
get_commands = True
elif re.search(r'^func init\(\)', line.strip()):
get_commands = True
is_func_init = True
if get_argument_definitions:
if line.strip() == '*/':
get_argument_definitions = False
elif line.startswith('-'):
definition_data = re.findall(r'^\-\s*([^\:]+)\:\s*(.*)', line)
if definition_data:
arg_name_array = definition_data[0][0].split('(internal)')
current_command_argument = arg_name_array[0].strip()
arg_definitions[current_command_argument] = {}
if len(arg_name_array) > 1:
arg_definitions[current_command_argument]['internal'] = True
arg_definitions[current_command_argument]['description'] = (
definition_data[0][1].strip())
current_command_argument_value = ''
elif current_command_argument and line.lstrip()[0:2] == '--':
if 'list_items' not in arg_definitions[current_command_argument]:
arg_definitions[current_command_argument]['list_items'] = []
arg_value_data = re.findall(r'^\--\s*([^\:]+)\:\s*(.*)', line.strip())
if arg_value_data:
current_command_argument_value = arg_value_data[0][0]
argument_definition = arg_value_data[0][1]
arg_definitions[current_command_argument]['list_items'].append({
'value': current_command_argument_value,
'definition': argument_definition})
elif current_command_argument_value:
list_length = (len(
arg_definitions[current_command_argument]['list_items']) - 1)
arg_definitions[current_command_argument][
'list_items'][list_length]['definition'] += ' ' + line.strip()
elif line and current_command_argument:
arg_definitions[current_command_argument]['description'] += ' ' + line.strip()
elif re.search(r'^COMMAND ARGUMENT DEFINITIONS', line.strip()):
get_argument_definitions = True
# Handle arguments that have different names but same meaning
new_arg_definitions = {}
modifiers = ['destination', 'new master', 'original', 'parent', 'served',
'source', 'target']
for defined_argument in arg_definitions:
argument_list = defined_argument.split(',')
if len(argument_list) > 1:
for argument_list_item in argument_list:
new_arg_definitions[argument_list_item.strip()] = (
arg_definitions[defined_argument])
for modifier in modifiers:
new_arg_definitions[modifier + ' ' + argument_list_item.strip()] = (
arg_definitions[defined_argument])
else:
new_arg_definitions[defined_argument] = arg_definitions[defined_argument]
for modifier in modifiers:
new_arg_definitions[modifier + ' ' + defined_argument] = (
arg_definitions[defined_argument])
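    # e.g. a doc entry "- cell, cells: ..." yields the keys "cell" and "cells"
    # plus modifier-prefixed variants such as "source cell" and "served cells".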
#print json.dumps(new_arg_definitions, sort_keys=True, indent=4)
#print json.dumps(commands["Generic"], sort_keys=True, indent=4)
create_reference_doc(root_directory, commands, new_arg_definitions)
return
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('-r', '--root-directory', default='..',
help='root directory for the vitess github tree')
(options, args) = parser.parse_args()
main(options.root_directory)
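
# Example invocation (script name assumed; the only option is defined above):
#
#     python parse_vtctl_commands.py --root-directory=/path/to/vitess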
| apache-2.0 |
adgon92/optimalization-project | src/plot/ploter.py | 1 | 1397 | __author__ = 'Przemek'
import numpy as np
import matplotlib.pyplot as plt
class Ploter:
def __init__(self):
pass
def plot_temperature(self, temperature, cooling_method, initial_temperature, numb_of_cycles):
plt.plot(temperature, 'b.-')
plt.xlabel('Number of cycles')
plt.ylabel('Temperature')
plt.text(0.7 * numb_of_cycles, max(temperature),
self._get_chart_description(cooling_method, initial_temperature, numb_of_cycles))
def plot(self, objectives, cooling_method, initial_temperature, numb_of_cycles):
plt.plot(objectives, 'r.-')
plt.xlabel('Number of cycles')
plt.ylabel('Quality of solution')
plt.text(0.7 * numb_of_cycles, max(objectives),
self._get_chart_description(cooling_method, initial_temperature, numb_of_cycles),
withdash=False)
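        # NOTE: the withdash kwarg was deprecated and later removed from
        # Matplotlib; drop it when running against current releases.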
@staticmethod
def _get_chart_description(cooling_method, initial_temperature, numb_of_cycles):
return 'Cooling method {}\nInitial temperature: {}\nNumb of cycles: {}'.format(cooling_method,
initial_temperature,
numb_of_cycles)
def save(self, path):
plt.savefig(path)
def show(self):
plt.show()
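

# Minimal usage sketch (illustrative values; assumes a finished annealing run
# that produced one temperature sample per cycle):
if __name__ == '__main__':
    ploter = Ploter()
    ploter.plot_temperature([100.0, 80.0, 64.0, 51.2], 'exponential', 100, 4)
    ploter.show()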
| gpl-2.0 |
beppec56/core | wizards/com/sun/star/wizards/common/NumberFormatter.py | 11 | 8643 | #
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
import traceback
from com.sun.star.lang import Locale
class NumberFormatter(object):
def __init__(self, _xNumberFormatsSupplier, _aLocale, _xMSF=None):
self.iDateFormatKey = -1
self.iDateTimeFormatKey = -1
self.iNumberFormatKey = -1
self.iTextFormatKey = -1
self.iTimeFormatKey = -1
self.iLogicalFormatKey = -1
self.bNullDateCorrectionIsDefined = False
self.aLocale = _aLocale
if _xMSF is not None:
self.xNumberFormatter = _xMSF.createInstance(
"com.sun.star.util.NumberFormatter")
self.xNumberFormats = _xNumberFormatsSupplier.NumberFormats
self.xNumberFormatSettings = \
_xNumberFormatsSupplier.NumberFormatSettings
self.xNumberFormatter.attachNumberFormatsSupplier(
_xNumberFormatsSupplier)
'''
@param _xMSF
@param _xNumberFormatsSupplier
@return
@throws Exception
@deprecated
'''
@classmethod
def createNumberFormatter(self, _xMSF, _xNumberFormatsSupplier):
oNumberFormatter = _xMSF.createInstance(
"com.sun.star.util.NumberFormatter")
oNumberFormatter.attachNumberFormatsSupplier(_xNumberFormatsSupplier)
return oNumberFormatter
'''
gives a key to pass to a NumberFormat object. <br/>
example: <br/>
<pre>
XNumberFormatsSupplier nsf =
(XNumberFormatsSupplier)UnoRuntime.queryInterface(...,document)
int key = Desktop.getNumberFormatterKey(
nsf, ...star.i18n.NumberFormatIndex.DATE...)
XNumberFormatter nf = Desktop.createNumberFormatter(xmsf, nsf);
nf.convertNumberToString( key, 1972 );
</pre>
@param numberFormatsSupplier
@param type - a constant out of i18n.NumberFormatIndex enumeration.
@return a key to use with a util.NumberFormat instance.
'''
@classmethod
def getNumberFormatterKey(self, numberFormatsSupplier, Type):
return numberFormatsSupplier.NumberFormats.getFormatIndex(
Type, Locale())
def convertNumberToString(self, _nkey, _dblValue, _xNumberFormatter=None):
if _xNumberFormatter is None:
return self.xNumberFormatter.convertNumberToString(
_nkey, _dblValue)
else:
return _xNumberFormatter.convertNumberToString(_nkey, _dblValue)
def convertStringToNumber(self, _nkey, _sString):
return self.xNumberFormatter.convertStringToNumber(_nkey, _sString)
'''
@param dateCorrection The lDateCorrection to set.
'''
def setNullDateCorrection(self, dateCorrection):
self.lDateCorrection = dateCorrection
def defineNumberFormat(self, _FormatString):
try:
NewFormatKey = self.xNumberFormats.queryKey(
_FormatString, self.aLocale, True)
            if NewFormatKey == -1:
NewFormatKey = self.xNumberFormats.addNew(
_FormatString, self.aLocale)
return NewFormatKey
except Exception:
traceback.print_exc()
return -1
'''
returns a numberformat for a FormatString.
@param _FormatString
@param _aLocale
@return
'''
def defineNumberFormat(self, _FormatString, _aLocale):
try:
NewFormatKey = self.xNumberFormats.queryKey(
_FormatString, _aLocale, True)
if NewFormatKey == -1:
NewFormatKey = self.xNumberFormats.addNew(
_FormatString, _aLocale)
return NewFormatKey
except Exception:
traceback.print_exc()
return -1
def setNumberFormat(self, _xFormatObject, _FormatKey, _oNumberFormatter):
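        # NOTE: Helper is referenced below but never imported in this module;
        # it is assumed to come from the sibling wizards "common" package
        # (the Java original imported it explicitly).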
try:
xNumberFormat = _oNumberFormatter.xNumberFormats.getByKey(
_FormatKey)
FormatString = str(Helper.getUnoPropertyValue(
xNumberFormat, "FormatString"))
oLocale = Helper.getUnoPropertyValue(xNumberFormat, "Locale")
            NewFormatKey = self.defineNumberFormat(FormatString, oLocale)
_xFormatObject.setPropertyValue(
"FormatsSupplier",
_oNumberFormatter.xNumberFormatter.getNumberFormatsSupplier())
if _xFormatObject.getPropertySetInfo().hasPropertyByName(
"NumberFormat"):
_xFormatObject.setPropertyValue("NumberFormat", NewFormatKey)
elif _xFormatObject.getPropertySetInfo().hasPropertyByName(
"FormatKey"):
_xFormatObject.setPropertyValue("FormatKey", NewFormatKey)
else:
                # TODO: raises an exception inside a try/except block; is
                # that really helpful?
raise Exception
except Exception:
traceback.print_exc()
def getNullDateCorrection(self):
if not self.bNullDateCorrectionIsDefined:
dNullDate = Helper.getUnoStructValue(
self.xNumberFormatSettings, "NullDate")
lNullDate = Helper.convertUnoDatetoInteger(dNullDate)
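            # NOTE: the java.util.Calendar calls below are a leftover from
            # the Java original; under CPython they would have to be ported
            # to the datetime module.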
oCal = java.util.Calendar.getInstance()
oCal.set(1900, 1, 1)
dTime = oCal.getTime()
lTime = dTime.getTime()
lDBNullDate = lTime / (3600 * 24000)
            self.lDateCorrection = lDBNullDate - lNullDate
            self.bNullDateCorrectionIsDefined = True
            return self.lDateCorrection
else:
return self.lDateCorrection
def setBooleanReportDisplayNumberFormat(self):
FormatString = "[=1]" + str(9745) + ";[=0]" + str(58480) + ";0"
self.iLogicalFormatKey = self.xNumberFormats.queryKey(
FormatString, self.aLocale, True)
try:
if self.iLogicalFormatKey == -1:
self.iLogicalFormatKey = self.xNumberFormats.addNew(
FormatString, self.aLocale)
except Exception:
#MalformedNumberFormat
traceback.print_exc()
self.iLogicalFormatKey = self.xNumberFormats.getStandardFormat(
NumberFormat.LOGICAL, self.aLocale)
return self.iLogicalFormatKey
'''
@return Returns the iDateFormatKey.
'''
def getDateFormatKey(self):
if self.iDateFormatKey == -1:
self.iDateFormatKey = self.xNumberFormats.getStandardFormat(
NumberFormat.DATE, self.aLocale)
return self.iDateFormatKey
'''
@return Returns the iDateTimeFormatKey.
'''
def getDateTimeFormatKey(self):
if self.iDateTimeFormatKey == -1:
self.iDateTimeFormatKey = self.xNumberFormats.getStandardFormat(
NumberFormat.DATETIME, self.aLocale)
return self.iDateTimeFormatKey
'''
@return Returns the iLogicalFormatKey.
'''
def getLogicalFormatKey(self):
if self.iLogicalFormatKey == -1:
self.iLogicalFormatKey = self.xNumberFormats.getStandardFormat(
NumberFormat.LOGICAL, self.aLocale)
return self.iLogicalFormatKey
'''
@return Returns the iNumberFormatKey.
'''
def getNumberFormatKey(self):
if self.iNumberFormatKey == -1:
self.iNumberFormatKey = self.xNumberFormats.getStandardFormat(
NumberFormat.NUMBER, self.aLocale)
return self.iNumberFormatKey
'''
@return Returns the iTextFormatKey.
'''
def getTextFormatKey(self):
if self.iTextFormatKey == -1:
self.iTextFormatKey = self.xNumberFormats.getStandardFormat(
NumberFormat.TEXT, self.aLocale)
return self.iTextFormatKey
'''
@return Returns the iTimeFormatKey.
'''
def getTimeFormatKey(self):
if self.iTimeFormatKey == -1:
self.iTimeFormatKey = self.xNumberFormats.getStandardFormat(
NumberFormat.TIME, self.aLocale)
return self.iTimeFormatKey
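
# Minimal usage sketch (hypothetical UNO objects; a real office document
# supplies the XNumberFormatsSupplier and the Locale):
#
#     formatter = NumberFormatter(xNumberFormatsSupplier, aLocale, xMSF)
#     key = formatter.defineNumberFormat("0.00", aLocale)
#     text = formatter.convertNumberToString(key, 3.14159)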
| gpl-3.0 |
rvmoura96/projeto-almoxarifado | myvenv/Lib/site-packages/pylint/test/functional/cellvar_escaping_loop.py | 12 | 2773 | # pylint: disable=print-statement
"""Tests for loopvar-in-closure."""
from __future__ import print_function
def good_case():
"""No problems here."""
lst = []
for i in range(10):
lst.append(i)
def good_case2():
"""No problems here."""
return [i for i in range(10)]
def good_case3():
"""No problems here."""
lst = []
for i in range(10): # [unused-variable]
lst.append(lambda i=i: i)
def good_case4():
"""No problems here."""
lst = []
for i in range(10):
print(i)
lst.append(lambda i: i)
def good_case5():
"""No problems here."""
return (i for i in range(10))
def good_case6():
"""Accept use of the variable after the loop.
There's already a warning about possibly undefined loop variables, and
the value will not change any more."""
for i in range(10):
print(i)
return lambda: i # [undefined-loop-variable]
def good_case7():
"""Accept use of the variable inside return."""
for i in range(10):
if i == 8:
return lambda: i
return lambda: -1
def good_case8():
"""Lambda defined and called in loop."""
for i in range(10):
print((lambda x: i + x)(1))
def good_case9():
"""Another eager binding of the cell variable."""
funs = []
for i in range(10):
def func(bound_i=i):
"""Ignore."""
return bound_i
funs.append(func)
return funs
def bad_case():
"""Closing over a loop variable."""
lst = []
for i in range(10):
print(i)
lst.append(lambda: i) # [cell-var-from-loop]
def bad_case2():
"""Closing over a loop variable."""
return [lambda: i for i in range(10)] # [cell-var-from-loop]
def bad_case3():
"""Closing over variable defined in loop."""
lst = []
for i in range(10):
j = i * i
lst.append(lambda: j) # [cell-var-from-loop]
return lst
def bad_case4():
"""Closing over variable defined in loop."""
lst = []
for i in range(10):
def nested():
"""Nested function."""
return i**2 # [cell-var-from-loop]
lst.append(nested)
return lst
def bad_case5():
"""Problematic case.
If this function is used as
>>> [x() for x in bad_case5()]
it behaves 'as expected', i.e. the result is range(10).
If it's used with
>>> lst = list(bad_case5())
>>> [x() for x in lst]
the result is [9] * 10 again.
"""
return (lambda: i for i in range(10)) # [cell-var-from-loop]
def bad_case6():
"""Closing over variable defined in loop."""
lst = []
for i, j in zip(range(10), range(10, 20)):
print(j)
lst.append(lambda: i) # [cell-var-from-loop]
return lst
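
# A common fix for the bad cases above is to bind the loop variable eagerly,
# as good_case3 and good_case9 do, e.g.:
#
#     lst.append(lambda i=i: i)                 # default argument binds now
#     lst.append(functools.partial(pow, i, 2))  # partial also binds eagerly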
| mit |
epage/MaemoPythonSkeleton | setup.py | 1 | 4524 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
reload(sys).setdefaultencoding("UTF-8")
import os
try:
from sdist_maemo import sdist_maemo as _sdist_maemo
sdist_maemo = _sdist_maemo
except ImportError:
sdist_maemo = None
print 'sdist_maemo command not available'
from distutils.core import setup
#[[[cog
# import cog
# from REPLACEME import constants
# cog.outl('APP_NAME="%s"' % constants.__app_name__)
# cog.outl('PRETTY_APP_NAME="%s"' % constants.__pretty_app_name__)
# cog.outl('VERSION="%s"' % constants.__version__)
# cog.outl('BUILD="%s"' % constants.__build__)
# cog.outl('DESKTOP_FILE_PATH="%s"' % DESKTOP_FILE_PATH)
# cog.outl('INPUT_DESKTOP_FILE="%s"' % INPUT_DESKTOP_FILE)
# cog.outl('ICON_CATEGORY="%s"' % ICON_CATEGORY)
# cog.outl('ICON_SIZES=[%s]' % ICON_SIZES)
#]]]
APP_NAME="ejpi"
PRETTY_APP_NAME="e**(j pi) + 1 = 0"
VERSION="1.0.6"
BUILD="9"
DESKTOP_FILE_PATH="/usr/share/applications"
INPUT_DESKTOP_FILE="data/ubuntu/ejpi.desktop"
ICON_CATEGORY="apps"
ICON_SIZES=[32,48]
#[[[end]]]
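
# The constants block above is generated by cog from the project's constants
# module; regeneration is assumed to happen in place with something like:
#
#     python -m cogapp -r setup.py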
CHANGES = """""".strip()
BUGTRACKER_URL = "REPLACEME"
def is_package(path):
return (
os.path.isdir(path) and
os.path.isfile(os.path.join(path, '__init__.py'))
)
def find_packages(path, base="", includeRoot=False):
""" Find all packages in path """
if includeRoot:
assert not base, "Base not supported with includeRoot: %r" % base
rootPath, module_name = os.path.split(path)
yield module_name
base = module_name
for item in os.listdir(path):
dir = os.path.join(path, item)
if is_package( dir ):
if base:
module_name = "%(base)s.%(item)s" % vars()
else:
module_name = item
yield module_name
for mname in find_packages(dir, module_name):
yield mname
setup(
name=APP_NAME,
version=VERSION,
description="REPLACEME",
    long_description="REPLACEME",
author="",
author_email="",
maintainer="",
maintainer_email="",
url="",
license="GNU LGPLv2.1",
scripts=[
"REPLACEME",
],
packages=list(find_packages(APP_NAME, includeRoot=True)),
package_data={
"REPLACEME": ["*.qml"],
},
data_files=[
(DESKTOP_FILE_PATH, [INPUT_DESKTOP_FILE]),
("/usr/share/icons/hicolor/scalable/apps", ["data/%s.svg" % APP_NAME]),
] +
[
(
"/usr/share/icons/hicolor/%sx%s/%s" % (size, size, ICON_CATEGORY),
["data/icons/%s/%s.png" % (size, APP_NAME)]
)
for size in ICON_SIZES
],
requires=[
"PySide",
"pyxdg",
],
cmdclass={
'sdist_ubuntu': sdist_maemo,
'sdist_diablo': sdist_maemo,
'sdist_fremantle': sdist_maemo,
'sdist_harmattan': sdist_maemo,
},
options={
"sdist_ubuntu": {
"debian_package": APP_NAME,
"section": "math",
"copyright": "lgpl",
"changelog": CHANGES,
"buildversion": str(BUILD),
"depends": "python, python-pyside.qtcore, python-pyside.qtgui, python-xdg",
"architecture": "any",
},
"sdist_diablo": {
"debian_package": APP_NAME,
"Maemo_Display_Name": PRETTY_APP_NAME,
#"Maemo_Upgrade_Description": CHANGES,
"Maemo_Bugtracker": BUGTRACKER_URL,
"Maemo_Icon_26": "data/icons/26/%s.png" % APP_NAME,
"section": "user/science",
"copyright": "lgpl",
"changelog": CHANGES,
"buildversion": str(BUILD),
"depends": "python2.5, python2.5-qt4-core, python2.5-qt4-gui, python-xdg, python-simplejson",
"architecture": "any",
},
"sdist_fremantle": {
"debian_package": APP_NAME,
"Maemo_Display_Name": PRETTY_APP_NAME,
#"Maemo_Upgrade_Description": CHANGES,
"Maemo_Bugtracker": BUGTRACKER_URL,
"Maemo_Icon_26": "data/icons/48/%s.png" % APP_NAME,
"section": "user/science",
"copyright": "lgpl",
"changelog": CHANGES,
"buildversion": str(BUILD),
#"depends": "python2.5, python2.5-qt4-core, python2.5-qt4-gui, python2.5-qt4-maemo5, python-xdg",
"depends": "python, python-pyside.qtcore, python-pyside.qtgui, python-pyside.qtmaemo5, python-xdg, python-simplejson",
"architecture": "any",
},
"sdist_harmattan": {
"debian_package": APP_NAME,
"Maemo_Display_Name": PRETTY_APP_NAME,
#"Maemo_Upgrade_Description": CHANGES,
"Maemo_Bugtracker": BUGTRACKER_URL,
"Maemo_Icon_26": "data/icons/48/%s.png" % APP_NAME,
"MeeGo_Desktop_Entry_Filename": APP_NAME,
#"MeeGo_Desktop_Entry": "",
"section": "user/science",
"copyright": "lgpl",
"changelog": CHANGES,
"buildversion": str(BUILD),
"depends": "python, python-pyside.qtcore, python-pyside.qtgui, python-xdg",
"architecture": "any",
},
"bdist_rpm": {
"requires": "REPLACEME",
"icon": "data/icons/48/%s.png" % APP_NAME,
"group": "REPLACEME",
},
},
)
| lgpl-2.1 |
HerlanAssis/Django-AulaOsvandoSantana | lib/python2.7/site-packages/django/conf/locale/__init__.py | 82 | 12130 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
"""
LANG_INFO is a dictionary structure to provide meta information about languages.
About name_local: capitalize it as if your language name were appearing
inside a sentence in your language.
The 'fallback' key can be used to specify special fallback logic that doesn't
follow the traditional 'fr-ca' -> 'fr' fallback.
"""
LANG_INFO = {
'af': {
'bidi': False,
'code': 'af',
'name': 'Afrikaans',
'name_local': 'Afrikaans',
},
'ar': {
'bidi': True,
'code': 'ar',
'name': 'Arabic',
'name_local': 'العربيّة',
},
'ast': {
'bidi': False,
'code': 'ast',
'name': 'Asturian',
'name_local': 'asturianu',
},
'az': {
'bidi': True,
'code': 'az',
'name': 'Azerbaijani',
'name_local': 'Azərbaycanca',
},
'be': {
'bidi': False,
'code': 'be',
'name': 'Belarusian',
'name_local': 'беларуская',
},
'bg': {
'bidi': False,
'code': 'bg',
'name': 'Bulgarian',
'name_local': 'български',
},
'bn': {
'bidi': False,
'code': 'bn',
'name': 'Bengali',
'name_local': 'বাংলা',
},
'br': {
'bidi': False,
'code': 'br',
'name': 'Breton',
'name_local': 'brezhoneg',
},
'bs': {
'bidi': False,
'code': 'bs',
'name': 'Bosnian',
'name_local': 'bosanski',
},
'ca': {
'bidi': False,
'code': 'ca',
'name': 'Catalan',
'name_local': 'català',
},
'cs': {
'bidi': False,
'code': 'cs',
'name': 'Czech',
'name_local': 'česky',
},
'cy': {
'bidi': False,
'code': 'cy',
'name': 'Welsh',
'name_local': 'Cymraeg',
},
'da': {
'bidi': False,
'code': 'da',
'name': 'Danish',
'name_local': 'dansk',
},
'de': {
'bidi': False,
'code': 'de',
'name': 'German',
'name_local': 'Deutsch',
},
'el': {
'bidi': False,
'code': 'el',
'name': 'Greek',
'name_local': 'Ελληνικά',
},
'en': {
'bidi': False,
'code': 'en',
'name': 'English',
'name_local': 'English',
},
'en-au': {
'bidi': False,
'code': 'en-au',
'name': 'Australian English',
'name_local': 'Australian English',
},
'en-gb': {
'bidi': False,
'code': 'en-gb',
'name': 'British English',
'name_local': 'British English',
},
'eo': {
'bidi': False,
'code': 'eo',
'name': 'Esperanto',
'name_local': 'Esperanto',
},
'es': {
'bidi': False,
'code': 'es',
'name': 'Spanish',
'name_local': 'español',
},
'es-ar': {
'bidi': False,
'code': 'es-ar',
'name': 'Argentinian Spanish',
'name_local': 'español de Argentina',
},
'es-mx': {
'bidi': False,
'code': 'es-mx',
'name': 'Mexican Spanish',
'name_local': 'español de Mexico',
},
'es-ni': {
'bidi': False,
'code': 'es-ni',
'name': 'Nicaraguan Spanish',
'name_local': 'español de Nicaragua',
},
'es-ve': {
'bidi': False,
'code': 'es-ve',
'name': 'Venezuelan Spanish',
'name_local': 'español de Venezuela',
},
'et': {
'bidi': False,
'code': 'et',
'name': 'Estonian',
'name_local': 'eesti',
},
'eu': {
'bidi': False,
'code': 'eu',
'name': 'Basque',
'name_local': 'Basque',
},
'fa': {
'bidi': True,
'code': 'fa',
'name': 'Persian',
'name_local': 'فارسی',
},
'fi': {
'bidi': False,
'code': 'fi',
'name': 'Finnish',
'name_local': 'suomi',
},
'fr': {
'bidi': False,
'code': 'fr',
'name': 'French',
'name_local': 'français',
},
'fy': {
'bidi': False,
'code': 'fy',
'name': 'Frisian',
'name_local': 'frysk',
},
'ga': {
'bidi': False,
'code': 'ga',
'name': 'Irish',
'name_local': 'Gaeilge',
},
'gl': {
'bidi': False,
'code': 'gl',
'name': 'Galician',
'name_local': 'galego',
},
'he': {
'bidi': True,
'code': 'he',
'name': 'Hebrew',
'name_local': 'עברית',
},
'hi': {
'bidi': False,
'code': 'hi',
'name': 'Hindi',
'name_local': 'Hindi',
},
'hr': {
'bidi': False,
'code': 'hr',
'name': 'Croatian',
'name_local': 'Hrvatski',
},
'hu': {
'bidi': False,
'code': 'hu',
'name': 'Hungarian',
'name_local': 'Magyar',
},
'ia': {
'bidi': False,
'code': 'ia',
'name': 'Interlingua',
'name_local': 'Interlingua',
},
'io': {
'bidi': False,
'code': 'io',
'name': 'Ido',
'name_local': 'ido',
},
'id': {
'bidi': False,
'code': 'id',
'name': 'Indonesian',
'name_local': 'Bahasa Indonesia',
},
'is': {
'bidi': False,
'code': 'is',
'name': 'Icelandic',
'name_local': 'Íslenska',
},
'it': {
'bidi': False,
'code': 'it',
'name': 'Italian',
'name_local': 'italiano',
},
'ja': {
'bidi': False,
'code': 'ja',
'name': 'Japanese',
'name_local': '日本語',
},
'ka': {
'bidi': False,
'code': 'ka',
'name': 'Georgian',
'name_local': 'ქართული',
},
'kk': {
'bidi': False,
'code': 'kk',
'name': 'Kazakh',
'name_local': 'Қазақ',
},
'km': {
'bidi': False,
'code': 'km',
'name': 'Khmer',
'name_local': 'Khmer',
},
'kn': {
'bidi': False,
'code': 'kn',
'name': 'Kannada',
'name_local': 'Kannada',
},
'ko': {
'bidi': False,
'code': 'ko',
'name': 'Korean',
'name_local': '한국어',
},
'lb': {
'bidi': False,
'code': 'lb',
'name': 'Luxembourgish',
'name_local': 'Lëtzebuergesch',
},
'lt': {
'bidi': False,
'code': 'lt',
'name': 'Lithuanian',
'name_local': 'Lietuviškai',
},
'lv': {
'bidi': False,
'code': 'lv',
'name': 'Latvian',
'name_local': 'latviešu',
},
'mk': {
'bidi': False,
'code': 'mk',
'name': 'Macedonian',
'name_local': 'Македонски',
},
'ml': {
'bidi': False,
'code': 'ml',
'name': 'Malayalam',
'name_local': 'Malayalam',
},
'mn': {
'bidi': False,
'code': 'mn',
'name': 'Mongolian',
'name_local': 'Mongolian',
},
'mr': {
'bidi': False,
'code': 'mr',
'name': 'Marathi',
'name_local': 'मराठी',
},
'my': {
'bidi': False,
'code': 'my',
'name': 'Burmese',
'name_local': 'မြန်မာဘာသာ',
},
'nb': {
'bidi': False,
'code': 'nb',
'name': 'Norwegian Bokmal',
'name_local': 'norsk (bokmål)',
},
'ne': {
'bidi': False,
'code': 'ne',
'name': 'Nepali',
'name_local': 'नेपाली',
},
'nl': {
'bidi': False,
'code': 'nl',
'name': 'Dutch',
'name_local': 'Nederlands',
},
'nn': {
'bidi': False,
'code': 'nn',
'name': 'Norwegian Nynorsk',
'name_local': 'norsk (nynorsk)',
},
'no': {
'bidi': False,
'code': 'no',
'name': 'Norwegian',
'name_local': 'norsk',
},
'os': {
'bidi': False,
'code': 'os',
'name': 'Ossetic',
'name_local': 'Ирон',
},
'pa': {
'bidi': False,
'code': 'pa',
'name': 'Punjabi',
'name_local': 'Punjabi',
},
'pl': {
'bidi': False,
'code': 'pl',
'name': 'Polish',
'name_local': 'polski',
},
'pt': {
'bidi': False,
'code': 'pt',
'name': 'Portuguese',
'name_local': 'Português',
},
'pt-br': {
'bidi': False,
'code': 'pt-br',
'name': 'Brazilian Portuguese',
'name_local': 'Português Brasileiro',
},
'ro': {
'bidi': False,
'code': 'ro',
'name': 'Romanian',
'name_local': 'Română',
},
'ru': {
'bidi': False,
'code': 'ru',
'name': 'Russian',
'name_local': 'Русский',
},
'sk': {
'bidi': False,
'code': 'sk',
'name': 'Slovak',
'name_local': 'slovenský',
},
'sl': {
'bidi': False,
'code': 'sl',
'name': 'Slovenian',
'name_local': 'Slovenščina',
},
'sq': {
'bidi': False,
'code': 'sq',
'name': 'Albanian',
'name_local': 'shqip',
},
'sr': {
'bidi': False,
'code': 'sr',
'name': 'Serbian',
'name_local': 'српски',
},
'sr-latn': {
'bidi': False,
'code': 'sr-latn',
'name': 'Serbian Latin',
'name_local': 'srpski (latinica)',
},
'sv': {
'bidi': False,
'code': 'sv',
'name': 'Swedish',
'name_local': 'svenska',
},
'sw': {
'bidi': False,
'code': 'sw',
'name': 'Swahili',
'name_local': 'Kiswahili',
},
'ta': {
'bidi': False,
'code': 'ta',
'name': 'Tamil',
'name_local': 'தமிழ்',
},
'te': {
'bidi': False,
'code': 'te',
'name': 'Telugu',
'name_local': 'తెలుగు',
},
'th': {
'bidi': False,
'code': 'th',
'name': 'Thai',
'name_local': 'ภาษาไทย',
},
'tr': {
'bidi': False,
'code': 'tr',
'name': 'Turkish',
'name_local': 'Türkçe',
},
'tt': {
'bidi': False,
'code': 'tt',
'name': 'Tatar',
'name_local': 'Татарча',
},
'udm': {
'bidi': False,
'code': 'udm',
'name': 'Udmurt',
'name_local': 'Удмурт',
},
'uk': {
'bidi': False,
'code': 'uk',
'name': 'Ukrainian',
'name_local': 'Українська',
},
'ur': {
'bidi': True,
'code': 'ur',
'name': 'Urdu',
'name_local': 'اردو',
},
'vi': {
'bidi': False,
'code': 'vi',
'name': 'Vietnamese',
'name_local': 'Tiếng Việt',
},
'zh-cn': {
'fallback': ['zh-hans'],
'bidi': False,
'code': 'zh-cn',
'name': 'Simplified Chinese',
'name_local': '简体中文',
},
'zh-hans': {
'bidi': False,
'code': 'zh-hans',
'name': 'Simplified Chinese',
'name_local': '简体中文',
},
'zh-hant': {
'bidi': False,
'code': 'zh-hant',
'name': 'Traditional Chinese',
'name_local': '繁體中文',
},
'zh-hk': {
'fallback': ['zh-hant'],
},
'zh-mo': {
'fallback': ['zh-hant'],
},
'zh-my': {
'fallback': ['zh-hans'],
},
'zh-sg': {
'fallback': ['zh-hans'],
},
'zh-tw': {
'fallback': ['zh-hant'],
'bidi': False,
'code': 'zh-tw',
'name': 'Traditional Chinese',
'name_local': '繁體中文',
},
}
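
# Illustrative helper (a sketch, not Django's actual resolver) showing how the
# 'fallback' key is meant to be chased for entries like 'zh-hk' that carry no
# 'name' of their own:
def _get_lang_info(lang_code):
    info = LANG_INFO[lang_code]
    if 'fallback' in info and 'name' not in info:
        # e.g. 'zh-hk' -> 'zh-hant'; entries with their own 'name', such as
        # 'zh-cn', are returned as-is even though they list a fallback.
        return _get_lang_info(info['fallback'][0])
    return info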
| mit |
v1ron/linux-mainline | tools/perf/scripts/python/syscall-counts.py | 1996 | 1700 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n"
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
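
# Illustrative output (layout follows the format strings above; the names and
# counts are made up):
#
#   event                                         count
#   ----------------------------------------  -----------
#   sys_write                                       1293
#   sys_read                                         860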
| gpl-2.0 |
joebowen/movement_validation_cloud | djangodev/lib/python2.7/site-packages/django/utils/six.py | 15 | 25150 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.6.1"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
try:
result = self._resolve()
except ImportError:
# See the nice big comment in MovedModule.__getattr__.
            raise AttributeError("%s could not be imported" % self.name)
setattr(obj, self.name, result) # Invokes __set__.
# This is a bit ugly, but it avoids running this again.
delattr(obj.__class__, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
# It turns out many Python frameworks like to traverse sys.modules and
# try to load various attributes. This causes problems if this is a
# platform-specific module on the wrong platform, like _winreg on
# Unixes. Therefore, we silently pretend unimportable modules do not
# have any attributes. See issues #51, #53, #56, and #63 for the full
# tales of woe.
#
# First, if possible, avoid loading the module just to look at __file__,
# __name__, or __path__.
if (attr in ("__file__", "__name__", "__path__") and
self.mod not in sys.modules):
raise AttributeError(attr)
try:
_module = self._resolve()
except ImportError:
raise AttributeError(attr)
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "xmlrpclib", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
sys.modules[__name__ + ".moves." + attr.name] = attr
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
sys.modules[__name__ + ".moves.urllib_parse"] = sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
sys.modules[__name__ + ".moves.urllib_error"] = sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
sys.modules[__name__ + ".moves.urllib_request"] = sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
sys.modules[__name__ + ".moves.urllib_response"] = sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
sys.modules[__name__ + ".moves.urllib_robotparser"] = sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
parse = sys.modules[__name__ + ".moves.urllib_parse"]
error = sys.modules[__name__ + ".moves.urllib_error"]
request = sys.modules[__name__ + ".moves.urllib_request"]
response = sys.modules[__name__ + ".moves.urllib_response"]
robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
_iterlists = "lists"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
_iterlists = "iterlists"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
def iterkeys(d, **kw):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)(**kw))
def itervalues(d, **kw):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)(**kw))
def iteritems(d, **kw):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)(**kw))
def iterlists(d, **kw):
"""Return an iterator over the (key, [values]) pairs of a dictionary."""
return iter(getattr(d, _iterlists)(**kw))
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
def iterbytes(buf):
return (ord(byte) for byte in buf)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
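
# Usage sketch: the same class statement then works on Python 2 and 3.
#
#     class Meta(type):
#         pass
#
#     class MyClass(with_metaclass(Meta, object)):
#         pass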
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
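
# Usage sketch: the decorator form avoids creating the temporary class.
#
#     @add_metaclass(Meta)
#     class MyOtherClass(object):
#         pass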
### Additional customizations for Django ###
if PY3:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
memoryview = memoryview
buffer_types = (bytes, bytearray, memoryview)
else:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
# memoryview and buffer are not strictly equivalent, but should be fine for
# django core usage (mainly BinaryField). However, Jython doesn't support
# buffer (see http://bugs.jython.org/issue1521), so we have to be careful.
if sys.platform.startswith('java'):
memoryview = memoryview
else:
memoryview = buffer
buffer_types = (bytearray, memoryview)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
add_move(MovedModule("_dummy_thread", "dummy_thread"))
add_move(MovedModule("_thread", "thread"))
| mit |
hyowon/servo | tests/wpt/harness/wptrunner/executors/executorservo.py | 14 | 7768 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import base64
import hashlib
import json
import os
import subprocess
import tempfile
import threading
import urlparse
import uuid
from collections import defaultdict
from mozprocess import ProcessHandler
from .base import (ExecutorException,
Protocol,
RefTestImplementation,
testharness_result_converter,
reftest_result_converter)
from .process import ProcessTestExecutor
from ..browsers.base import browser_command
hosts_text = """127.0.0.1 web-platform.test
127.0.0.1 www.web-platform.test
127.0.0.1 www1.web-platform.test
127.0.0.1 www2.web-platform.test
127.0.0.1 xn--n8j6ds53lwwkrqhv28a.web-platform.test
127.0.0.1 xn--lve-6lad.web-platform.test
"""
def make_hosts_file():
hosts_fd, hosts_path = tempfile.mkstemp()
with os.fdopen(hosts_fd, "w") as f:
f.write(hosts_text)
return hosts_path
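
# The temporary file's path reaches Servo through the HOST_FILE environment
# variable (see do_test/screenshot below), so the web-platform.test domains
# resolve to the local wpt server. Sketch of the expected lifecycle:
#
#     hosts_path = make_hosts_file()
#     env = os.environ.copy()
#     env["HOST_FILE"] = hosts_path
#     ...  # run the browser, then unlink hosts_path in teardown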
class ServoTestharnessExecutor(ProcessTestExecutor):
convert_result = testharness_result_converter
def __init__(self, browser, server_config, timeout_multiplier=1, debug_info=None,
pause_after_test=False):
ProcessTestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.pause_after_test = pause_after_test
self.result_data = None
self.result_flag = None
self.protocol = Protocol(self, browser)
self.hosts_path = make_hosts_file()
def teardown(self):
try:
os.unlink(self.hosts_path)
except OSError:
pass
ProcessTestExecutor.teardown(self)
def do_test(self, test):
self.result_data = None
self.result_flag = threading.Event()
debug_args, command = browser_command(self.binary,
["--cpu", "--hard-fail", "-u", "Servo/wptrunner", "-z", self.test_url(test)],
self.debug_info)
self.command = command
if self.pause_after_test:
self.command.remove("-z")
self.command = debug_args + self.command
env = os.environ.copy()
env["HOST_FILE"] = self.hosts_path
if not self.interactive:
self.proc = ProcessHandler(self.command,
processOutputLine=[self.on_output],
onFinish=self.on_finish,
env=env,
storeOutput=False)
self.proc.run()
else:
self.proc = subprocess.Popen(self.command, env=env)
try:
timeout = test.timeout * self.timeout_multiplier
# Now wait to get the output we expect, or until we reach the timeout
if not self.interactive and not self.pause_after_test:
wait_timeout = timeout + 5
self.result_flag.wait(wait_timeout)
else:
wait_timeout = None
self.proc.wait()
proc_is_running = True
if self.result_flag.is_set():
if self.result_data is not None:
self.result_data["test"] = test.url
result = self.convert_result(test, self.result_data)
else:
self.proc.wait()
result = (test.result_cls("CRASH", None), [])
proc_is_running = False
else:
result = (test.result_cls("TIMEOUT", None), [])
if proc_is_running:
if self.pause_after_test:
self.logger.info("Pausing until the browser exits")
self.proc.wait()
else:
self.proc.kill()
except KeyboardInterrupt:
self.proc.kill()
raise
return result
def on_output(self, line):
prefix = "ALERT: RESULT: "
line = line.decode("utf8", "replace")
if line.startswith(prefix):
self.result_data = json.loads(line[len(prefix):])
self.result_flag.set()
else:
if self.interactive:
print line
else:
self.logger.process_output(self.proc.pid,
line,
" ".join(self.command))
def on_finish(self):
self.result_flag.set()
class TempFilename(object):
def __init__(self, directory):
self.directory = directory
self.path = None
def __enter__(self):
self.path = os.path.join(self.directory, str(uuid.uuid4()))
return self.path
def __exit__(self, *args, **kwargs):
try:
os.unlink(self.path)
except OSError:
pass
class ServoRefTestExecutor(ProcessTestExecutor):
convert_result = reftest_result_converter
def __init__(self, browser, server_config, binary=None, timeout_multiplier=1,
screenshot_cache=None, debug_info=None, pause_after_test=False):
ProcessTestExecutor.__init__(self,
browser,
server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = Protocol(self, browser)
self.screenshot_cache = screenshot_cache
self.implementation = RefTestImplementation(self)
self.tempdir = tempfile.mkdtemp()
self.hosts_path = make_hosts_file()
def teardown(self):
try:
os.unlink(self.hosts_path)
except OSError:
pass
os.rmdir(self.tempdir)
ProcessTestExecutor.teardown(self)
def screenshot(self, test):
full_url = self.test_url(test)
with TempFilename(self.tempdir) as output_path:
self.command = [self.binary, "--cpu", "--hard-fail", "--exit",
"-u", "Servo/wptrunner", "-Z", "disable-text-aa",
"--output=%s" % output_path, full_url]
env = os.environ.copy()
env["HOST_FILE"] = self.hosts_path
self.proc = ProcessHandler(self.command,
processOutputLine=[self.on_output],
env=env)
try:
self.proc.run()
timeout = test.timeout * self.timeout_multiplier + 5
rv = self.proc.wait(timeout=timeout)
except KeyboardInterrupt:
self.proc.kill()
raise
if rv is None:
self.proc.kill()
return False, ("EXTERNAL-TIMEOUT", None)
if rv != 0 or not os.path.exists(output_path):
return False, ("CRASH", None)
with open(output_path) as f:
# Might need to strip variable headers or something here
data = f.read()
return True, base64.b64encode(data)
def do_test(self, test):
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def on_output(self, line):
line = line.decode("utf8", "replace")
if self.interactive:
print line
else:
self.logger.process_output(self.proc.pid,
line,
" ".join(self.command))
| mpl-2.0 |
fdemmer/togglwrapper | docs/source/conf.py | 1 | 9286 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# togglwrapper documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 1 10:22:32 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'togglwrapper'
copyright = '2015, aarose'
author = 'aarose'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'togglwrapperdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'togglwrapper.tex', 'togglwrapper Documentation',
'aarose', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'togglwrapper', 'togglwrapper Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'togglwrapper', 'togglwrapper Documentation',
author, 'togglwrapper', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01/operations/_load_balancer_network_interfaces_operations.py | 1 | 5718 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerNetworkInterfacesOperations(object):
"""LoadBalancerNetworkInterfacesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkInterfaceListResult"]
"""Gets associated load balancer network interfaces.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/networkInterfaces'} # type: ignore
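# Hedged usage sketch (not part of the generated client): consuming the pager
# returned by list(). `network_client` is assumed to be an already configured
# NetworkManagementClient; the resource group and load balancer names are
# placeholders.
def _list_load_balancer_nics_example(network_client):
    pager = network_client.load_balancer_network_interfaces.list(
        resource_group_name="example-rg",
        load_balancer_name="example-lb",
    )
    return [nic.name for nic in pager]  # transparently follows next_link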
| mit |
empireryan/director | src/python/ddapp/skybox.py | 6 | 3728 | import ddapp.visualization as vis
from ddapp import filterUtils
import ddapp.vtkAll as vtk
import ddapp.vtkNumpy as vnp
from ddapp.shallowCopy import shallowCopy
from ddapp import ioUtils
import numpy as np
def createTexturedPlane():
source = vtk.vtkPlaneSource()
textureMap = vtk.vtkTextureMapToPlane()
textureMap.SetInput(source.GetOutput())
textureMap.Update()
return shallowCopy(textureMap.GetOutput())
def getSkyboxSides():
return ['top', 'bottom', 'front', 'back', 'left', 'right']
def createSkyboxPlane(side):
pd = createTexturedPlane()
t = vtk.vtkTransform()
t.PostMultiply()
if side == 'top':
t.Translate(0,0,0.5)
t.RotateZ(180)
elif side == 'bottom':
t.RotateX(180)
t.RotateY(180)
t.RotateZ(-270)
t.Translate(0,0,-0.5)
elif side == 'front':
t.RotateY(90)
t.RotateX(90)
t.RotateZ(180)
t.Translate(0.5,0.0,0.0)
elif side == 'back':
t.RotateY(90)
t.RotateX(90)
t.RotateZ(0)
t.Translate(-0.5,0.0,0.0)
elif side == 'left':
t.RotateY(90)
t.RotateX(90)
t.RotateZ(-90)
t.Translate(0.0,0.5,0.0)
elif side == 'right':
t.RotateY(90)
t.RotateX(90)
t.RotateZ(90)
t.Translate(0.0,-0.5,0.0)
pd = filterUtils.transformPolyData(pd, t)
return pd
def createSkyboxPlanes():
planes = {}
for side in getSkyboxSides():
planes[side] = createSkyboxPlane(side)
return planes
def createTexture(imageFilename):
image = ioUtils.readImage(imageFilename)
tex = vtk.vtkTexture()
tex.SetInput(image)
tex.EdgeClampOn()
tex.RepeatOff()
return tex
def createSkybox(imageMap, view):
objs = {}
planes = createSkyboxPlanes()
for side, imageFilename in imageMap.iteritems():
texture = createTexture(imageFilename)
obj = vis.PolyDataItem('skybox %s' % side, planes[side], view=None)
obj.actor.SetTexture(texture)
obj.actor.GetProperty().LightingOff()
view.backgroundRenderer().AddActor(obj.actor)
objs[side] = obj
return objs
def getSkyboxImages(baseDir):
imageMap = dict(
top = baseDir + '/topmars1.jpg',
bottom = baseDir + '/botmars1.jpg',
front = baseDir + '/frontmars1.jpg',
back = baseDir + '/backmars1.jpg',
left = baseDir + '/leftmars1.jpg',
right = baseDir + '/rightmars1.jpg')
return imageMap
def createTextureGround(imageFilename, view):
pd = createTexturedPlane()
texture = createTexture(imageFilename)
texture.RepeatOn()
tcoords = vnp.getNumpyFromVtk(pd, 'Texture Coordinates')
tcoords *= 60
t = vtk.vtkTransform()
t.PostMultiply()
t.Scale(200,200,200)
t.Translate(0,0,-0.005)
pd = filterUtils.transformPolyData(pd, t)
obj = vis.showPolyData(pd, 'ground', view=view, alpha=1.0, parent='skybox')
obj.actor.SetTexture(texture)
obj.actor.GetProperty().LightingOff()
def connectSkyboxCamera(view, debug=False):
baseRen = view.backgroundRenderer()
def updateSkyboxCamera(o, e):
c = baseRen.GetActiveCamera()
c2 = view.camera()
viewDirection = np.array(c2.GetFocalPoint()) - np.array(c2.GetPosition())
viewDirection /= np.linalg.norm(viewDirection)
if debug:
c.SetPosition(c2.GetPosition())
c.SetFocalPoint(c2.GetFocalPoint())
else:
c.SetPosition(0,0,0)
c.SetFocalPoint(viewDirection)
c.SetViewUp(c2.GetViewUp())
c.SetViewAngle(c2.GetViewAngle())
view.renderWindow().AddObserver('StartEvent', updateSkyboxCamera)
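# Hedged usage sketch (not part of the original module): wiring the helpers
# together. `view` is assumed to be a ddapp view exposing a background
# renderer, and `baseDir` must contain the six mars images that
# getSkyboxImages() expects.
def _skyboxExample(view, baseDir):
    imageMap = getSkyboxImages(baseDir)
    skyboxObjs = createSkybox(imageMap, view)
    connectSkyboxCamera(view)
    return skyboxObjs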
| bsd-3-clause |
adviti/melange | thirdparty/google_appengine/lib/django_1_2/django/contrib/gis/gdal/error.py | 169 | 1517 | """
This module houses the OGR & SRS Exception objects, and the
check_err() routine which checks the status code returned by
OGR methods.
"""
#### OGR & SRS Exceptions ####
class GDALException(Exception): pass
class OGRException(Exception): pass
class SRSException(Exception): pass
class OGRIndexError(OGRException, KeyError):
"""
This exception is raised when an invalid index is encountered, and has
    the 'silent_variable_failure' attribute set to true. This ensures that
django's templates proceed to use the next lookup type gracefully when
an Exception is raised. Fixes ticket #4740.
"""
silent_variable_failure = True
#### OGR error checking codes and routine ####
# OGR Error Codes
OGRERR_DICT = { 1 : (OGRException, 'Not enough data.'),
2 : (OGRException, 'Not enough memory.'),
3 : (OGRException, 'Unsupported geometry type.'),
4 : (OGRException, 'Unsupported operation.'),
5 : (OGRException, 'Corrupt data.'),
6 : (OGRException, 'OGR failure.'),
7 : (SRSException, 'Unsupported SRS.'),
8 : (OGRException, 'Invalid handle.'),
}
OGRERR_NONE = 0
def check_err(code):
"Checks the given OGRERR, and raises an exception where appropriate."
if code == OGRERR_NONE:
return
elif code in OGRERR_DICT:
e, msg = OGRERR_DICT[code]
raise e, msg
else:
raise OGRException('Unknown error code: "%s"' % code)
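# Hedged usage sketch (not part of the original module): check_err() returns
# silently for OGRERR_NONE and otherwise raises the exception class mapped in
# OGRERR_DICT for the given status code.
def _check_err_example():
    check_err(OGRERR_NONE)  # no-op
    try:
        check_err(7)  # code 7 maps to SRSException('Unsupported SRS.')
    except SRSException:
        pass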
| apache-2.0 |
waheedahmed/edx-platform | common/lib/xmodule/xmodule/modulestore/perf_tests/generate_asset_xml.py | 83 | 5955 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generates fake XML for asset metadata.
"""
import random
from lxml import etree
from datetime import datetime, timedelta
from xmodule.assetstore import AssetMetadata
from opaque_keys.edx.keys import CourseKey
try:
import click
except ImportError:
click = None
# Name of the asset metadata XML schema definition file.
ASSET_XSD_FILE = 'assets.xsd'
# Characters used in name generation below.
NAME_CHARS = u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-'
NAME_CHARS_W_UNICODE = NAME_CHARS + u'àĚŘDžΦШΩΣӔ'
def coin_flip():
"""
50/50 chance
"""
return random.choice((True, False))
def asset_type():
"""
Pick an asset type at random.
"""
asset_type_choices = (
(95, "asset"),
(100, "video")
)
d100 = random.randint(0, 100)
for choice in asset_type_choices:
if d100 <= choice[0]:
return choice[1]
return asset_type_choices[-1][1]
def filename():
"""
Fake a filename.
"""
fname = u''
for __ in xrange(random.randint(10, 30)):
fname += random.choice(NAME_CHARS_W_UNICODE)
fname += random.choice(('.jpg', '.pdf', '.png', '.txt'))
return fname
def pathname():
"""
Fake a pathname.
"""
pname = u''
for __ in xrange(random.randint(2, 3)):
for __ in xrange(random.randint(5, 10)):
pname += random.choice(NAME_CHARS)
pname += '/'
return pname
def locked():
"""
Locked or unlocked.
"""
return coin_flip()
def fields():
"""
Generate some fake extra fields.
"""
f = {}
if coin_flip():
if coin_flip():
f['copyrighted'] = coin_flip()
if coin_flip():
f['size'] = random.randint(100, 10000000)
if coin_flip():
f['color'] = random.choice(('blue', 'pink', 'fuchsia', 'rose', 'mauve', 'black'))
return f
def user_id():
"""
Fake user id.
"""
return random.randint(1, 100000000)
def versions():
"""
Fake versions.
"""
curr_ver = random.randint(1, 500)
prev_ver = curr_ver - 1
def ver_str(ver):
"""
Version string.
"""
return 'v{}.0'.format(ver)
return (ver_str(curr_ver), ver_str(prev_ver))
def date_and_time():
"""
Fake date/time.
"""
start_date = datetime.now()
time_back = timedelta(seconds=random.randint(0, 473040000)) # 15 year interval
return start_date - time_back
def contenttype():
"""
Random MIME type.
"""
return random.choice((
'image/jpeg',
'text/html',
'audio/aiff',
'video/avi',
'text/plain',
'application/msword',
'application/x-gzip',
'application/javascript',
))
def generate_random_asset_md():
"""
Generates a single AssetMetadata object with semi-random data.
"""
course_key = CourseKey.from_string('org/course/run')
asset_key = course_key.make_asset_key(asset_type(), filename())
(curr_version, prev_version) = versions()
return AssetMetadata(
asset_key,
pathname=pathname(),
internal_name=str([filename() for __ in xrange(10)]),
locked=locked(),
contenttype=contenttype(),
thumbnail=filename(),
fields=fields(),
curr_version=curr_version,
prev_version=prev_version,
edited_by=user_id(),
edited_by_email='staff@edx.org',
edited_on=date_and_time(),
created_by=user_id(),
created_by_email='staff@edx.org',
created_on=date_and_time(),
)
def make_asset_md(amount):
"""
Make a number of fake AssetMetadata objects.
"""
all_asset_md = []
for __ in xrange(amount):
all_asset_md.append(generate_random_asset_md())
return all_asset_md
def make_asset_xml(amount, xml_filename):
"""
Make an XML file filled with fake AssetMetadata.
"""
all_md = make_asset_md(amount)
xml_root = etree.Element("assets")
for mdata in all_md:
asset_element = etree.SubElement(xml_root, "asset")
mdata.to_xml(asset_element)
with open(xml_filename, "w") as xml_file:
etree.ElementTree(xml_root).write(xml_file)
def validate_xml(xsd_filename, xml_filename):
"""
Validate a generated XML file against the XSD.
"""
with open(xsd_filename, 'r') as f:
schema_root = etree.XML(f.read())
schema = etree.XMLSchema(schema_root)
xmlparser = etree.XMLParser(schema=schema)
with open(xml_filename, 'r') as f:
etree.fromstring(f.read(), xmlparser)
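# Hedged usage sketch (not part of the original script): driving the two main
# entry points directly, bypassing the optional click CLI. Assumes assets.xsd
# is present in the working directory; the output filename is a placeholder.
def _generate_and_validate_example():
    out_xml = 'assets_sample.xml'
    make_asset_xml(5, out_xml)
    validate_xml(ASSET_XSD_FILE, out_xml)  # raises on schema violations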
if click is not None:
# pylint: disable=bad-continuation
@click.command()
@click.option('--num_assets',
type=click.INT,
default=10,
help="Number of assets to be generated by the script.",
required=False
)
@click.option('--output_xml',
type=click.File('w'),
default=AssetMetadata.EXPORTED_ASSET_FILENAME,
help="Filename for the output XML file.",
required=False
)
@click.option('--input_xsd',
type=click.File('r'),
default=ASSET_XSD_FILE,
help="Filename for the XSD (schema) file to read in.",
required=False
)
def cli(num_assets, output_xml, input_xsd):
"""
Generates a number of fake asset metadata items as XML - and validates the XML against the schema.
"""
make_asset_xml(num_assets, output_xml)
# Now - validate the XML against the XSD.
validate_xml(input_xsd, output_xml)
if __name__ == '__main__':
if click is not None:
cli() # pylint: disable=no-value-for-parameter
else:
print "Aborted! Module 'click' is not installed."
| agpl-3.0 |
hirofumi0810/asr_preprocessing | swbd/path.py | 1 | 9200 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Prepare for making dataset (Switchboard corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, basename
from glob import glob
class Path(object):
"""Prepare for making dataset.
Args:
swbd_audio_path (string): path to audio files of Switchboard corpus
        swbd_trans_path (string): path to transcript files of Switchboard corpus
eval2000_audio_path (string): path to audio files of eval2000 corpus
eval2000_trans_path (string): path to trans files of eval2000 corpus
fisher_path (string): path to Fisher corpus
        run_root_path (string): path to ./make.sh
        wav_save_path (string, optional): path under which wav files are saved
        htk_save_path (string, optional): path under which htk files are saved
    """
def __init__(self, swbd_audio_path, swbd_trans_path, eval2000_audio_path,
eval2000_trans_path, run_root_path, fisher_path=None,
wav_save_path=None, htk_save_path=None):
self.swbd_audio_path = swbd_audio_path
self.swbd_trans_path = swbd_trans_path
self.eval2000_audio_path = eval2000_audio_path
self.eval2000_trans_path = eval2000_trans_path
self.fisher_path = fisher_path
self.wav_save_path = wav_save_path
self.htk_save_path = htk_save_path
self.pem_path = None
# NOTE: hub5e_00.pem file is a segmentation file
self.stm_path = None
        # NOTE: stm is a transcription file of swbd and callhome
# Absolute path to this directory
self.run_root_path = run_root_path
self.__make()
def __make(self):
self._sph_paths = {
'swbd': [],
'fisher': [],
'eval2000_swbd': [],
'eval2000_ch': []
}
self._trans_paths = {
'swbd': [],
'fisher': [],
'eval2000_swbd': [],
'eval2000_ch': []
}
self._word_paths = {
'swbd': [],
'fisher': [],
'eval2000_swbd': [],
'eval2000_ch': []
}
####################
# train (LDC97S62)
####################
if self.swbd_audio_path is not None:
self.word_dict_path = join(
self.swbd_audio_path, 'sw-ms98-dict.text')
for sph_path in glob(join(self.swbd_audio_path, '*/data/*.sph')):
self._sph_paths['swbd'].append(sph_path)
if self.swbd_trans_path is not None:
for trans_path in glob(join(self.swbd_trans_path,
'*/*/*.text')):
if trans_path.split('.')[0][-4:] == 'word':
self._word_paths['swbd'].append(trans_path)
elif trans_path.split('.')[0][-5:] == 'trans':
self._trans_paths['swbd'].append(trans_path)
####################
# train (Fisher)
####################
if self.fisher_path is not None:
for sph_path in glob(join(self.fisher_path, 'audio/*/*.sph')):
self._sph_paths['fisher'].append(sph_path)
for trans_path in glob(join(self.fisher_path, 'data/trans/*/*.txt')):
self._trans_paths['fisher'].append(trans_path)
########################################
# test (eval2000)
########################################
if self.eval2000_audio_path is not None:
for file_path in glob(join(self.eval2000_audio_path, 'english/*')):
file_name = basename(file_path)
if file_name[:2] == 'sw':
self._sph_paths['eval2000_swbd'].append(file_path)
elif file_name[:2] == 'en':
self._sph_paths['eval2000_ch'].append(file_path)
elif file_name == 'hub5e_00.pem':
self.pem_path = file_path
if self.eval2000_trans_path is not None:
for file_path in glob(join(self.eval2000_trans_path, 'reference/english/*')):
file_name = basename(file_path)
if file_name[:2] == 'sw':
self._trans_paths['eval2000_swbd'].append(file_path)
elif file_name[:2] == 'en':
self._trans_paths['eval2000_ch'].append(file_path)
self.stm_path = join(self.eval2000_trans_path,
'reference', 'hub5e00.english.000405.stm')
self.glm_path = join(self.eval2000_trans_path,
'reference', 'en20000405_hub5.glm')
def sph(self, corpus):
"""Get paths to sph files of training data.
Args:
corpus (string): swbd or fisher or eval2000_swbd or
eval2000_ch
Returns:
paths to sph files
"""
return sorted(self._sph_paths[corpus])
def wav(self, corpus):
"""Get paths to wav files of training data.
Args:
corpus (string): swbd or fisher or eval2000_swbd or
eval2000_ch
Returns:
paths to wav files
"""
if self.wav_save_path is None:
raise ValueError('Set path to wav files.')
if corpus == 'swbd':
return [p for p in glob(join(self.wav_save_path, 'swbd/*.wav'))]
            # ex.) wav/swbd/*.wav
elif corpus == 'fisher':
if self.fisher_path is None:
raise ValueError('Set path to fisher corpus.')
return [p for p in glob(join(self.wav_save_path, 'fisher/*/*.wav'))]
# ex.) wav/fisher/speaker/*.wav
elif corpus == 'eval2000_swbd':
return [p for p in glob(join(self.wav_save_path, 'eval2000/swbd/*.wav'))]
# ex.) wav/eval2000/swbd/*.wav
elif corpus == 'eval2000_ch':
return [p for p in glob(join(self.wav_save_path, 'eval2000/callhome/*.wav'))]
# ex.) wav/eval2000/callhome/*.wav
else:
raise TypeError
def htk(self, corpus):
"""Get paths to htk files of training data.
Args:
corpus (string): swbd or fisher or eval2000_swbd or
eval2000_ch
Returns:
paths to htk files
"""
if self.htk_save_path is None:
raise ValueError('Set path to htk files.')
if corpus == 'swbd':
return [p for p in glob(join(self.htk_save_path, 'swbd/*.htk'))]
            # ex.) htk/swbd/*.htk
elif corpus == 'fisher':
if self.fisher_path is None:
raise ValueError('Set path to fisher corpus.')
return [p for p in glob(join(self.htk_save_path, 'fisher/*/*.htk'))]
# ex.) htk/fisher/speaker/*.htk
elif corpus == 'eval2000_swbd':
return [p for p in glob(join(self.htk_save_path, 'eval2000/swbd/*.htk'))]
# ex.) htk/eval2000/swbd/*.htk
elif corpus == 'eval2000_ch':
return [p for p in glob(join(self.htk_save_path, 'eval2000/callhome/*.htk'))]
# ex.) htk/eval2000/callhome/*.htk
else:
raise TypeError
def trans(self, corpus):
"""Get paths to transcription files of the training data.
Args:
corpus (string): swbd or fisher or eval2000_swbd or
eval2000_ch
Returns:
paths: paths to transcription files
"""
return sorted(self._trans_paths[corpus])
def word(self, corpus):
"""Get paths to word boundary files of the training data.
Args:
corpus (string): swbd
Returns:
paths: paths to transcription files
"""
assert corpus == 'swbd'
return sorted(self._word_paths['swbd'])
if __name__ == '__main__':
path = Path(
swbd_audio_path='/n/sd8/inaguma/corpus/swbd/data/LDC97S62',
swbd_trans_path='/n/sd8/inaguma/corpus/swbd/swb_ms98_transcriptions',
fisher_path='/n/sd8/inaguma/corpus/swbd/data/fisher',
eval2000_audio_path='/n/sd8/inaguma/corpus/swbd/data/eval2000/LDC2002S09',
eval2000_trans_path='/n/sd8/inaguma/corpus/swbd/data/eval2000/LDC2002T43',
wav_save_path='/n/sd8/inaguma/corpus/swbd/wav',
htk_save_path='/n/sd8/inaguma/corpus/swbd/htk',
run_root_path='./')
print('===== LDC97S62 ====')
print(len(path.sph(corpus='swbd'))) # 2ch
print(len(path.wav(corpus='swbd')))
print(len(path.htk(corpus='swbd')))
print(len(path.trans(corpus='swbd')))
print(len(path.word(corpus='swbd')))
print('==== Fisher ====')
print(len(path.sph(corpus='fisher')))
print(len(path.wav(corpus='fisher')))
print(len(path.htk(corpus='fisher')))
print(len(path.trans(corpus='fisher')))
print('==== eval2000 (SWB) ====')
print(len(path.sph(corpus='eval2000_swbd')))
print(len(path.wav(corpus='eval2000_swbd')))
print(len(path.htk(corpus='eval2000_swbd')))
print(len(path.trans(corpus='eval2000_swbd')))
print('==== eval2000 (CH) ====')
print(len(path.sph(corpus='eval2000_ch')))
print(len(path.wav(corpus='eval2000_ch')))
print(len(path.htk(corpus='eval2000_ch')))
print(len(path.trans(corpus='eval2000_ch')))
| mit |
kswiat/django | django/core/mail/backends/console.py | 696 | 1477 | """
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
from django.utils import six
class EmailBackend(BaseEmailBackend):
def __init__(self, *args, **kwargs):
self.stream = kwargs.pop('stream', sys.stdout)
self._lock = threading.RLock()
super(EmailBackend, self).__init__(*args, **kwargs)
def write_message(self, message):
msg = message.message()
msg_data = msg.as_bytes()
if six.PY3:
charset = msg.get_charset().get_output_charset() if msg.get_charset() else 'utf-8'
msg_data = msg_data.decode(charset)
self.stream.write('%s\n' % msg_data)
self.stream.write('-' * 79)
self.stream.write('\n')
def send_messages(self, email_messages):
"""Write all messages to the stream in a thread-safe way."""
if not email_messages:
return
msg_count = 0
with self._lock:
try:
stream_created = self.open()
for message in email_messages:
self.write_message(message)
self.stream.flush() # flush after each message
msg_count += 1
if stream_created:
self.close()
except Exception:
if not self.fail_silently:
raise
return msg_count
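# Hedged usage sketch (not part of the original module): the backend is
# normally selected through the EMAIL_BACKEND setting, but it can also be
# driven directly once Django settings are configured.
def _console_backend_example():
    from django.core.mail import EmailMessage
    backend = EmailBackend()
    message = EmailMessage('subject', 'body', 'from@example.com',
                           ['to@example.com'])
    return backend.send_messages([message])  # writes the message to stdout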
| bsd-3-clause |
EnviroCentre/jython-upgrade | jython/lib/test/test_commands.py | 130 | 2640 | '''
Tests for commands module
Nick Mathewson
'''
import unittest
import os, tempfile, re
from test.test_support import run_unittest, reap_children, import_module, \
check_warnings
# Silence Py3k warning
commands = import_module('commands', deprecated=True)
# The module says:
# "NB This only works (and is only relevant) for UNIX."
#
# Actually, getoutput should work on any platform with an os.popen, but
# I'll take the comment as given, and skip this suite.
if os.name != 'posix':
raise unittest.SkipTest('Not posix; skipping test_commands')
class CommandTests(unittest.TestCase):
def test_getoutput(self):
self.assertEqual(commands.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(commands.getstatusoutput('echo xyzzy'), (0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = commands.getstatusoutput('cat ' + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
def test_getstatus(self):
# This pattern should match 'ls -ld /.' on any posix
# system, however perversely configured. Even on systems
# (e.g., Cygwin) where user and group names can have spaces:
# drwxr-xr-x 15 Administ Domain U 4096 Aug 12 12:50 /
# drwxr-xr-x 15 Joe User My Group 4096 Aug 12 12:50 /
# Note that the first case above has a space in the group name
# while the second one has a space in both names.
# Special attributes supported:
# + = has ACLs
# @ = has Mac OS X extended attributes
# . = has a SELinux security context
pat = r'''d......... # It is a directory.
[.+@]? # It may have special attributes.
\s+\d+ # It has some number of links.
[^/]* # Skip user, group, size, and date.
/\. # and end with the name of the file.
'''
with check_warnings((".*commands.getstatus.. is deprecated",
DeprecationWarning)):
self.assertTrue(re.match(pat, commands.getstatus("/."), re.VERBOSE))
def test_main():
run_unittest(CommandTests)
reap_children()
if __name__ == "__main__":
test_main()
| mit |
kkappel/web2py-community | languages/uk.py | 1 | 14399 | # -*- coding: utf-8 -*-
{
'!langcode!': 'uk',
'!langname!': 'Українська',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Оновити" це додатковий вираз, такий, як "field1=\'нове_значення\'". Ви не можете змінювати або вилучати дані об\'єднаних таблиць.',
'%d days ago': '%d %%{день} тому',
'%d hours ago': '%d %%{годину} тому',
'%d minutes ago': '%d %%{хвилину} тому',
'%d months ago': '%d %%{місяць} тому',
'%d secods ago': '%d %%{секунду} тому',
'%d weeks ago': '%d %%{тиждень} тому',
'%d years ago': '%d %%{рік} тому',
'%s %%{row} deleted': 'Вилучено %s %%{рядок}',
'%s %%{row} updated': 'Змінено %s %%{рядок}',
'%s selected': 'Вибрано %s %%{запис}',
'%Y-%m-%d': '%Y/%m/%d',
'%Y-%m-%d %H:%M:%S': '%Y/%m/%d %H:%M:%S',
'1 day ago': '1 день тому',
'1 hour ago': '1 годину тому',
'1 minute ago': '1 хвилину тому',
'1 month ago': '1 місяць тому',
'1 second ago': '1 секунду тому',
'1 week ago': '1 тиждень тому',
'1 year ago': '1 рік тому',
'@markmin\x01(**%.0d MB**)': '(**``%.0d``:red МБ**)',
'@markmin\x01**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{елемент(items)}, **%(bytes)s** %%{байт(bytes)}',
'@markmin\x01``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**нема в наявності**``:red (потребує Пітонівської бібліотеки [[guppy [посилання відкриється у новому вікні] http://pypi.python.org/pypi/guppy/ popup]])',
'@markmin\x01An error occured, please [[reload %s]] the page': 'Сталась помилка, будь-ласка [[перевантажте %s]] сторінку',
'@markmin\x01Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': "Час життя об'єктів в КЕШІ сягає **%(hours)02d** %%{годину(hours)} **%(min)02d** %%{хвилину(min)} та **%(sec)02d** %%{секунду(sec)}.",
'@markmin\x01DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': "Час життя об'єктів в ДИСКОВОМУ КЕШІ сягає **%(hours)02d** %%{годину(hours)} **%(min)02d** %%{хвилину(min)} та **%(sec)02d** %%{секунду(sec)}.",
'@markmin\x01Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Оцінка поцілювання: **%(ratio)s%%** (**%(hits)s** %%{поцілювання(hits)} та **%(misses)s** %%{схибнення(misses)})',
'@markmin\x01Number of entries: **%s**': 'Кількість входжень: ``**%s**``:red',
'@markmin\x01RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': "Час життя об'єктів в ОЗП-КЕШІ сягає **%(hours)02d** %%{годину(hours)} **%(min)02d** %%{хвилину(min)} та **%(sec)02d** %%{секунду(sec)}.",
'About': 'Про додаток',
'Access Control': 'Контроль доступу',
'Administrative Interface': 'Адміністративний інтерфейс',
'Ajax Recipes': 'Рецепти для Ajax',
'appadmin is disabled because insecure channel': 'використовується незахищенний канал (HTTP). Appadmin вимкнено',
'Are you sure you want to delete this object?': "Ви впевнені, що хочете вилучити цей об'єкт?",
'Available Databases and Tables': 'Доступні бази даних та таблиці',
'Buy this book': 'Купити книжку',
'cache': 'кеш',
'Cache': 'Кеш',
'Cache Keys': 'Ключі кешу',
'Cannot be empty': 'Порожнє значення неприпустиме',
'Change password': 'Змінити пароль',
'Check to delete': 'Позначити для вилучення',
'Check to delete:': 'Позначте для вилучення:',
'Clear CACHE?': 'Очистити ВЕСЬ кеш?',
'Clear DISK': 'Очистити ДИСКОВИЙ кеш',
'Clear RAM': "Очистити кеш В ПАМ'ЯТІ",
'Client IP': 'IP клієнта',
'Community': 'Спільнота',
'Components and Plugins': 'Компоненти та втулки',
'Controller': 'Контролер',
'Copyright': 'Правовласник',
'Created By': 'Створив(ла)',
'Created On': 'Створено в',
'Current request': 'Поточний запит (current request)',
'Current response': 'Поточна відповідь (current response)',
'Current session': 'Поточна сесія (current session)',
'customize me!': 'причепуріть мене!',
'data uploaded': 'дані завантажено',
'Database': 'База даних',
'Database %s select': 'Вибірка з бази даних %s',
'Database Administration (appadmin)': 'Адміністрування Бази Даних (appadmin)',
'db': 'база даних',
'DB Model': 'Модель БД',
'Delete:': 'Вилучити:',
'Demo': 'Демо',
'Deployment Recipes': 'Способи розгортання',
'Description': 'Опис',
'design': 'налаштування',
'DISK': 'ДИСК',
'Disk Cache Keys': 'Ключі дискового кешу',
'Disk Cleared': 'Дисковий кеш очищено',
'Documentation': 'Документація',
"Don't know what to do?": 'Не знаєте що робити далі?',
'done!': 'зроблено!',
'Download': 'Завантажити',
'E-mail': 'Ел.пошта',
'edit': 'редагувати',
'Edit current record': 'Редагувати поточний запис',
'Edit Page': 'Редагувати сторінку',
'Email and SMS': 'Ел.пошта та SMS',
'enter a value': 'введіть значення',
'enter an integer between %(min)g and %(max)g': 'введіть ціле число між %(min)g та %(max)g',
'Error!': 'Помилка!',
'Errors': 'Помилки',
'Errors in form, please check it out.': 'У формі є помилка. Виправте її, будь-ласка.',
'export as csv file': 'експортувати як файл csv',
'FAQ': 'ЧаПи (FAQ)',
'First name': "Ім'я",
'Forgot username?': "Забули ім'я користувача?",
'Forms and Validators': 'Форми та коректність даних',
'Free Applications': 'Вільні додатки',
'Graph Model': 'Графова Модель',
'Group %(group_id)s created': 'Групу %(group_id)s створено',
'Group ID': 'Ідентифікатор групи',
'Group uniquely assigned to user %(id)s': "Група унікально зв'язана з користувачем %(id)s",
'Groups': 'Групи',
'Hello World': 'Привіт, світ!',
'Home': 'Початок',
'How did you get here?': 'Як цього було досягнуто?',
'import': 'Імпортувати',
'Import/Export': 'Імпорт/Експорт',
'insert new': 'Створити новий запис',
'insert new %s': 'створити новий запис %s',
'Internal State': 'Внутрішній стан',
'Introduction': 'Введення',
'Invalid email': 'Невірна адреса ел.пошти',
'Invalid login': "Невірне ім'я користувача",
'Invalid password': 'Невірний пароль',
'Invalid Query': 'Помилковий запит',
'invalid request': 'хибний запит',
'Is Active': 'Активна',
'Key': 'Ключ',
'Last name': 'Прізвище',
'Layout': 'Макет (Layout)',
'Layout Plugins': 'Втулки макетів',
'Layouts': 'Макети',
'Live Chat': 'Чат',
'Logged in': 'Вхід здійснено',
'Logged out': 'Вихід здійснено',
'Login': 'Вхід',
'Logout': 'Вихід',
'Lost Password': 'Забули пароль',
'Lost password?': 'Забули пароль?',
'Manage Cache': 'Управління кешем',
'Menu Model': 'Модель меню',
'Modified By': 'Зміни провадив(ла)',
'Modified On': 'Змінено в',
'My Sites': 'Сайт (усі додатки)',
'Name': "Ім'я",
'New password': 'Новий пароль',
'New Record': 'Новий запис',
'new record inserted': 'новий рядок додано',
'next 100 rows': 'наступні 100 рядків',
'No databases in this application': 'Даний додаток не використовує базу даних',
'now': 'зараз',
'Object or table name': "Об'єкт або назва таблиці",
'Old password': 'Старий пароль',
'Online examples': 'Зразковий демо-сайт',
'or import from csv file': 'або імпортувати з csv-файлу',
'Origin': 'Походження',
'Other Plugins': 'Інші втулки',
'Other Recipes': 'Інші рецепти',
'Overview': 'Огляд',
'Page Not Found!': 'Сторінку не знайдено!',
'Page saved': 'Сторінку збережено',
'Password': 'Пароль',
'Password changed': 'Пароль змінено',
"Password fields don't match": 'Пароль не співпав',
'please input your password again': 'Будь-ласка введіть пароль ще раз',
'Plugins': 'Втулки (Plugins)',
'Powered by': 'Працює на',
'Preface': 'Передмова',
'previous 100 rows': 'попередні 100 рядків',
'Profile': 'Параметри',
'Profile updated': 'Параметри змінено',
'pygraphviz library not found': 'Бібліотека pygraphviz не знайдена (не встановлена)',
'Python': 'Мова Python',
'Query:': 'Запит:',
'Quick Examples': 'Швидкі приклади',
'RAM': "ОПЕРАТИВНА ПАМ'ЯТЬ (ОЗП)",
'RAM Cache Keys': 'Ключі ОЗП-кешу',
'Ram Cleared': 'ОЗП-кеш очищено',
'Recipes': 'Рецепти',
'Record': 'запис',
'Record %(id)s updated': 'Запис %(id)s змінено',
'record does not exist': 'запису не існує',
'Record ID': 'Ід.запису',
'Record id': 'ід. запису',
'Record Updated': 'Запис змінено',
'Register': 'Реєстрація',
'Registration identifier': 'Реєстраційний ідентифікатор',
'Registration key': 'Реєстраційний ключ',
'Registration successful': 'Реєстрація пройшла успішно',
'Remember me (for 30 days)': "Запам'ятати мене (на 30 днів)",
'Request reset password': 'Запит на зміну пароля',
'Reset Password key': 'Ключ скидання пароля',
'Role': 'Роль',
'Rows in Table': 'Рядки в таблиці',
'Rows selected': 'Відмічено рядків',
'Save profile': 'Зберегти параметри',
'Semantic': 'Семантика',
'Services': 'Сервіс',
'Size of cache:': 'Розмір кешу:',
'state': 'стан',
'Statistics': 'Статистика',
'Stylesheet': 'CSS-стилі',
'submit': 'застосувати',
'Submit': 'Застосувати',
'Support': 'Підтримка',
'Table': 'Таблиця',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Запит" це умова, на зразок "db.table1.field1==\'значення\'". Вираз "db.table1.field1==db.table2.field2" повертає результат об\'єднання (SQL JOIN) таблиць.',
'The Core': 'Ядро',
'The output of the file is a dictionary that was rendered by the view %s': 'Результат функції - словник пар (назва=значення) було відображено з допомогою відображення (view) %s',
'The Views': 'Відображення (Views)',
'This App': 'Цей додаток',
'This email already has an account': 'Вказана адреса ел.пошти вже зареєстрована',
'Time in Cache (h:m:s)': 'Час знаходження в кеші (h:m:s)',
'Timestamp': 'Відмітка часу',
'too short': 'Занадто короткий',
'Twitter': 'Твіттер',
'unable to parse csv file': 'не вдається розібрати csv-файл',
'Update:': 'Оновити:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Для створення складних запитів використовуйте (...)&(...) замість AND, (...)|(...) замість OR, та ~(...) замість NOT.',
'User %(id)s Logged-in': 'Користувач %(id)s увійшов',
'User %(id)s Logged-out': 'Користувач %(id)s вийшов',
'User %(id)s Password changed': 'Користувач %(id)s змінив свій пароль',
'User %(id)s Password reset': 'Користувач %(id)s скинув пароль',
'User %(id)s Profile updated': 'Параметри користувача %(id)s змінено',
'User %(id)s Registered': 'Користувач %(id)s зареєструвався',
'User ID': 'Ід.користувача',
'value already in database or empty': 'значення вже в базі даних або порожнє',
'Verify Password': 'Повторити пароль',
'Videos': 'Відео',
'View': 'Відображення (View)',
'Welcome': 'Ласкаво просимо',
'Welcome to web2py!': 'Ласкаво просимо до web2py!',
'Which called the function %s located in the file %s': 'Управління передалось функції %s, яка розташована у файлі %s',
'Working...': 'Працюємо...',
'You are successfully running web2py': 'Ви успішно запустили web2py',
'You can modify this application and adapt it to your needs': 'Ви можете модифікувати цей додаток і адаптувати його до своїх потреб',
'You visited the url %s': 'Ви відвідали наступну адресу: %s',
}
| mit |
wmvanvliet/mne-python | mne/externals/tqdm/_tqdm/notebook.py | 14 | 9067 | """
IPython/Jupyter Notebook progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm.notebook import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
# future division is important so that dividing integers yields precise
# floating-point numbers (instead of a truncated int)
from __future__ import division, absolute_import
# import compatibility functions and utilities
import sys
from .utils import _range
# to inherit from the tqdm class
from .std import tqdm as std_tqdm
if True: # pragma: no cover
# import IPython/Jupyter base widget and display utilities
IPY = 0
IPYW = 0
try: # IPython 4.x
import ipywidgets
IPY = 4
try:
IPYW = int(ipywidgets.__version__.split('.')[0])
except AttributeError: # __version__ may not exist in old versions
pass
except ImportError: # IPython 3.x / 2.x
IPY = 32
import warnings
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
message=".*The `IPython.html` package has been deprecated.*")
try:
import IPython.html.widgets as ipywidgets
except ImportError:
pass
try: # IPython 4.x / 3.x
if IPY == 32:
from IPython.html.widgets import FloatProgress as IProgress
from IPython.html.widgets import HBox, HTML
IPY = 3
else:
from ipywidgets import FloatProgress as IProgress
from ipywidgets import HBox, HTML
except ImportError:
try: # IPython 2.x
from IPython.html.widgets import FloatProgressWidget as IProgress
from IPython.html.widgets import ContainerWidget as HBox
from IPython.html.widgets import HTML
IPY = 2
except ImportError:
IPY = 0
try:
from IPython.display import display # , clear_output
except ImportError:
pass
# HTML encoding
try: # Py3
from html import escape
except ImportError: # Py2
from cgi import escape
__author__ = {"github.com/": ["lrq3000", "casperdcl", "alexanderkuk"]}
__all__ = ['tqdm_notebook', 'tnrange', 'tqdm', 'trange']
class tqdm_notebook(std_tqdm):
"""
Experimental IPython/Jupyter Notebook widget using tqdm!
"""
@staticmethod
def status_printer(_, total=None, desc=None, ncols=None):
"""
Manage the printing of an IPython/Jupyter Notebook progress bar widget.
"""
# Fallback to text bar if there's no total
# DEPRECATED: replaced with an 'info' style bar
# if not total:
# return super(tqdm_notebook, tqdm_notebook).status_printer(file)
# fp = file
# Prepare IPython progress bar
try:
if total:
pbar = IProgress(min=0, max=total)
else: # No total? Show info style bar with no progress tqdm status
pbar = IProgress(min=0, max=1)
pbar.value = 1
pbar.bar_style = 'info'
except NameError:
# #187 #451 #558
raise ImportError(
"FloatProgress not found. Please update jupyter and ipywidgets."
" See https://ipywidgets.readthedocs.io/en/stable"
"/user_install.html")
if desc:
pbar.description = desc
if IPYW >= 7:
pbar.style.description_width = 'initial'
# Prepare status text
ptext = HTML()
# Only way to place text to the right of the bar is to use a container
container = HBox(children=[pbar, ptext])
# Prepare layout
if ncols is not None: # use default style of ipywidgets
# ncols could be 100, "100px", "100%"
ncols = str(ncols) # ipywidgets only accepts string
try:
if int(ncols) > 0: # isnumeric and positive
ncols += 'px'
except ValueError:
pass
pbar.layout.flex = '2'
container.layout.width = ncols
container.layout.display = 'inline-flex'
container.layout.flex_flow = 'row wrap'
display(container)
return container
def display(self, msg=None, pos=None,
# additional signals
close=False, bar_style=None):
        # Note: contrary to native tqdm, msg='' does NOT clear the bar;
        # the goal is to keep all information if an error happens, so the
        # user knows at which iteration the loop failed.
# Clear previous output (really necessary?)
# clear_output(wait=1)
if not msg and not close:
msg = self.__repr__()
pbar, ptext = self.container.children
pbar.value = self.n
if msg:
# html escape special characters (like '&')
if '<bar/>' in msg:
left, right = map(escape, msg.split('<bar/>', 1))
else:
left, right = '', escape(msg)
            # remove unaesthetic leading/trailing pipes
if left and left[-1] == '|':
left = left[:-1]
if right and right[0] == '|':
right = right[1:]
# Update description
pbar.description = left
if IPYW >= 7:
pbar.style.description_width = 'initial'
# never clear the bar (signal: msg='')
if right:
ptext.value = right
# Change bar style
if bar_style:
# Hack-ish way to avoid the danger bar_style being overridden by
# success because the bar gets closed after the error...
if not (pbar.bar_style == 'danger' and bar_style == 'success'):
pbar.bar_style = bar_style
# Special signal to close the bar
if close and pbar.bar_style != 'danger': # hide only if no error
try:
self.container.close()
except AttributeError:
self.container.visible = False
def __init__(self, *args, **kwargs):
# Setup default output
file_kwarg = kwargs.get('file', sys.stderr)
if file_kwarg is sys.stderr or file_kwarg is None:
kwargs['file'] = sys.stdout # avoid the red block in IPython
# Initialize parent class + avoid printing by using gui=True
kwargs['gui'] = True
kwargs.setdefault('bar_format', '{l_bar}{bar}{r_bar}')
kwargs['bar_format'] = kwargs['bar_format'].replace('{bar}', '<bar/>')
super(tqdm_notebook, self).__init__(*args, **kwargs)
if self.disable or not kwargs['gui']:
return
# Get bar width
self.ncols = '100%' if self.dynamic_ncols else kwargs.get("ncols", None)
# Replace with IPython progress bar display (with correct total)
unit_scale = 1 if self.unit_scale is True else self.unit_scale or 1
total = self.total * unit_scale if self.total else self.total
self.container = self.status_printer(
self.fp, total, self.desc, self.ncols)
self.sp = self.display
# Print initial bar state
if not self.disable:
self.display()
def __iter__(self, *args, **kwargs):
try:
for obj in super(tqdm_notebook, self).__iter__(*args, **kwargs):
# return super(tqdm...) will not catch exception
yield obj
# NB: except ... [ as ...] breaks IPython async KeyboardInterrupt
except: # NOQA
self.sp(bar_style='danger')
raise
def update(self, *args, **kwargs):
try:
super(tqdm_notebook, self).update(*args, **kwargs)
except Exception as exc:
# cannot catch KeyboardInterrupt when using manual tqdm
# as the interrupt will most likely happen on another statement
self.sp(bar_style='danger')
raise exc
def close(self, *args, **kwargs):
super(tqdm_notebook, self).close(*args, **kwargs)
# If it was not run in a notebook, sp is not assigned, check for it
if hasattr(self, 'sp'):
# Try to detect if there was an error or KeyboardInterrupt
# in manual mode: if n < total, things probably got wrong
if self.total and self.n < self.total:
self.sp(bar_style='danger')
else:
if self.leave:
self.sp(bar_style='success')
else:
self.sp(close=True)
def moveto(self, *args, **kwargs):
# void -> avoid extraneous `\n` in IPython output cell
return
def tnrange(*args, **kwargs):
"""
A shortcut for `tqdm.notebook.tqdm(xrange(*args), **kwargs)`.
On Python3+, `range` is used instead of `xrange`.
"""
return tqdm_notebook(_range(*args), **kwargs)
# Aliases
tqdm = tqdm_notebook
trange = tnrange
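# Hedged usage sketch (not part of the original module): run inside a
# notebook with ipywidgets installed, both entry points render a widget
# progress bar instead of a text one.
def _notebook_progress_example():
    total = 0
    for i in trange(10, desc='trange'):
        total += i
    for value in tqdm([1, 2, 3], desc='tqdm'):
        total += value
    return total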
| bsd-3-clause |
mirror/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/Common/FdfClassObject.py | 11 | 4053 | ## @file
# This file is used to define each component of FDF file
#
# Copyright (c) 2008, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from FdfParserLite import FdfParser
from Table.TableFdf import TableFdf
from CommonDataClass.DataClass import MODEL_FILE_FDF, MODEL_PCD, MODEL_META_DATA_COMPONENT
from String import NormPath
## FdfObject
#
# This class defined basic Fdf object which is used by inheriting
#
# @param object: Inherited from object class
#
class FdfObject(object):
def __init__(self):
object.__init__()
## Fdf
#
# This class defined the structure used in Fdf object
#
# @param FdfObject: Inherited from FdfObject class
# @param Filename: Input value for Ffilename of Fdf file, default is None
# @param WorkspaceDir: Input value for current workspace directory, default is None
#
class Fdf(FdfObject):
def __init__(self, Filename = None, IsToDatabase = False, WorkspaceDir = None, Database = None):
self.WorkspaceDir = WorkspaceDir
self.IsToDatabase = IsToDatabase
self.Cur = Database.Cur
self.TblFile = Database.TblFile
self.TblFdf = Database.TblFdf
self.FileID = -1
self.FileList = {}
#
# Load Fdf file if filename is not None
#
if Filename != None:
self.LoadFdfFile(Filename)
#
# Insert a FDF file record into database
#
def InsertFile(self, Filename):
FileID = -1
Filename = NormPath(Filename)
if Filename not in self.FileList:
FileID = self.TblFile.InsertFile(Filename, MODEL_FILE_FDF)
self.FileList[Filename] = FileID
return self.FileList[Filename]
## Load Fdf file
#
# Load the file if it exists
#
# @param Filename: Input value for filename of Fdf file
#
def LoadFdfFile(self, Filename):
FileList = []
#
# Parse Fdf file
#
Filename = NormPath(Filename)
Fdf = FdfParser(Filename)
Fdf.ParseFile()
#
# Insert inf file and pcd information
#
if self.IsToDatabase:
(Model, Value1, Value2, Value3, Arch, BelongsToItem, BelongsToFile, StartLine, StartColumn, EndLine, EndColumn, Enabled) = \
(0, '', '', '', 'COMMON', -1, -1, -1, -1, -1, -1, 0)
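            # NOTE: the loop below is intentionally a no-op kept from the
            # original source; the actual PCD records are inserted by the
            # loop over Fdf.Profile.PcdDict.keys() that follows.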
for Index in range(0, len(Fdf.Profile.PcdDict)):
pass
for Key in Fdf.Profile.PcdDict.keys():
Model = MODEL_PCD
Value1 = ''
Value2 = ".".join((Key[1], Key[0]))
FileName = Fdf.Profile.PcdFileLineDict[Key][0]
StartLine = Fdf.Profile.PcdFileLineDict[Key][1]
BelongsToFile = self.InsertFile(FileName)
self.TblFdf.Insert(Model, Value1, Value2, Value3, Arch, BelongsToItem, BelongsToFile, StartLine, StartColumn, EndLine, EndColumn, Enabled)
for Index in range(0, len(Fdf.Profile.InfList)):
Model = MODEL_META_DATA_COMPONENT
Value1 = Fdf.Profile.InfList[Index]
Value2 = ''
FileName = Fdf.Profile.InfFileLineList[Index][0]
StartLine = Fdf.Profile.InfFileLineList[Index][1]
BelongsToFile = self.InsertFile(FileName)
self.TblFdf.Insert(Model, Value1, Value2, Value3, Arch, BelongsToItem, BelongsToFile, StartLine, StartColumn, EndLine, EndColumn, Enabled)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
pass
| gpl-2.0 |
Ivoz/pip | pip/_vendor/distlib/scripts.py | 163 | 11979 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, fsencode, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()
# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
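# For example it matches b'#!/usr/bin/python2.7 -u' (a hypothetical shebang),
# with group(1) capturing the b' -u' post-interpreter options that
# _copy_script passes on as post_interp.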
SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys, re
def _resolve(module, func):
__import__(module)
mod = sys.modules[module]
parts = func.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
try:
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
func = _resolve('%(module)s', '%(func)s')
rc = func() # None interpreted as 0
except Exception as e: # only supporting Python >= 2.6
sys.stderr.write('%%s\\n' %% e)
rc = 1
sys.exit(rc)
'''
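# The template is filled in by _get_script_text below via '%(module)s' and
# '%(func)s' substitution; e.g. for a hypothetical export entry
# 'mycli = mypkg.cli:main', module is 'mypkg.cli' and func is 'main', so the
# generated script resolves and calls mypkg.cli.main().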
class ScriptMaker(object):
"""
A class to copy or create scripts from source scripts or callable
specifications.
"""
script_template = SCRIPT_TEMPLATE
executable = None # for shebangs
def __init__(self, source_dir, target_dir, add_launchers=True,
dry_run=False, fileop=None):
self.source_dir = source_dir
self.target_dir = target_dir
self.add_launchers = add_launchers
self.force = False
self.clobber = False
# It only makes sense to set mode bits on POSIX.
self.set_mode = (os.name == 'posix')
self.variants = set(('', 'X.Y'))
self._fileop = fileop or FileOperator(dry_run)
def _get_alternate_executable(self, executable, options):
if options.get('gui', False) and os.name == 'nt':
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
return executable
def _get_shebang(self, encoding, post_interp=b'', options=None):
if self.executable:
executable = self.executable
elif not sysconfig.is_python_build():
executable = get_executable()
elif in_venv():
executable = os.path.join(sysconfig.get_path('scripts'),
'python%s' % sysconfig.get_config_var('EXE'))
else:
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s%s' % (sysconfig.get_config_var('VERSION'),
sysconfig.get_config_var('EXE')))
if options:
executable = self._get_alternate_executable(executable, options)
executable = fsencode(executable)
shebang = b'#!' + executable + post_interp + b'\n'
        # The Python parser reads a script as UTF-8 until it finds a
        # #coding:xxx cookie. Since the shebang must be the first line of
        # the file, it necessarily precedes any #coding:xxx cookie, so the
        # shebang has to be decodable from UTF-8.
try:
shebang.decode('utf-8')
except UnicodeDecodeError:
raise ValueError(
'The shebang (%r) is not decodable from utf-8' % shebang)
# If the script is encoded to a custom encoding (use a
# #coding:xxx cookie), the shebang has to be decodable from
# the script encoding too.
if encoding != 'utf-8':
try:
shebang.decode(encoding)
except UnicodeDecodeError:
raise ValueError(
'The shebang (%r) is not decodable '
'from the script encoding (%r)' % (shebang, encoding))
return shebang
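    # Sketch of the result (interpreter path hypothetical): with
    # self.executable = '/usr/bin/python3' and an empty post_interp,
    # _get_shebang('utf-8') returns b'#!/usr/bin/python3\n'.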
def _get_script_text(self, entry):
return self.script_template % dict(module=entry.prefix,
func=entry.suffix)
manifest = _DEFAULT_MANIFEST
def get_manifest(self, exename):
base = os.path.basename(exename)
return self.manifest % base
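    # Sketch (path hypothetical): get_manifest('C:\\Scripts\\mycli.exe')
    # substitutes 'mycli.exe' as the assembly name into _DEFAULT_MANIFEST.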
def _write_script(self, names, shebang, script_bytes, filenames, ext):
use_launcher = self.add_launchers and os.name == 'nt'
linesep = os.linesep.encode('utf-8')
if not use_launcher:
script_bytes = shebang + linesep + script_bytes
else:
if ext == 'py':
launcher = self._get_launcher('t')
else:
launcher = self._get_launcher('w')
stream = BytesIO()
with ZipFile(stream, 'w') as zf:
zf.writestr('__main__.py', script_bytes)
zip_data = stream.getvalue()
script_bytes = launcher + shebang + linesep + zip_data
for name in names:
outname = os.path.join(self.target_dir, name)
if use_launcher:
n, e = os.path.splitext(outname)
if e.startswith('.py'):
outname = n
outname = '%s.exe' % outname
try:
self._fileop.write_binary_file(outname, script_bytes)
except Exception:
# Failed writing an executable - it might be in use.
logger.warning('Failed to write executable - trying to '
'use .deleteme logic')
dfname = '%s.deleteme' % outname
if os.path.exists(dfname):
os.remove(dfname) # Not allowed to fail here
os.rename(outname, dfname) # nor here
self._fileop.write_binary_file(outname, script_bytes)
logger.debug('Able to replace executable using '
'.deleteme logic')
try:
os.remove(dfname)
except Exception:
pass # still in use - ignore error
else:
if os.name == 'nt' and not outname.endswith('.' + ext):
outname = '%s.%s' % (outname, ext)
if os.path.exists(outname) and not self.clobber:
logger.warning('Skipping existing file %s', outname)
continue
self._fileop.write_binary_file(outname, script_bytes)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
def _make_script(self, entry, filenames, options=None):
shebang = self._get_shebang('utf-8', options=options)
script = self._get_script_text(entry).encode('utf-8')
name = entry.name
scriptnames = set()
if '' in self.variants:
scriptnames.add(name)
if 'X' in self.variants:
scriptnames.add('%s%s' % (name, sys.version[0]))
if 'X.Y' in self.variants:
scriptnames.add('%s-%s' % (name, sys.version[:3]))
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
def _copy_script(self, script, filenames):
adjust = False
script = os.path.join(self.source_dir, convert_path(script))
outname = os.path.join(self.target_dir, os.path.basename(script))
if not self.force and not self._fileop.newer(script, outname):
logger.debug('not copying %s (up-to-date)', script)
return
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, 'rb')
except IOError:
if not self.dry_run:
raise
f = None
else:
encoding, lines = detect_encoding(f.readline)
f.seek(0)
first_line = f.readline()
if not first_line:
logger.warning('%s: %s is an empty file (skipping)',
self.get_command_name(), script)
return
match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
if match:
adjust = True
post_interp = match.group(1) or b''
if not adjust:
if f:
f.close()
self._fileop.copy_file(script, outname)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
else:
logger.info('copying and adjusting %s -> %s', script,
self.target_dir)
if not self._fileop.dry_run:
shebang = self._get_shebang(encoding, post_interp)
if b'pythonw' in first_line:
ext = 'pyw'
else:
ext = 'py'
n = os.path.basename(outname)
self._write_script([n], shebang, f.read(), filenames, ext)
if f:
f.close()
@property
def dry_run(self):
return self._fileop.dry_run
@dry_run.setter
def dry_run(self, value):
self._fileop.dry_run = value
if os.name == 'nt':
# Executable launcher support.
# Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
def _get_launcher(self, kind):
if struct.calcsize('P') == 8: # 64-bit
bits = '64'
else:
bits = '32'
name = '%s%s.exe' % (kind, bits)
# Issue 31: don't hardcode an absolute package name, but
# determine it relative to the current package
distlib_package = __name__.rsplit('.', 1)[0]
result = finder(distlib_package).find(name).bytes
return result
# Public API follows
def make(self, specification, options=None):
"""
Make a script.
:param specification: The specification, which is either a valid export
entry specification (to make a script from a
callable) or a filename (to make a script by
copying from a source location).
:param options: A dictionary of options controlling script generation.
:return: A list of all absolute pathnames written to.
"""
filenames = []
entry = get_export_entry(specification)
if entry is None:
self._copy_script(specification, filenames)
else:
self._make_script(entry, filenames, options=options)
return filenames
def make_multiple(self, specifications, options=None):
"""
        Take a list of specifications and make scripts from them.
:param specifications: A list of specifications.
        :return: A list of all absolute pathnames written to.
"""
filenames = []
for specification in specifications:
filenames.extend(self.make(specification, options))
return filenames
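# Usage sketch (the directories and export entry are hypothetical; this is
# not part of distlib itself):
#
#   maker = ScriptMaker(source_dir=None, target_dir='/tmp/bin')
#   maker.executable = '/usr/bin/python3'     # optional shebang override
#   written = maker.make('mycli = mypkg.cli:main')
#
# make() routes a 'name = module:callable' specification to _make_script and
# a plain filename to _copy_script, returning the absolute paths written.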
| mit |
jianC/lexikon-2.6.35-gb-mr_udev | tools/perf/scripts/python/syscall-counts.py | 944 | 1429 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
pass
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40d %10d\n" % (id, val),
| gpl-2.0 |
cybermx/linux-2.6-imx | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
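# Run via perf's Python scripting support; an assumed typical workflow is:
#   perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex -a sleep 10
#   perf script -s futex-contention.py
# The event names correspond to the syscalls__sys_enter_futex and
# syscalls__sys_exit_futex handlers defined above.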
| gpl-2.0 |
thobbs/python-driver | tests/integration/cqlengine/test_consistency.py | 4 | 4404 | # Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from uuid import uuid4
from cassandra import ConsistencyLevel as CL, ConsistencyLevel
from cassandra.cluster import Session
from cassandra.cqlengine import columns
from cassandra.cqlengine import connection
from cassandra.cqlengine.management import sync_table, drop_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.query import BatchQuery
from tests.integration.cqlengine.base import BaseCassEngTestCase
class TestConsistencyModel(Model):
id = columns.UUID(primary_key=True, default=lambda:uuid4())
count = columns.Integer()
text = columns.Text(required=False)
class BaseConsistencyTest(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(BaseConsistencyTest, cls).setUpClass()
sync_table(TestConsistencyModel)
@classmethod
def tearDownClass(cls):
super(BaseConsistencyTest, cls).tearDownClass()
drop_table(TestConsistencyModel)
class TestConsistency(BaseConsistencyTest):
def test_create_uses_consistency(self):
qs = TestConsistencyModel.consistency(CL.ALL)
with mock.patch.object(self.session, 'execute') as m:
qs.create(text="i am not fault tolerant this way")
args = m.call_args
self.assertEqual(CL.ALL, args[0][0].consistency_level)
def test_queryset_is_returned_on_create(self):
qs = TestConsistencyModel.consistency(CL.ALL)
self.assertTrue(isinstance(qs, TestConsistencyModel.__queryset__), type(qs))
def test_update_uses_consistency(self):
t = TestConsistencyModel.create(text="bacon and eggs")
t.text = "ham sandwich"
with mock.patch.object(self.session, 'execute') as m:
t.consistency(CL.ALL).save()
args = m.call_args
self.assertEqual(CL.ALL, args[0][0].consistency_level)
def test_batch_consistency(self):
with mock.patch.object(self.session, 'execute') as m:
with BatchQuery(consistency=CL.ALL) as b:
TestConsistencyModel.batch(b).create(text="monkey")
args = m.call_args
self.assertEqual(CL.ALL, args[0][0].consistency_level)
with mock.patch.object(self.session, 'execute') as m:
with BatchQuery() as b:
TestConsistencyModel.batch(b).create(text="monkey")
args = m.call_args
self.assertNotEqual(CL.ALL, args[0][0].consistency_level)
def test_blind_update(self):
t = TestConsistencyModel.create(text="bacon and eggs")
t.text = "ham sandwich"
uid = t.id
with mock.patch.object(self.session, 'execute') as m:
TestConsistencyModel.objects(id=uid).consistency(CL.ALL).update(text="grilled cheese")
args = m.call_args
self.assertEqual(CL.ALL, args[0][0].consistency_level)
def test_delete(self):
# ensures we always carry consistency through on delete statements
t = TestConsistencyModel.create(text="bacon and eggs")
t.text = "ham and cheese sandwich"
uid = t.id
with mock.patch.object(self.session, 'execute') as m:
t.consistency(CL.ALL).delete()
with mock.patch.object(self.session, 'execute') as m:
TestConsistencyModel.objects(id=uid).consistency(CL.ALL).delete()
args = m.call_args
self.assertEqual(CL.ALL, args[0][0].consistency_level)
def test_default_consistency(self):
# verify global assumed default
self.assertEqual(Session._default_consistency_level, ConsistencyLevel.LOCAL_ONE)
# verify that this session default is set according to connection.setup
# assumes tests/cqlengine/__init__ setup uses CL.ONE
session = connection.get_session()
self.assertEqual(session.default_consistency_level, ConsistencyLevel.ONE)
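# Sketch of the pattern exercised above (connection setup assumed done by the
# test harness):
#
#   TestConsistencyModel.consistency(CL.QUORUM).create(text="example")
#
# consistency() can be chained ahead of create(), save(), update() and
# delete(), and BatchQuery(consistency=...) applies it to a whole batch.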
| apache-2.0 |
ravenland/ycmWinRepo | python/ycm/client/completion_request.py | 6 | 2522 | #!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycm import vimsupport
from ycmd.utils import ToUtf8IfNeeded
from ycm.client.base_request import BaseRequest, JsonFromFuture
TIMEOUT_SECONDS = 0.5
class CompletionRequest( BaseRequest ):
def __init__( self, request_data ):
super( CompletionRequest, self ).__init__()
self.request_data = request_data
def Start( self ):
self._response_future = self.PostDataToHandlerAsync( self.request_data,
'completions',
TIMEOUT_SECONDS )
def Done( self ):
return self._response_future.done()
def Response( self ):
if not self._response_future:
return []
try:
return _ConvertCompletionResponseToVimDatas(
JsonFromFuture( self._response_future ) )
except Exception as e:
vimsupport.PostVimMessage( str( e ) )
return []
def _ConvertCompletionDataToVimData( completion_data ):
# see :h complete-items for a description of the dictionary fields
vim_data = {
'word' : ToUtf8IfNeeded( completion_data[ 'insertion_text' ] ),
'dup' : 1,
}
if 'menu_text' in completion_data:
vim_data[ 'abbr' ] = ToUtf8IfNeeded( completion_data[ 'menu_text' ] )
if 'extra_menu_info' in completion_data:
vim_data[ 'menu' ] = ToUtf8IfNeeded( completion_data[ 'extra_menu_info' ] )
if 'kind' in completion_data:
vim_data[ 'kind' ] = ToUtf8IfNeeded(
completion_data[ 'kind' ] )[ 0 ].lower()
if 'detailed_info' in completion_data:
vim_data[ 'info' ] = ToUtf8IfNeeded( completion_data[ 'detailed_info' ] )
return vim_data
def _ConvertCompletionResponseToVimDatas( response_data ):
return [ _ConvertCompletionDataToVimData( x )
for x in response_data[ 'completions' ] ]
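# Shape sketch of the payload the converters expect (field names taken from
# the accessors above; the values are made up):
#
#   _ConvertCompletionResponseToVimDatas(
#       { 'completions': [ { 'insertion_text': 'Foo', 'kind': 'Function' } ] } )
#   # -> [ { 'word': 'Foo', 'dup': 1, 'kind': 'f' } ]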
| gpl-3.0 |
coffenbacher/askbot-devel | askbot/migrations/0136_auto__add_group__add_threadtogroup__add_unique_threadtogroup_thread_ta.py | 15 | 36423 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Group'
db.create_table('askbot_group', (
('group_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.Group'], unique=True, primary_key=True)),
('logo_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True)),
('moderate_email', self.gf('django.db.models.fields.BooleanField')(default=True)),
('is_open', self.gf('django.db.models.fields.BooleanField')(default=False)),
('preapproved_emails', self.gf('django.db.models.fields.TextField')(default='', null=True, blank=True)),
('preapproved_email_domains', self.gf('django.db.models.fields.TextField')(default='', null=True, blank=True)),
))
db.send_create_signal('askbot', ['Group'])
# Adding field 'PostToGroup.group'
db.add_column('askbot_post_groups', 'group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['askbot.Group'], null=True, blank=True), keep_default=False)
db.add_column('askbot_thread_groups', 'group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['askbot.Group'], null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Removing unique constraint on 'ThreadToGroup', fields ['thread', 'tag']
db.delete_unique('askbot_thread_groups', ['thread_id', 'tag_id'])
# Deleting model 'Group'
db.delete_table('askbot_group')
# Deleting model 'ThreadToGroup'
db.delete_table('askbot_thread_groups')
# Deleting field 'PostToGroup.group'
db.delete_column('askbot_post_groups', 'group_id')
# Changing field 'QuestionWidget.search_query'
db.alter_column('askbot_questionwidget', 'search_query', self.gf('django.db.models.fields.CharField')(default=None, max_length=50))
# Adding M2M table for field groups on 'Thread'
db.create_table('askbot_thread_groups', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('thread', models.ForeignKey(orm['askbot.thread'], null=False)),
('tag', models.ForeignKey(orm['askbot.tag'], null=False))
))
db.create_unique('askbot_thread_groups', ['thread_id', 'tag_id'])
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Post']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.askwidget': {
'Meta': {'object_name': 'AskWidget'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'groups'", 'null': 'True', 'to': "orm['askbot.Tag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_text_field': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'inner_style': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'outer_style': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.draftanswer': {
'Meta': {'object_name': 'DraftAnswer'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'draft_answers'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'draft_answers'", 'to': "orm['askbot.Thread']"})
},
'askbot.draftquestion': {
'Meta': {'object_name': 'DraftQuestion'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True'})
},
'askbot.emailfeedsetting': {
'Meta': {'unique_together': "(('subscriber', 'feed_type'),)", 'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.group': {
'Meta': {'object_name': 'Group', '_ormbases': ['auth.Group']},
'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logo_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'moderate_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preapproved_email_domains': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'preapproved_emails': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'})
},
'askbot.groupmembership': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupMembership'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_memberships'", 'to': "orm['askbot.Tag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group_memberships'", 'to': "orm['auth.User']"})
},
'askbot.groupprofile': {
'Meta': {'object_name': 'GroupProfile'},
'group_tag': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'group_profile'", 'unique': 'True', 'to': "orm['askbot.Tag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logo_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'moderate_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preapproved_email_domains': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'preapproved_emails': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.post': {
'Meta': {'object_name': 'Post'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'group_posts'", 'symmetrical': 'False', 'through': "orm['askbot.PostToGroup']", 'to': "orm['askbot.Tag']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'old_answer_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'old_comment_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'old_question_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'post_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'posts'", 'null': 'True', 'blank': 'True', 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.postflagreason': {
'Meta': {'object_name': 'PostFlagReason'},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'details': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'post_reject_reasons'", 'to': "orm['askbot.Post']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('post', 'revision'),)", 'object_name': 'PostRevision'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'approved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'by_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.posttogroup': {
'Meta': {'unique_together': "(('post', 'tag'),)", 'object_name': 'PostToGroup', 'db_table': "'askbot_post_groups'"},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']"}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Tag']"})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Post']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.questionwidget': {
'Meta': {'object_name': 'QuestionWidget'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Tag']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_by': ('django.db.models.fields.CharField', [], {'default': "'-added_at'", 'max_length': '18'}),
'question_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '7'}),
'search_query': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'style': ('django.db.models.fields.TextField', [], {'default': '"\\n@import url(\'http://fonts.googleapis.com/css?family=Yanone+Kaffeesatz:300,400,700\');\\nbody {\\n overflow: hidden;\\n}\\n\\n#container {\\n width: 200px;\\n height: 350px;\\n}\\nul {\\n list-style: none;\\n padding: 5px;\\n margin: 5px;\\n}\\nli {\\n border-bottom: #CCC 1px solid;\\n padding-bottom: 5px;\\n padding-top: 5px;\\n}\\nli:last-child {\\n border: none;\\n}\\na {\\n text-decoration: none;\\n color: #464646;\\n font-family: \'Yanone Kaffeesatz\', sans-serif;\\n font-size: 15px;\\n}\\n"', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'askbot.replyaddress': {
'Meta': {'object_name': 'ReplyAddress'},
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'allowed_from_email': ('django.db.models.fields.EmailField', [], {'max_length': '150'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reply_addresses'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'reply_action': ('django.db.models.fields.CharField', [], {'default': "'auto_answer_or_comment'", 'max_length': '32'}),
'response_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'edit_addresses'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'used_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'suggested_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'suggested_tags'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'tag_wiki': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'described_tag'", 'unique': 'True', 'null': 'True', 'to': "orm['askbot.Post']"}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'unused_favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'group_threads'", 'symmetrical': 'False', 'through': "orm['askbot.ThreadToGroup']", 'to': "orm['askbot.Tag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.threadtogroup': {
'Meta': {'unique_together': "(('thread', 'tag'),)", 'object_name': 'ThreadToGroup', 'db_table': "'askbot_thread_groups'"},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Tag']"}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"})
},
'askbot.vote': {
'Meta': {'unique_together': "(('user', 'voted_post'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'voted_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['askbot.Post']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_signature': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_fake': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_marked_tags': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'subscribed_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
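# A South schema migration; applied in order with its neighbours, e.g.
# (sketch) `python manage.py migrate askbot`.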
| gpl-3.0 |
NeCTAR-RC/neutron | neutron/tests/unit/plugins/oneconvergence/test_nvsdlib.py | 43 | 10951 | # Copyright 2014 OneConvergence, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from oslo_serialization import jsonutils
from neutron.plugins.oneconvergence.lib import nvsdlib
from neutron.tests import base
NETWORKS_URI = "/pluginhandler/ocplugin/tenant/%s/lnetwork/"
NETWORK_URI = NETWORKS_URI + "%s"
GET_ALL_NETWORKS = "/pluginhandler/ocplugin/tenant/getallnetworks"
SUBNETS_URI = NETWORK_URI + "/lsubnet/"
SUBNET_URI = SUBNETS_URI + "%s"
GET_ALL_SUBNETS = "/pluginhandler/ocplugin/tenant/getallsubnets"
PORTS_URI = NETWORK_URI + "/lport/"
PORT_URI = PORTS_URI + "%s"
EXT_URI = "/pluginhandler/ocplugin/ext/tenant/%s"
FLOATING_IPS_URI = EXT_URI + "/floatingip/"
FLOATING_IP_URI = FLOATING_IPS_URI + "%s"
ROUTERS_URI = EXT_URI + "/lrouter/"
ROUTER_URI = ROUTERS_URI + "%s"
TEST_NET = 'test-network'
TEST_SUBNET = 'test-subnet'
TEST_PORT = 'test-port'
TEST_FIP = 'test-floatingip'
TEST_ROUTER = 'test-router'
TEST_TENANT = 'test-tenant'
class TestNVSDApi(base.BaseTestCase):
def setUp(self):
super(TestNVSDApi, self).setUp()
self.nvsdlib = nvsdlib.NVSDApi()
def test_create_network(self):
network_obj = {
"name": 'test-net',
"tenant_id": TEST_TENANT,
"shared": False,
"admin_state_up": True,
"router:external": False
}
resp = mock.Mock()
resp.json.return_value = {'id': 'uuid'}
with mock.patch.object(self.nvsdlib, 'send_request',
return_value=resp) as send_request:
uri = NETWORKS_URI % TEST_TENANT
net = self.nvsdlib.create_network(network_obj)
send_request.assert_called_once_with(
"POST", uri,
body=jsonutils.dumps(network_obj),
resource='network',
tenant_id=TEST_TENANT)
self.assertEqual(net, {'id': 'uuid'})
def test_update_network(self):
network = {'id': TEST_NET,
'tenant_id': TEST_TENANT}
update_network = {'name': 'new_name'}
uri = NETWORK_URI % (TEST_TENANT, TEST_NET)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.update_network(network, update_network)
send_request.assert_called_once_with(
"PUT", uri, body=jsonutils.dumps(update_network),
resource='network', tenant_id=TEST_TENANT,
resource_id=TEST_NET)
def test_delete_network(self):
network = {'id': TEST_NET,
'tenant_id': TEST_TENANT}
uri = NETWORK_URI % (TEST_TENANT, TEST_NET)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
with mock.patch.object(self.nvsdlib, '_get_ports'):
self.nvsdlib.delete_network(network)
send_request.assert_called_once_with(
"DELETE", uri, resource='network',
tenant_id=TEST_TENANT, resource_id=TEST_NET)
def test_create_port(self):
path = PORTS_URI % (TEST_TENANT, TEST_NET)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
fixed_ips = [{'ip_address': '10.0.0.2',
'subnet_id': TEST_SUBNET}]
lport = {
"id": TEST_PORT,
"name": 'test',
"device_id": "device_id",
"device_owner": "device_owner",
"mac_address": "mac_address",
"fixed_ips": fixed_ips,
"admin_state_up": True,
"network_id": TEST_NET,
"status": 'ACTIVE'
}
self.nvsdlib.create_port(TEST_TENANT, lport)
expected = {"id": TEST_PORT, "name": 'test',
"device_id": "device_id",
"device_owner": "device_owner",
"mac_address": "mac_address",
"ip_address": '10.0.0.2',
"subnet_id": TEST_SUBNET,
"admin_state_up": True,
"network_id": TEST_NET,
"status": 'ACTIVE'}
send_request.assert_called_once_with(
"POST", path,
body=jsonutils.dumps(expected),
resource='port',
tenant_id=TEST_TENANT)
def test_update_port(self):
port = {'id': TEST_PORT,
'network_id': TEST_NET}
port_update = {'name': 'new-name'}
uri = PORT_URI % (TEST_TENANT, TEST_NET, TEST_PORT)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.update_port(TEST_TENANT, port, port_update)
send_request.assert_called_once_with(
"PUT", uri,
body=jsonutils.dumps(port_update),
resource='port',
resource_id='test-port',
tenant_id=TEST_TENANT)
def test_delete_port(self):
port = {'network_id': TEST_NET,
'tenant_id': TEST_TENANT}
uri = PORT_URI % (TEST_TENANT, TEST_NET, TEST_PORT)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.delete_port(TEST_PORT, port)
send_request.assert_called_once_with("DELETE", uri,
resource='port',
tenant_id=TEST_TENANT,
resource_id=TEST_PORT)
def test_create_subnet(self):
subnet = {'id': TEST_SUBNET,
'tenant_id': TEST_TENANT,
'network_id': TEST_NET}
uri = SUBNETS_URI % (TEST_TENANT, TEST_NET)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.create_subnet(subnet)
send_request.assert_called_once_with("POST", uri,
body=jsonutils.dumps(subnet),
resource='subnet',
tenant_id=TEST_TENANT)
def test_update_subnet(self):
subnet = {'id': TEST_SUBNET,
'tenant_id': TEST_TENANT,
'network_id': TEST_NET}
subnet_update = {'name': 'new-name'}
uri = SUBNET_URI % (TEST_TENANT, TEST_NET, TEST_SUBNET)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.update_subnet(subnet, subnet_update)
send_request.assert_called_once_with(
"PUT", uri,
body=jsonutils.dumps(subnet_update), resource='subnet',
tenant_id=TEST_TENANT, resource_id=TEST_SUBNET)
def test_delete_subnet(self):
subnet = {'id': TEST_SUBNET,
'tenant_id': TEST_TENANT,
'network_id': TEST_NET}
uri = SUBNET_URI % (TEST_TENANT, TEST_NET, TEST_SUBNET)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.delete_subnet(subnet)
send_request.assert_called_once_with("DELETE", uri,
resource='subnet',
tenant_id=TEST_TENANT,
resource_id=TEST_SUBNET)
def test_create_floatingip(self):
floatingip = {'id': TEST_FIP,
'tenant_id': TEST_TENANT}
uri = FLOATING_IPS_URI % TEST_TENANT
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.create_floatingip(floatingip)
send_request.assert_called_once_with(
"POST", uri,
body=jsonutils.dumps(floatingip),
resource='floating_ip',
tenant_id=TEST_TENANT)
def test_update_floatingip(self):
floatingip = {'id': TEST_FIP,
'tenant_id': TEST_TENANT}
uri = FLOATING_IP_URI % (TEST_TENANT, TEST_FIP)
floatingip_update = {'floatingip': {'router_id': TEST_ROUTER}}
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.update_floatingip(floatingip, floatingip_update)
send_request.assert_called_once_with(
"PUT", uri,
body=jsonutils.dumps(floatingip_update['floatingip']),
resource='floating_ip', tenant_id=TEST_TENANT,
resource_id=TEST_FIP)
def test_delete_floatingip(self):
floatingip = {'id': TEST_FIP,
'tenant_id': TEST_TENANT}
uri = FLOATING_IP_URI % (TEST_TENANT, TEST_FIP)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.delete_floatingip(floatingip)
send_request.assert_called_once_with(
"DELETE", uri, resource='floating_ip', tenant_id=TEST_TENANT,
resource_id=TEST_FIP)
def test_create_router(self):
router = {'id': TEST_ROUTER, 'tenant_id': TEST_TENANT}
uri = ROUTERS_URI % TEST_TENANT
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.create_router(router)
send_request.assert_called_once_with(
"POST", uri, body=jsonutils.dumps(router), resource='router',
tenant_id=TEST_TENANT)
def test_update_router(self):
router = {'id': TEST_ROUTER, 'tenant_id': TEST_TENANT}
uri = ROUTER_URI % (TEST_TENANT, TEST_ROUTER)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.update_router(router)
send_request.assert_called_once_with(
"PUT", uri, body=jsonutils.dumps(router),
resource='router', tenant_id=TEST_TENANT,
resource_id=TEST_ROUTER)
def test_delete_router(self):
uri = ROUTER_URI % (TEST_TENANT, TEST_ROUTER)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.delete_router(TEST_TENANT, TEST_ROUTER)
send_request.assert_called_once_with(
"DELETE", uri, resource='router',
tenant_id=TEST_TENANT, resource_id=TEST_ROUTER)
| apache-2.0 |
ndp-systemes/odoo-addons | purchase_order_quantities_improved/tests/test_order_quantities.py | 1 | 10095 | # -*- coding: utf8 -*-
#
# Copyright (C) 2014 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp.tests import common
class TestOrderQuantities(common.TransactionCase):
def setUp(self):
super(TestOrderQuantities, self).setUp()
self.supplier1 = self.browse_ref('purchase_order_quantities_improved.supplier1')
self.location1 = self.browse_ref('stock.stock_location_stock')
self.pricelist1 = self.browse_ref('purchase.list0')
self.product1 = self.browse_ref('purchase_order_quantities_improved.product1')
self.supplierinfo1 = self.browse_ref('purchase_order_quantities_improved.supplierinfo1')
self.supplierinfo2 = self.browse_ref('purchase_order_quantities_improved.supplierinfo2')
def test_10_order_quantity_calculation(self):
"""
        Testing the create function under the minimal quantity, then the write function under and over the minimal quantity.
"""
procurement_order_1 = self.env['procurement.order'].create({
'name': "Procurement Order 1",
'product_id': self.ref('purchase_order_quantities_improved.product1'),
'product_qty': 7,
'warehouse_id': self.ref('stock.warehouse0'),
'location_id': self.ref('stock.stock_location_stock'),
'date_planned': "2015-05-04 15:00:00",
'product_uom': self.ref('product.product_uom_unit')
})
procurement_order_1.run()
self.assertEqual(procurement_order_1.state, u'running')
purchase_order_line = self.env['purchase.order.line'].search([('product_id', '=',
procurement_order_1.product_id.id)])
self.assertTrue(purchase_order_line)
self.assertEqual(len(purchase_order_line), 1)
# po_qty should be 36
self.assertEqual(purchase_order_line.product_qty, 36)
procurement_order_3 = self.env['procurement.order'].create({
'name': "Procurement Order 3",
'product_id': self.ref('purchase_order_quantities_improved.product1'),
'product_qty': 7,
'warehouse_id': self.ref('stock.warehouse0'),
'location_id': self.ref('stock.stock_location_stock'),
'date_planned': "2015-05-04 15:00:00",
'product_uom': self.ref('product.product_uom_unit')
})
procurement_order_3.run()
self.assertEqual(procurement_order_3.state, u'running')
purchase_order_line = self.env['purchase.order.line'].search([('product_id', '=',
procurement_order_1.product_id.id)])
self.assertTrue(purchase_order_line)
# po_qty should be 36
self.assertEqual(purchase_order_line.product_qty, 36)
procurement_order_2 = self.env['procurement.order'].create({
'name': "Procurement Order 2",
'product_id': self.ref('purchase_order_quantities_improved.product1'),
'product_qty': 40,
'warehouse_id': self.ref('stock.warehouse0'),
'location_id': self.ref('stock.stock_location_stock'),
'date_planned': "2015-05-04 15:00:00",
'product_uom': self.ref('product.product_uom_unit')
})
procurement_order_2.run()
self.assertEqual(procurement_order_2.state, u'running')
purchase_order_line = self.env['purchase.order.line'].search([('product_id', '=',
procurement_order_2.product_id.id)])
self.assertTrue(purchase_order_line)
# po_qty should be 60
self.assertEqual(purchase_order_line.product_qty, 60)
def test_20_order_quantity_calculation(self):
"""
        Testing the create function over the maximal quantity.
"""
procurement_order_2 = self.env['procurement.order'].create({
'name': "Procurement Order 2",
'product_id': self.ref('purchase_order_quantities_improved.product1'),
'product_qty': 40,
'warehouse_id': self.ref('stock.warehouse0'),
'location_id': self.ref('stock.stock_location_stock'),
'date_planned': "2015-05-04 15:00:00",
'product_uom': self.ref('product.product_uom_unit')
})
procurement_order_2.run()
purchase_order_line = self.env['purchase.order.line'].search([('product_id', '=',
procurement_order_2.product_id.id)])
# po_qty should be 48
self.assertEqual(purchase_order_line.product_qty, 48)
def test_30_order_quantity_calculation(self):
"""
        Testing how different units of measure work together.
"""
procurement_order_4 = self.env['procurement.order'].create({
'name': "Procurement Order 4",
'product_id': self.ref('purchase_order_quantities_improved.product2'),
'product_qty': 3,
'warehouse_id': self.ref('stock.warehouse0'),
'location_id': self.ref('stock.stock_location_stock'),
'date_planned': "2015-05-04 15:00:00",
'product_uom': self.ref('product.product_uom_unit')
})
procurement_order_4.run()
self.assertEqual(procurement_order_4.state, u'running')
purchase_order_line = self.env['purchase.order.line'].search([('product_id', '=',
procurement_order_4.product_id.id)])
self.assertTrue(purchase_order_line)
# po_qty should be 4
self.assertEqual(purchase_order_line.product_qty, 4)
def test_40_order_quantity_calculation(self):
"""
        Testing the modified create and write functions.
        When the PO is created manually by the operator, these two functions should not overwrite the entered
        quantities. When a purchase order line is created from a procurement, product_qty cannot be below
        product_min_qty, so that situation needs no test here.
"""
purchase_order_1 = self.env['purchase.order'].create({
"name": 'Purchase order 1',
"partner_id": self.supplier1.id,
"date_order": '2015-05-04 15:00:00',
"location_id": self.location1.id,
"pricelist_id": self.pricelist1.id,
})
purchase_order_line_1 = self.env['purchase.order.line'].create({
"name": "Purchase order line 1",
"product_id": self.product1.id,
"price_unit": 10.0,
"order_id": purchase_order_1.id,
"product_qty": 36.5,
"date_planned": '2015-05-04 15:00:00',
})
        # po_qty should still be 36.5 (testing the create function, which should not rewrite a manually entered quantity)
self.assertEqual(purchase_order_line_1.product_qty, 36.5)
purchase_order_line_1.product_qty = 39.5
        # po_qty should still be 39.5 (testing the write function, which should not rewrite a manually entered quantity)
self.assertEqual(purchase_order_line_1.product_qty, 39.5)
def test_50_order_quantity_calculation(self):
"""
Testing calculation without supplierinfo
"""
procurement_order_1 = self.env['procurement.order'].create({
'name': "Procurement Order 1",
'product_id': self.ref('purchase_order_quantities_improved.product1'),
'product_qty': 7,
'warehouse_id': self.ref('stock.warehouse0'),
'location_id': self.ref('stock.stock_location_stock'),
'date_planned': "2015-05-04 15:00:00",
'product_uom': self.ref('product.product_uom_unit')
})
procurement_order_1.run()
purchase_order_line = self.env['purchase.order.line'].search([('product_id', '=',
procurement_order_1.product_id.id)])
self.assertTrue(purchase_order_line)
# po_qty should be 36
self.assertEqual(purchase_order_line.product_qty, 36)
procurement_order_3 = self.env['procurement.order'].create({
'name': "Procurement Order 3",
'product_id': self.ref('purchase_order_quantities_improved.product1'),
'product_qty': 7,
'warehouse_id': self.ref('stock.warehouse0'),
'location_id': self.ref('stock.stock_location_stock'),
'date_planned': "2015-05-04 15:00:00",
'product_uom': self.ref('product.product_uom_unit')
})
procurement_order_3.run()
self.assertEqual(procurement_order_3.state, u'running')
purchase_order_line = self.env['purchase.order.line'].search([('product_id', '=',
procurement_order_1.product_id.id)])
self.assertTrue(purchase_order_line)
# po_qty should be 36
self.assertEqual(purchase_order_line.product_qty, 36)
self.supplierinfo1.unlink()
self.supplierinfo2.unlink()
procurement_order_3.cancel()
purchase_order_line = self.env['purchase.order.line'].search([('product_id', '=',
procurement_order_1.product_id.id)])
self.assertTrue(purchase_order_line)
        # po_qty should now be 29, since the supplierinfo records were removed and procurement 3 cancelled
self.assertEqual(purchase_order_line.product_qty, 29)
| agpl-3.0 |
ant1b/Planets4X | pythonanywhere_app/static/Brython3.0.0-20141104-210332/Lib/string.py | 734 | 9410 | """A collection of string constants.
Public module variables:
whitespace -- a string containing all ASCII whitespace
ascii_lowercase -- a string containing all ASCII lowercase letters
ascii_uppercase -- a string containing all ASCII uppercase letters
ascii_letters -- a string containing all ASCII letters
digits -- a string containing all ASCII decimal digits
hexdigits -- a string containing all ASCII hexadecimal digits
octdigits -- a string containing all ASCII octal digits
punctuation -- a string containing all ASCII punctuation characters
printable -- a string containing all ASCII characters considered printable
"""
import _string
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + ascii_letters + punctuation + whitespace
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
"""capwords(s [,sep]) -> string
Split the argument into words using split, capitalize each
word using capitalize, and join the capitalized words using
join. If the optional second argument sep is absent or None,
runs of whitespace characters are replaced by a single space
and leading and trailing whitespace are removed, otherwise
sep is used to split and join the words.
"""
return (sep or ' ').join(x.capitalize() for x in s.split(sep))
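# Illustrative examples (a quick sketch of the behaviour documented above):
#   capwords(' aBc  dEf ')        -> 'Abc Def'
#   capwords('x-ray,x-ray', ',')  -> 'X-ray,X-ray'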
####################################################################
import re as _re
from collections import ChainMap
class _TemplateMetaclass(type):
pattern = r"""
%(delim)s(?:
(?P<escaped>%(delim)s) | # Escape sequence of two delimiters
(?P<named>%(id)s) | # delimiter and a Python identifier
{(?P<braced>%(id)s)} | # delimiter and a braced identifier
(?P<invalid>) # Other ill-formed delimiter exprs
)
"""
def __init__(cls, name, bases, dct):
super(_TemplateMetaclass, cls).__init__(name, bases, dct)
if 'pattern' in dct:
pattern = cls.pattern
else:
pattern = _TemplateMetaclass.pattern % {
'delim' : _re.escape(cls.delimiter),
'id' : cls.idpattern,
}
cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE)
class Template(metaclass=_TemplateMetaclass):
"""A string class for supporting $-substitutions."""
delimiter = '$'
idpattern = r'[_a-z][_a-z0-9]*'
flags = _re.IGNORECASE
def __init__(self, template):
self.template = template
# Search for $$, $identifier, ${identifier}, and any bare $'s
def _invalid(self, mo):
i = mo.start('invalid')
lines = self.template[:i].splitlines(keepends=True)
if not lines:
colno = 1
lineno = 1
else:
colno = i - len(''.join(lines[:-1]))
lineno = len(lines)
raise ValueError('Invalid placeholder in string: line %d, col %d' %
(lineno, colno))
def substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = ChainMap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
# Check the most common path first.
named = mo.group('named') or mo.group('braced')
if named is not None:
val = mapping[named]
# We use this idiom instead of str() because the latter will
# fail if val is a Unicode containing non-ASCII characters.
return '%s' % (val,)
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
self._invalid(mo)
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
def safe_substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = ChainMap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
named = mo.group('named') or mo.group('braced')
if named is not None:
try:
# We use this idiom instead of str() because the latter
# will fail if val is a Unicode containing non-ASCII
return '%s' % (mapping[named],)
except KeyError:
return mo.group()
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
return mo.group()
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
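# Illustrative examples of Template substitution (standard behaviour):
#   Template('Hello, $who!').substitute(who='World')  -> 'Hello, World!'
#   Template('cost: $$5').substitute()                -> 'cost: $5'
#   Template('${x}s').safe_substitute()               -> '${x}s'  (missing key kept)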
########################################################################
# the Formatter class
# see PEP 3101 for details and purpose of this class
# The hard parts are reused from the C implementation. They're exposed as "_"
# prefixed methods of str.
# The overall parser is implemented in _string.formatter_parser.
# The field name parser is implemented in _string.formatter_field_name_split
class Formatter:
def format(self, format_string, *args, **kwargs):
return self.vformat(format_string, args, kwargs)
def vformat(self, format_string, args, kwargs):
used_args = set()
result = self._vformat(format_string, args, kwargs, used_args, 2)
self.check_unused_args(used_args, args, kwargs)
return result
def _vformat(self, format_string, args, kwargs, used_args, recursion_depth):
if recursion_depth < 0:
raise ValueError('Max string recursion exceeded')
result = []
for literal_text, field_name, format_spec, conversion in \
self.parse(format_string):
# output the literal text
if literal_text:
result.append(literal_text)
# if there's a field, output it
if field_name is not None:
# this is some markup, find the object and do
# the formatting
# given the field_name, find the object it references
# and the argument it came from
obj, arg_used = self.get_field(field_name, args, kwargs)
used_args.add(arg_used)
# do any conversion on the resulting object
obj = self.convert_field(obj, conversion)
# expand the format spec, if needed
format_spec = self._vformat(format_spec, args, kwargs,
used_args, recursion_depth-1)
# format the object and append to the result
result.append(self.format_field(obj, format_spec))
return ''.join(result)
def get_value(self, key, args, kwargs):
if isinstance(key, int):
return args[key]
else:
return kwargs[key]
def check_unused_args(self, used_args, args, kwargs):
pass
def format_field(self, value, format_spec):
return format(value, format_spec)
def convert_field(self, value, conversion):
# do any conversion on the resulting object
if conversion is None:
return value
elif conversion == 's':
return str(value)
elif conversion == 'r':
return repr(value)
elif conversion == 'a':
return ascii(value)
raise ValueError("Unknown conversion specifier {0!s}".format(conversion))
# returns an iterable that contains tuples of the form:
# (literal_text, field_name, format_spec, conversion)
# literal_text can be zero length
# field_name can be None, in which case there's no
# object to format and output
# if field_name is not None, it is looked up, formatted
# with format_spec and conversion and then used
def parse(self, format_string):
return _string.formatter_parser(format_string)
# given a field_name, find the object it references.
# field_name: the field being looked up, e.g. "0.name"
# or "lookup[3]"
# used_args: a set of which args have been used
# args, kwargs: as passed in to vformat
def get_field(self, field_name, args, kwargs):
first, rest = _string.formatter_field_name_split(field_name)
obj = self.get_value(first, args, kwargs)
# loop through the rest of the field_name, doing
# getattr or getitem as needed
for is_attr, i in rest:
if is_attr:
obj = getattr(obj, i)
else:
obj = obj[i]
return obj, first
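# Illustrative example: Formatter mirrors str.format() and is meant to be
# subclassed (e.g. override get_value() to supply defaults for missing keys):
#   Formatter().format('{0}.{attr}', 'obj', attr='name')  -> 'obj.name'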
| mit |
chfw/pyexcel-io | examples/custom_yaml_writer.py | 2 | 1135 | import yaml
from pyexcel_io import save_data
from pyexcel_io.plugins import IOPluginInfoChainV2
from pyexcel_io.plugin_api import IWriter, ISheetWriter
class MySheetWriter(ISheetWriter):
def __init__(self, sheet_reference):
self.native_sheet = sheet_reference
def write_row(self, data_row):
self.native_sheet.append(data_row)
def close(self):
pass
class MyWriter(IWriter):
def __init__(self, file_name, file_type, **keywords):
self.file_name = file_name
self.content = {}
def create_sheet(self, name):
array = []
self.content[name] = array
return MySheetWriter(array)
def close(self):
with open(self.file_name, "w") as f:
f.write(yaml.dump(self.content, default_flow_style=False))
IOPluginInfoChainV2(__name__).add_a_writer(
relative_plugin_class_path="MyWriter",
locations=["file"],
file_types=["yaml"],
stream_type="text",
)
if __name__ == "__main__":
data_dict = {
"sheet 1": [[1, 3, 4], [2, 4, 9]],
"sheet 2": [["B", "C", "D"]],
}
save_data("mytest.yaml", data_dict)
| bsd-3-clause |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/timeseries/python/timeseries/head.py | 5 | 19570 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeseries head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.summary import summary
class _NoStatePredictOutput(export_lib.PredictOutput):
def as_signature_def(self, receiver_tensors):
no_state_receiver_tensors = {
key: value for key, value in receiver_tensors.items()
if not key.startswith(feature_keys.State.STATE_PREFIX)}
return super(_NoStatePredictOutput, self).as_signature_def(
receiver_tensors=no_state_receiver_tensors)
class TimeSeriesRegressionHead(head_lib._Head): # pylint:disable=protected-access
"""Determines input and output signatures for a time series model."""
def __init__(self,
model,
state_manager,
optimizer,
input_statistics_generator=None,
name=None):
"""Creates a `_Head` for time series regression.
Args:
model: A model for time series regression.
state_manager: A state manager.
optimizer: An optimizer.
      input_statistics_generator: An input statistics generator.
name: An optional name for the model.
"""
self.model = model
self.state_manager = state_manager
self.optimizer = optimizer
self.input_statistics_generator = input_statistics_generator
self._name = name
@property
def name(self):
return self._name
# TODO(terrytangyuan): consolidate `model_outputs` and `_Head.LossSpec`
# once `_Head.create_loss` becomes extendable
def create_loss(self, features, mode, logits=None, labels=None):
"""See `_Head`."""
model_outputs = self.state_manager.define_loss(
self.model, features, mode)
summary.scalar(
head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),
model_outputs.loss)
return model_outputs
@property
def logits_dimension(self):
"""See `_Head`."""
return 1
def _train_ops(self, features):
"""Add training ops to the graph."""
mode = estimator_lib.ModeKeys.TRAIN
with variable_scope.variable_scope(
"model",
# Use ResourceVariables to avoid race conditions.
use_resource=True):
model_outputs = self.create_loss(features, mode)
train_op = optimizers.optimize_loss(
model_outputs.loss,
global_step=training_util.get_global_step(),
optimizer=self.optimizer,
# Learning rate is set in the Optimizer object
learning_rate=None)
return estimator_lib.EstimatorSpec(
loss=model_outputs.loss,
mode=mode,
train_op=train_op)
def _evaluate_ops(self, features):
"""Add ops for evaluation (aka filtering) to the graph."""
mode = estimator_lib.ModeKeys.EVAL
with variable_scope.variable_scope("model", use_resource=True):
model_outputs = self.create_loss(features, mode)
metrics = {}
# Just output in-sample predictions for the last chunk seen
for prediction_key, prediction_value in model_outputs.predictions.items():
metrics[prediction_key] = _identity_metric_single(prediction_key,
prediction_value)
metrics[feature_keys.FilteringResults.TIMES] = _identity_metric_single(
feature_keys.FilteringResults.TIMES, model_outputs.prediction_times)
metrics[feature_keys.FilteringResults.STATE_TUPLE] = (
_identity_metric_nested(feature_keys.FilteringResults.STATE_TUPLE,
model_outputs.end_state))
return estimator_lib.EstimatorSpec(
loss=model_outputs.loss,
mode=mode,
eval_metric_ops=metrics,
# needed for custom metrics.
predictions=model_outputs.predictions)
def _predict_ops(self, features):
"""Add ops for prediction to the graph."""
with variable_scope.variable_scope("model", use_resource=True):
prediction = self.model.predict(features=features)
prediction[feature_keys.PredictionResults.TIMES] = features[
feature_keys.PredictionFeatures.TIMES]
return estimator_lib.EstimatorSpec(
predictions=prediction, mode=estimator_lib.ModeKeys.PREDICT)
def _serving_ops(self, features):
"""Add ops for serving to the graph."""
with variable_scope.variable_scope("model", use_resource=True):
prediction_outputs = self.model.predict(features=features)
with variable_scope.variable_scope("model", reuse=True):
filtering_outputs = self.create_loss(
features, estimator_lib.ModeKeys.EVAL)
with variable_scope.variable_scope("model", reuse=True):
no_state_features = {
k: v for k, v in features.items()
if not k.startswith(feature_keys.State.STATE_PREFIX)}
# Ignore any state management when cold-starting. The model's default
# start state is replicated across the batch.
cold_filtering_outputs = self.model.define_loss(
features=no_state_features, mode=estimator_lib.ModeKeys.EVAL)
return estimator_lib.EstimatorSpec(
mode=estimator_lib.ModeKeys.PREDICT,
export_outputs={
feature_keys.SavedModelLabels.PREDICT:
export_lib.PredictOutput(prediction_outputs),
feature_keys.SavedModelLabels.FILTER:
export_lib.PredictOutput(
state_to_dictionary(filtering_outputs.end_state)),
feature_keys.SavedModelLabels.COLD_START_FILTER:
_NoStatePredictOutput(
state_to_dictionary(cold_filtering_outputs.end_state))
},
# Likely unused, but it is necessary to return `predictions` to satisfy
# the Estimator's error checking.
predictions={})
def _convert_feature_to_tensor(self, name, value):
"""Casts features to the correct dtype based on their name."""
if name in [
feature_keys.TrainEvalFeatures.TIMES,
feature_keys.PredictionFeatures.TIMES
]:
return math_ops.cast(value, dtypes.int64)
if name == feature_keys.TrainEvalFeatures.VALUES:
return math_ops.cast(value, self.model.dtype)
if name == feature_keys.PredictionFeatures.STATE_TUPLE:
return value # Correct dtypes are model-dependent
return ops.convert_to_tensor(value)
def _gather_state(self, features):
"""Returns `features` with state packed, indicates if packing was done."""
prefixed_state_re = re.compile(r"^" + feature_keys.State.STATE_PREFIX +
r"_(\d+)$")
numbered_state = []
for key, tensor in features.items():
search_result = prefixed_state_re.search(key)
if search_result:
numbered_state.append((int(search_result.group(1)), key, tensor))
if not numbered_state:
return features, False
features = features.copy()
for _, key, _ in numbered_state:
del features[key]
    numbered_state.sort(key=lambda item: item[0])  # sort by state index only
features[feature_keys.State.STATE_TUPLE] = nest.pack_sequence_as(
structure=self.model.get_start_state(),
flat_sequence=[tensor for _, _, tensor in numbered_state])
return features, True
def create_estimator_spec(self, features, mode, labels=None):
"""Performs basic error checking and returns an EstimatorSpec."""
with ops.name_scope(self._name, "head"):
if labels is not None and labels != {}: # for better error messages.
raise ValueError(
"The model received a `labels`, which is not supported. "
"Pass '{}' and '{}' as features.".format(
feature_keys.TrainEvalFeatures.TIMES,
feature_keys.TrainEvalFeatures.VALUES))
del labels
features = {
name: self._convert_feature_to_tensor(name=name, value=value)
for name, value in features.items()
}
if self.input_statistics_generator is not None:
input_statistics = self.input_statistics_generator.initialize_graph(
features, update_statistics=(mode == estimator_lib.ModeKeys.TRAIN))
else:
input_statistics = None
self.model.initialize_graph(input_statistics=input_statistics)
# _gather_state requires the model to have its graph initialized (so it
# has access to the structure of the model's state)
features, passed_flat_state = self._gather_state(features)
if (mode == estimator_lib.ModeKeys.TRAIN or
mode == estimator_lib.ModeKeys.EVAL):
_check_train_eval_features(features, self.model)
elif mode == estimator_lib.ModeKeys.PREDICT:
_check_predict_features(features)
else:
raise ValueError("Unknown mode '{}' passed to model_fn.".format(mode))
self.state_manager.initialize_graph(
model=self.model, input_statistics=input_statistics)
if mode == estimator_lib.ModeKeys.TRAIN:
return self._train_ops(features)
elif mode == estimator_lib.ModeKeys.EVAL:
return self._evaluate_ops(features)
elif mode == estimator_lib.ModeKeys.PREDICT and not passed_flat_state:
return self._predict_ops(features)
elif mode == estimator_lib.ModeKeys.PREDICT and passed_flat_state:
# The mode is PREDICT, but we're actually in export_savedmodel for
# serving. We want to return two graphs: one for filtering (state + data
# -> state) and one for predicting (state -> prediction).
return self._serving_ops(features)
class OneShotPredictionHead(TimeSeriesRegressionHead):
"""A time series head which exports a single stateless serving signature.
The serving default signature exported by this head expects `times`, `values`,
and any exogenous features, but no state. `values` has shape `[batch_size,
filter_length, num_features]` and `times` has shape `[batch_size,
total_length]`, where `total_length > filter_length`. Any exogenous features
must have their shapes prefixed by the shape of the `times` feature.
When serving, first performs filtering on the series up to `filter_length`
starting from the default start state for the model, then computes predictions
on the remainder of the series, returning them.
Model state is neither accepted nor returned, so filtering must be performed
each time predictions are requested when using this head.
"""
def _serving_ops(self, features):
"""Add ops for serving to the graph."""
with variable_scope.variable_scope("model", use_resource=True):
filtering_features = {}
prediction_features = {}
values_length = array_ops.shape(
features[feature_keys.FilteringFeatures.VALUES])[1]
for key, value in features.items():
if key == feature_keys.State.STATE_TUPLE:
# Ignore state input. The model's default start state is replicated
# across the batch.
continue
if key == feature_keys.FilteringFeatures.VALUES:
filtering_features[key] = value
else:
filtering_features[key] = value[:, :values_length]
prediction_features[key] = value[:, values_length:]
cold_filtering_outputs = self.model.define_loss(
features=filtering_features, mode=estimator_lib.ModeKeys.EVAL)
prediction_features[feature_keys.State.STATE_TUPLE] = (
cold_filtering_outputs.end_state)
with variable_scope.variable_scope("model", reuse=True):
prediction_outputs = self.model.predict(
features=prediction_features)
return estimator_lib.EstimatorSpec(
mode=estimator_lib.ModeKeys.PREDICT,
export_outputs={
feature_keys.SavedModelLabels.PREDICT:
_NoStatePredictOutput(prediction_outputs),
},
# Likely unused, but it is necessary to return `predictions` to satisfy
# the Estimator's error checking.
predictions={})
def _check_feature_shapes_compatible_with(features,
compatible_with_name,
compatible_with_value,
ignore=None):
"""Checks all features are compatible with the given time-like feature."""
if ignore is None:
ignore = set()
for name, value in features.items():
if name in ignore:
continue
feature_shape = value.get_shape()
if feature_shape.ndims is None:
continue
if feature_shape.ndims < 2:
raise ValueError(
("Features must have shape (batch dimension, window size, ...) "
"(got rank {} for feature '{}')").format(feature_shape.ndims, name))
if not feature_shape[:2].is_compatible_with(
compatible_with_value.get_shape()):
raise ValueError(
("Features must have shape (batch dimension, window size, ...) "
"where batch dimension and window size match the "
"'{times_feature}' feature (got shape {feature_shape} for "
"feature '{feature_name}' but shape {times_shape} for feature "
"'{times_feature}')").format(
times_feature=compatible_with_name,
feature_shape=feature_shape,
feature_name=name,
times_shape=compatible_with_value.get_shape()))
def _check_predict_features(features):
"""Raises errors if features are not suitable for prediction."""
if feature_keys.PredictionFeatures.TIMES not in features:
raise ValueError("Expected a '{}' feature for prediction.".format(
feature_keys.PredictionFeatures.TIMES))
if feature_keys.PredictionFeatures.STATE_TUPLE not in features:
raise ValueError("Expected a '{}' feature for prediction.".format(
feature_keys.PredictionFeatures.STATE_TUPLE))
times_feature = features[feature_keys.PredictionFeatures.TIMES]
if not times_feature.get_shape().is_compatible_with([None, None]):
raise ValueError(
("Expected shape (batch dimension, window size) for feature '{}' "
"(got shape {})").format(feature_keys.PredictionFeatures.TIMES,
times_feature.get_shape()))
_check_feature_shapes_compatible_with(
features=features,
compatible_with_name=feature_keys.PredictionFeatures.TIMES,
compatible_with_value=times_feature,
ignore=set([
feature_keys.PredictionFeatures.STATE_TUPLE # Model-dependent shapes
]))
def _check_train_eval_features(features, model):
"""Raise errors if features are not suitable for training/evaluation."""
if feature_keys.TrainEvalFeatures.TIMES not in features:
raise ValueError("Expected a '{}' feature for training/evaluation.".format(
feature_keys.TrainEvalFeatures.TIMES))
if feature_keys.TrainEvalFeatures.VALUES not in features:
raise ValueError("Expected a '{}' feature for training/evaluation.".format(
feature_keys.TrainEvalFeatures.VALUES))
times_feature = features[feature_keys.TrainEvalFeatures.TIMES]
if not times_feature.get_shape().is_compatible_with([None, None]):
raise ValueError(
("Expected shape (batch dimension, window size) for feature '{}' "
"(got shape {})").format(feature_keys.TrainEvalFeatures.TIMES,
times_feature.get_shape()))
values_feature = features[feature_keys.TrainEvalFeatures.VALUES]
if not values_feature.get_shape().is_compatible_with(
[None, None, model.num_features]):
raise ValueError(
("Expected shape (batch dimension, window size, {num_features}) "
"for feature '{feature_name}', since the model was configured "
"with num_features={num_features} (got shape {got_shape})").format(
num_features=model.num_features,
feature_name=feature_keys.TrainEvalFeatures.VALUES,
             got_shape=values_feature.get_shape()))
_check_feature_shapes_compatible_with(
features=features,
compatible_with_name=feature_keys.TrainEvalFeatures.TIMES,
compatible_with_value=times_feature,
ignore=set([
feature_keys.State.STATE_TUPLE # Model-dependent shapes
]))
def _identity_metric_single(name, input_tensor):
"""A metric which takes on its last updated value.
This keeps evaluation metrics in sync with one another, since update ops are
run separately from their result Tensors. Simply returning (input_tensor,
no_op) as a metric with a value but no update means that a metric will come
from a different batch of data than metrics which cache values in a Variable
(e.g. the default loss metric).
Args:
name: A name for the metric.
input_tensor: Any Tensor.
Returns:
A tuple of (value, update_op).
"""
metric_variable = variable_scope.variable(
name="{}_identity_metric".format(name),
initial_value=array_ops.zeros([], dtype=input_tensor.dtype),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=False)
update_op = state_ops.assign(
metric_variable, input_tensor, validate_shape=False)
# This shape will be correct once the first update runs (but may be
# incomplete, so is not helpful for initializing the variable).
metric_variable.set_shape(input_tensor.get_shape())
return (metric_variable.value(), update_op)
def _identity_metric_nested(name, input_tensors):
"""Create identity metrics for a nested tuple of Tensors."""
update_ops = []
value_tensors = []
for tensor_number, tensor in enumerate(nest.flatten(input_tensors)):
value_tensor, update_op = _identity_metric_single(
name="{}_{}".format(name, tensor_number), input_tensor=tensor)
update_ops.append(update_op)
value_tensors.append(value_tensor)
return (nest.pack_sequence_as(input_tensors, value_tensors),
control_flow_ops.group(*update_ops))
def state_to_dictionary(state_tuple):
"""Flatten model state into a dictionary with string keys."""
flattened = {}
for state_number, state_value in enumerate(nest.flatten(state_tuple)):
prefixed_state_name = "{}_{:02d}".format(feature_keys.State.STATE_PREFIX,
state_number)
flattened[prefixed_state_name] = state_value
return flattened
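# Illustrative example: a nested state tuple (a, (b, c)) flattens to
# {'<STATE_PREFIX>_00': a, '<STATE_PREFIX>_01': b, '<STATE_PREFIX>_02': c},
# i.e. exactly the numbered keys that _gather_state re-packs on input.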
| apache-2.0 |
louiskun/flaskGIT | venv/lib/python2.7/site-packages/pip/vcs/mercurial.py | 514 | 3472 | from __future__ import absolute_import
import logging
import os
import tempfile
from pip.utils import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
from pip._vendor.six.moves import configparser
logger = logging.getLogger(__name__)
class Mercurial(VersionControl):
name = 'hg'
dirname = '.hg'
repo_name = 'clone'
schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')
def export(self, location):
"""Export the Hg repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
self.run_command(
['archive', location], show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
repo_config = os.path.join(dest, self.dirname, 'hgrc')
config = configparser.SafeConfigParser()
try:
config.read(repo_config)
config.set('paths', 'default', url)
with open(repo_config, 'w') as config_file:
config.write(config_file)
except (OSError, configparser.NoSectionError) as exc:
logger.warning(
'Could not switch Mercurial repository to %s: %s', url, exc,
)
else:
self.run_command(['update', '-q'] + rev_options, cwd=dest)
def update(self, dest, rev_options):
self.run_command(['pull', '-q'], cwd=dest)
self.run_command(['update', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Cloning hg %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(['clone', '--noupdate', '-q', url, dest])
self.run_command(['update', '-q'] + rev_options, cwd=dest)
def get_url(self, location):
url = self.run_command(
['showconfig', 'paths.default'],
show_stdout=False, cwd=location).strip()
if self._is_local_repository(url):
url = path_to_url(url)
return url.strip()
def get_revision(self, location):
current_revision = self.run_command(
['parents', '--template={rev}'],
show_stdout=False, cwd=location).strip()
return current_revision
def get_revision_hash(self, location):
current_rev_hash = self.run_command(
['parents', '--template={node}'],
show_stdout=False, cwd=location).strip()
return current_rev_hash
def get_src_requirement(self, dist, location):
repo = self.get_url(location)
if not repo.lower().startswith('hg:'):
repo = 'hg+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev_hash = self.get_revision_hash(location)
return '%s@%s#egg=%s' % (repo, current_rev_hash, egg_project_name)
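    # The requirement string built above looks like (illustrative values):
    #   'hg+https://example.org/repo@0123abcdef#egg=project'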
def check_version(self, dest, rev_options):
"""Always assume the versions don't match"""
return False
vcs.register(Mercurial)
| mit |
pvtodorov/indra | indra/tools/reading/run_drum_reading.py | 1 | 4658 | import sys
import json
import time
import pickle
import logging
import argparse
from indra.sources.trips import process_xml
from indra.sources.trips.drum_reader import DrumReader
logger = logging.getLogger('indra.tools.reading.run_drum_reading')
def read_pmid_sentences(pmid_sentences, **drum_args):
"""Read sentences from a PMID-keyed dictonary and return all Statements
Parameters
----------
pmid_sentences : dict[str, list[str]]
        A dictionary where each key is a PMID pointing to a list of sentences
to be read.
**drum_args
Keyword arguments passed directly to the DrumReader. Typical
things to specify are `host` and `port`. If `run_drum` is specified
as True, this process will internally run the DRUM reading system
as a subprocess. Otherwise, DRUM is expected to be running
independently.
Returns
-------
all_statements : list[indra.statement.Statement]
A list of INDRA Statements resulting from the reading
"""
def _set_pmid(statements, pmid):
for stmt in statements:
for evidence in stmt.evidence:
evidence.pmid = pmid
# See if we need to start DRUM as a subprocess
run_drum = drum_args.get('run_drum', False)
drum_process = None
all_statements = {}
# Iterate over all the keys and sentences to read
for pmid, sentences in pmid_sentences.items():
logger.info('================================')
logger.info('Processing %d sentences for %s' % (len(sentences), pmid))
ts = time.time()
# Make a DrumReader instance
drum_args['name'] = 'DrumReader%s' % pmid
dr = DrumReader(**drum_args)
time.sleep(3)
# If there is no DRUM process set yet, we get the one that was
# just started by the DrumReader
if run_drum and drum_process is None:
drum_args.pop('run_drum', None)
drum_process = dr.drum_system
            # By setting this, we ensure that the reference to the
            # process is passed to all future DrumReaders
drum_args['drum_system'] = drum_process
# Now read each sentence for this key
for sentence in sentences:
dr.read_text(sentence)
# Start receiving results and exit when done
try:
dr.start()
except SystemExit:
pass
statements = []
# Process all the extractions into INDRA Statements
for extraction in dr.extractions:
# Sometimes we get nothing back
if not extraction:
continue
tp = process_xml(extraction)
statements += tp.statements
# Set the PMIDs for the evidences of the Statements
_set_pmid(statements, pmid)
te = time.time()
logger.info('Reading took %d seconds and produced %d Statements.' %
(te-ts, len(statements)))
all_statements[pmid] = statements
# If we were running a DRUM process, we should kill it
if drum_process and dr.drum_system:
dr._kill_drum()
return all_statements
def read_text(text, **drum_args):
"""Read sentences from a PMID-keyed dictonary and return all Statements
Parameters
----------
text : str
A block of text to run DRUM on
**drum_args
Keyword arguments passed directly to the DrumReader. Typical
things to specify are 'host' and 'port'.
Returns
-------
statements : list[indra.statement.Statement]
A list of INDRA Statements resulting from the reading
"""
return read_pmid_sentences({'PMID': text}, **drum_args)
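# Illustrative usage (sketch; the host and port below are placeholders, not
# verified defaults):
#   statements = read_text('MEK phosphorylates ERK.',
#                          host='localhost', port=6200)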
def read_pmc(pmcid, **drum_args):
# TODO: run DRUM in PMC reading mode here
return
def save_results(statements, out_fname):
with open(out_fname, 'wb') as fh:
pickle.dump(statements, fh)
def make_parser():
parser = argparse.ArgumentParser(description="Run DRUM reading on a file.")
# TODO: We should probably handle defaults better, particularly host/port.
parser.add_argument('file_name', help="The name of the file to be read.")
parser.add_argument('host', help="The host on which DRUM is running.")
parser.add_argument('port', help="The port to which the DRUM process is "
"listening.")
return parser
if __name__ == '__main__':
parser = make_parser()
args = parser.parse_args()
with open(args.file_name, 'rt') as fh:
content = json.load(fh)
statements = read_pmid_sentences(content, host=args.host, port=args.port)
save_results(statements, 'results.pkl')
| bsd-2-clause |
Changaco/oh-mainline | vendor/packages/PyYaml/lib3/yaml/dumper.py | 277 | 2723 |
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
from .emitter import *
from .serializer import *
from .representer import *
from .resolver import *
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class Dumper(Emitter, Serializer, Representer, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
| agpl-3.0 |
poiati/django | django/contrib/flatpages/forms.py | 357 | 2024 | from django import forms
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext, ugettext_lazy as _
class FlatpageForm(forms.ModelForm):
url = forms.RegexField(label=_("URL"), max_length=100, regex=r'^[-\w/\.~]+$',
help_text=_("Example: '/about/contact/'. Make sure to have leading"
" and trailing slashes."),
error_messages={
"invalid": _("This value must contain only letters, numbers,"
" dots, underscores, dashes, slashes or tildes."),
},
)
class Meta:
model = FlatPage
fields = '__all__'
def clean_url(self):
url = self.cleaned_data['url']
if not url.startswith('/'):
raise forms.ValidationError(
ugettext("URL is missing a leading slash."),
code='missing_leading_slash',
)
if (settings.APPEND_SLASH and
'django.middleware.common.CommonMiddleware' in settings.MIDDLEWARE_CLASSES and
not url.endswith('/')):
raise forms.ValidationError(
ugettext("URL is missing a trailing slash."),
code='missing_trailing_slash',
)
return url
def clean(self):
url = self.cleaned_data.get('url')
sites = self.cleaned_data.get('sites')
same_url = FlatPage.objects.filter(url=url)
if self.instance.pk:
same_url = same_url.exclude(pk=self.instance.pk)
if sites and same_url.filter(sites__in=sites).exists():
for site in sites:
if same_url.filter(sites=site).exists():
raise forms.ValidationError(
_('Flatpage with url %(url)s already exists for site %(site)s'),
code='duplicate_url',
params={'url': url, 'site': site},
)
return super(FlatpageForm, self).clean()
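# Illustrative behaviour of the validators above: '/about/contact/' passes
# clean_url(), while 'about/' fails (no leading slash) and, when
# settings.APPEND_SLASH and CommonMiddleware are enabled, '/about' fails
# (no trailing slash).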
| bsd-3-clause |
ansible/ansible | test/units/module_utils/common/validation/test_check_required_by.py | 10 | 2642 | # -*- coding: utf-8 -*-
# Copyright: (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils._text import to_native
from ansible.module_utils.common.validation import check_required_by
@pytest.fixture
def path_arguments_terms():
return {
"path": ["mode", "owner"],
}
def test_check_required_by():
arguments_terms = {}
params = {}
assert check_required_by(arguments_terms, params) == {}
def test_check_required_by_missing():
arguments_terms = {
"force": "force_reason",
}
params = {"force": True}
expected = "missing parameter(s) required by 'force': force_reason"
with pytest.raises(TypeError) as e:
check_required_by(arguments_terms, params)
assert to_native(e.value) == expected
def test_check_required_by_multiple(path_arguments_terms):
params = {
"path": "/foo/bar",
}
expected = "missing parameter(s) required by 'path': mode, owner"
with pytest.raises(TypeError) as e:
check_required_by(path_arguments_terms, params)
assert to_native(e.value) == expected
def test_check_required_by_single(path_arguments_terms):
params = {"path": "/foo/bar", "mode": "0700"}
expected = "missing parameter(s) required by 'path': owner"
with pytest.raises(TypeError) as e:
check_required_by(path_arguments_terms, params)
assert to_native(e.value) == expected
def test_check_required_by_missing_none(path_arguments_terms):
params = {
"path": "/foo/bar",
"mode": "0700",
"owner": "root",
}
assert check_required_by(path_arguments_terms, params)
def test_check_required_by_options_context(path_arguments_terms):
params = {"path": "/foo/bar", "mode": "0700"}
options_context = ["foo_context"]
expected = "missing parameter(s) required by 'path': owner found in foo_context"
with pytest.raises(TypeError) as e:
check_required_by(path_arguments_terms, params, options_context)
assert to_native(e.value) == expected
def test_check_required_by_missing_multiple_options_context(path_arguments_terms):
params = {
"path": "/foo/bar",
}
options_context = ["foo_context"]
expected = (
"missing parameter(s) required by 'path': mode, owner found in foo_context"
)
with pytest.raises(TypeError) as e:
check_required_by(path_arguments_terms, params, options_context)
assert to_native(e.value) == expected
| gpl-3.0 |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/sqlite3/dump.py | 1 | 1847 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: dump.py
def _iterdump(connection):
"""
Returns an iterator to the dump of the database in an SQL text format.
Used to produce an SQL dump of the database. Useful to save an in-memory
database for later restoration. This function should not be called
directly but instead called from the Connection method, iterdump().
"""
cu = connection.cursor()
yield 'BEGIN TRANSACTION;'
q = "\n SELECT name, type, sql\n FROM sqlite_master\n WHERE sql NOT NULL AND\n type == 'table'\n "
schema_res = cu.execute(q)
for table_name, type, sql in schema_res.fetchall():
if table_name == 'sqlite_sequence':
yield 'DELETE FROM sqlite_sequence;'
elif table_name == 'sqlite_stat1':
yield 'ANALYZE sqlite_master;'
elif table_name.startswith('sqlite_'):
continue
else:
yield '%s;' % sql
res = cu.execute("PRAGMA table_info('%s')" % table_name)
column_names = [ str(table_info[1]) for table_info in res.fetchall() ]
q = 'SELECT \'INSERT INTO "%(tbl_name)s" VALUES('
q += ','.join([ "'||quote(" + col + ")||'" for col in column_names ])
q += ")' FROM '%(tbl_name)s'"
query_res = cu.execute(q % {'tbl_name': table_name})
for row in query_res:
yield '%s;' % row[0]
q = "\n SELECT name, type, sql\n FROM sqlite_master\n WHERE sql NOT NULL AND\n type IN ('index', 'trigger', 'view')\n "
schema_res = cu.execute(q)
for name, type, sql in schema_res.fetchall():
yield '%s;' % sql
yield 'COMMIT;' | unlicense |
edx/edx-platform | lms/djangoapps/grades/tests/test_api.py | 5 | 4174 | """ Tests calling the grades api directly """
from unittest.mock import patch
import ddt
from common.djangoapps.student.tests.factories import UserFactory
from lms.djangoapps.grades import api
from lms.djangoapps.grades.models import PersistentSubsectionGrade, PersistentSubsectionGradeOverride
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@ddt.ddt
class OverrideSubsectionGradeTests(ModuleStoreTestCase):
"""
Tests for the override subsection grades api call
"""
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.user = UserFactory()
cls.overriding_user = UserFactory()
cls.signal_patcher = patch('lms.djangoapps.grades.signals.signals.SUBSECTION_OVERRIDE_CHANGED.send')
cls.signal_patcher.start()
cls.id_patcher = patch('lms.djangoapps.grades.api.create_new_event_transaction_id')
cls.mock_create_id = cls.id_patcher.start()
cls.mock_create_id.return_value = 1
cls.type_patcher = patch('lms.djangoapps.grades.api.set_event_transaction_type')
cls.type_patcher.start()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls.signal_patcher.stop()
cls.id_patcher.stop()
cls.type_patcher.stop()
def setUp(self):
super().setUp()
self.course = CourseFactory.create(org='edX', number='DemoX', display_name='Demo_Course', run='Spring2019')
self.subsection = ItemFactory.create(parent=self.course, category="subsection", display_name="Subsection")
self.grade = PersistentSubsectionGrade.update_or_create_grade(
user_id=self.user.id,
course_id=self.course.id,
usage_key=self.subsection.location,
first_attempted=None,
visible_blocks=[],
earned_all=6.0,
possible_all=6.0,
earned_graded=5.0,
possible_graded=5.0
)
def tearDown(self):
super().tearDown()
PersistentSubsectionGradeOverride.objects.all().delete() # clear out all previous overrides
@ddt.data(0.0, None, 3.0)
def test_override_subsection_grade(self, earned_graded):
api.override_subsection_grade(
self.user.id,
self.course.id,
self.subsection.location,
overrider=self.overriding_user,
earned_graded=earned_graded,
comment='Test Override Comment',
)
override_obj = api.get_subsection_grade_override(
self.user.id,
self.course.id,
self.subsection.location
)
assert override_obj is not None
assert override_obj.earned_graded_override == earned_graded
assert override_obj.override_reason == 'Test Override Comment'
for i in range(3):
override_obj.override_reason = 'this field purposefully left blank'
override_obj.earned_graded_override = i
override_obj.save()
api.override_subsection_grade(
self.user.id,
self.course.id,
self.subsection.location,
overrider=self.overriding_user,
earned_graded=earned_graded,
comment='Test Override Comment 2',
)
override_obj = api.get_subsection_grade_override(
self.user.id,
self.course.id,
self.subsection.location
)
assert override_obj is not None
assert override_obj.earned_graded_override == earned_graded
assert override_obj.override_reason == 'Test Override Comment 2'
assert 5 == len(override_obj.history.all())
for history_entry in override_obj.history.all():
if history_entry.override_reason.startswith('Test Override Comment'):
assert self.overriding_user == history_entry.history_user
assert self.overriding_user.id == history_entry.history_user_id
else:
assert history_entry.history_user is None
assert history_entry.history_user_id is None
| agpl-3.0 |
nkalodimas/invenio | modules/websubmit/lib/functions/Notify_URL.py | 25 | 5236 | ## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import os
from invenio.bibtask import \
task_low_level_submission, \
bibtask_allocate_sequenceid
from invenio.websubmit_functions.Shared_Functions import ParamFromFile
def Notify_URL(parameters, curdir, form, user_info=None):
"""
Access a given URL, and possibly post some content.
Could be used to notify that a record has been fully integrated.
(the URL is only accessed once the BibTask created by this
    function runs in BibSched, not when the function is run. The
BibTask uses a task sequence ID to respect ordering of tasks)
if URL is empty, skip the notification.
@param parameters: (dictionary) - contains the following parameter
strings used by this function:
+ url: (string) - the URL to be contacted by this function
(must start with http/https)
If value starts with "FILE:", will look for
the URL in a file on curdir with the given name.
for eg: "FILE:my_url"
(value retrieved when function is run)
+ data: (string) - (optional) the data to be posted at the
given URL. if no value is given, the URL
will be accessed via GET.
If value starts with "FILE:", will look for
the data in a file on curdir with the given name.
for eg: "FILE:my_data"
(value retrieved when function is run)
+ content_type: (string) - (optional) the content-type to use
to post data. Default is 'text/plain'.
Ignored if not data is posted.
          + attempt_times: (int) - (optional) up to how many times we
                                   shall try to contact the URL in case
                                   contacting it fails
+ attempt_sleeptime: (int) - (optional) how many seconds to
sleep between each attempt?
+ admin_emails: (string) - (optional) list of emails (comma-separated
values) to contact in case the URL
cannot be accessed after all attempts.
If value starts with "FILE:", will look for
the emails in a file on curdir with the given name.
for eg: "FILE:my_email"
(value retrieved when function is run)
+ user: (string) - the user to be used to launch the task
                     (visible in BibSched). If value starts
                     with "FILE:", will look for the user in a file on
                     curdir with the given name.
                     for eg: "FILE:my_user"
(value retrieved when function is run)
"""
other_bibtasklet_arguments = []
sequence_id = bibtask_allocate_sequenceid(curdir)
url = parameters["url"]
data = parameters["data"]
admin_emails = parameters["admin_emails"]
content_type = parameters["content_type"]
attempt_times = parameters["attempt_times"]
attempt_sleeptime = parameters["attempt_sleeptime"]
user = parameters["user"]
# Maybe some params must be read from disk
if url.startswith('FILE:'):
url = ParamFromFile(os.path.join(curdir, url[5:]))
if not url:
return ""
if data.startswith('FILE:'):
data = ParamFromFile(os.path.join(curdir, data[5:]))
if admin_emails.startswith('FILE:'):
admin_emails = ParamFromFile(os.path.join(curdir, admin_emails[5:]))
if user.startswith('FILE:'):
user = ParamFromFile(os.path.join(curdir, user[5:]))
if data:
other_bibtasklet_arguments.extend(("-a", "data=%s" % data))
other_bibtasklet_arguments.extend(("-a", "content_type=%s" % content_type))
return task_low_level_submission(
"bibtasklet", user, "-T", "bst_notify_url",
"-I", str(sequence_id),
"-a", "url=%s" % url,
"-a", "attempt_times=%s" % attempt_times,
"-a", "attempt_sleeptime=%s" % attempt_sleeptime,
"-a", "admin_emails=%s" % admin_emails,
*other_bibtasklet_arguments)
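# A minimal sketch of the 'parameters' dict this function expects; the
# values below are hypothetical, and 'FILE:'-prefixed values are read from
# curdir when the function runs, as described in the docstring:
#
#   parameters = {
#       'url': 'FILE:my_url',
#       'data': 'FILE:my_data',
#       'content_type': 'application/json',
#       'attempt_times': 3,
#       'attempt_sleeptime': 60,
#       'admin_emails': 'admin@example.org',
#       'user': 'FILE:my_user',
#   }
#   Notify_URL(parameters, curdir, form)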
| gpl-2.0 |
DDEFISHER/servo | tests/wpt/harness/wptrunner/browsers/firefox.py | 39 | 9818 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import subprocess
import sys
import mozinfo
from mozprocess import ProcessHandler
from mozprofile import FirefoxProfile, Preferences
from mozprofile.permissions import ServerLocations
from mozrunner import FirefoxRunner
from mozcrash import mozcrash
from .base import (get_free_port,
Browser,
ExecutorBrowser,
require_arg,
cmd_arg,
browser_command)
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executormarionette import (MarionetteTestharnessExecutor,
MarionetteRefTestExecutor,
MarionetteWdspecExecutor)
from ..environment import hostnames
here = os.path.join(os.path.split(__file__)[0])
__wptrunner__ = {"product": "firefox",
"check_args": "check_args",
"browser": "FirefoxBrowser",
"executor": {"testharness": "MarionetteTestharnessExecutor",
"reftest": "MarionetteRefTestExecutor",
"wdspec": "MarionetteWdspecExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_options": "env_options",
"run_info_extras": "run_info_extras",
"update_properties": "update_properties"}
def check_args(**kwargs):
require_arg(kwargs, "binary")
if kwargs["ssl_type"] != "none":
require_arg(kwargs, "certutil_binary")
def browser_kwargs(**kwargs):
return {"binary": kwargs["binary"],
"prefs_root": kwargs["prefs_root"],
"debug_info": kwargs["debug_info"],
"symbols_path": kwargs["symbols_path"],
"stackwalk_binary": kwargs["stackwalk_binary"],
"certutil_binary": kwargs["certutil_binary"],
"ca_certificate_path": kwargs["ssl_env"].ca_cert_path(),
"e10s": kwargs["gecko_e10s"]}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
executor_kwargs = base_executor_kwargs(test_type, server_config,
cache_manager, **kwargs)
executor_kwargs["close_after_done"] = True
if kwargs["timeout_multiplier"] is None:
if test_type == "reftest":
if run_info_data["debug"] or run_info_data.get("asan"):
executor_kwargs["timeout_multiplier"] = 4
else:
executor_kwargs["timeout_multiplier"] = 2
elif run_info_data["debug"] or run_info_data.get("asan"):
executor_kwargs["timeout_multiplier"] = 3
if test_type == "wdspec":
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
return executor_kwargs
def env_options():
return {"host": "127.0.0.1",
"external_host": "web-platform.test",
"bind_hostname": "false",
"certificate_domain": "web-platform.test",
"supports_debugger": True}
def run_info_extras(**kwargs):
return {"e10s": kwargs["gecko_e10s"]}
def update_properties():
return ["debug", "e10s", "os", "version", "processor", "bits"], {"debug", "e10s"}
class FirefoxBrowser(Browser):
used_ports = set()
init_timeout = 60
def __init__(self, logger, binary, prefs_root, debug_info=None,
symbols_path=None, stackwalk_binary=None, certutil_binary=None,
ca_certificate_path=None, e10s=False):
Browser.__init__(self, logger)
self.binary = binary
self.prefs_root = prefs_root
self.marionette_port = None
self.runner = None
self.debug_info = debug_info
self.profile = None
self.symbols_path = symbols_path
self.stackwalk_binary = stackwalk_binary
self.ca_certificate_path = ca_certificate_path
self.certutil_binary = certutil_binary
self.e10s = e10s
def start(self):
self.marionette_port = get_free_port(2828, exclude=self.used_ports)
self.used_ports.add(self.marionette_port)
env = os.environ.copy()
env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
locations = ServerLocations(filename=os.path.join(here, "server-locations.txt"))
preferences = self.load_prefs()
self.profile = FirefoxProfile(locations=locations,
preferences=preferences)
self.profile.set_preferences({"marionette.defaultPrefs.enabled": True,
"marionette.defaultPrefs.port": self.marionette_port,
"dom.disable_open_during_load": False,
"network.dns.localDomains": ",".join(hostnames)})
if self.e10s:
self.profile.set_preferences({"browser.tabs.remote.autostart": True})
if self.ca_certificate_path is not None:
self.setup_ssl()
debug_args, cmd = browser_command(self.binary, [cmd_arg("marionette"), "about:blank"],
self.debug_info)
self.runner = FirefoxRunner(profile=self.profile,
binary=cmd[0],
cmdargs=cmd[1:],
env=env,
process_class=ProcessHandler,
process_args={"processOutputLine": [self.on_output]})
self.logger.debug("Starting Firefox")
self.runner.start(debug_args=debug_args, interactive=self.debug_info and self.debug_info.interactive)
self.logger.debug("Firefox Started")
def load_prefs(self):
prefs_path = os.path.join(self.prefs_root, "prefs_general.js")
if os.path.exists(prefs_path):
preferences = Preferences.read_prefs(prefs_path)
else:
self.logger.warning("Failed to find base prefs file in %s" % prefs_path)
preferences = []
return preferences
def stop(self):
self.logger.debug("Stopping browser")
if self.runner is not None:
try:
self.runner.stop()
except OSError:
# This can happen on Windows if the process is already dead
pass
def pid(self):
if self.runner.process_handler is None:
return None
try:
return self.runner.process_handler.pid
except AttributeError:
return None
def on_output(self, line):
"""Write a line of output from the firefox process to the log"""
self.logger.process_output(self.pid(),
line.decode("utf8", "replace"),
command=" ".join(self.runner.command))
def is_alive(self):
if self.runner:
return self.runner.is_running()
return False
def cleanup(self):
self.stop()
def executor_browser(self):
assert self.marionette_port is not None
return ExecutorBrowser, {"marionette_port": self.marionette_port}
def log_crash(self, process, test):
dump_dir = os.path.join(self.profile.profile, "minidumps")
mozcrash.log_crashes(self.logger,
dump_dir,
symbols_path=self.symbols_path,
stackwalk_binary=self.stackwalk_binary,
process=process,
test=test)
def setup_ssl(self):
"""Create a certificate database to use in the test profile. This is configured
to trust the CA Certificate that has signed the web-platform.test server
certificate."""
self.logger.info("Setting up ssl")
# Make sure the certutil libraries from the source tree are loaded when using a
# local copy of certutil
# TODO: Maybe only set this if certutil won't launch?
env = os.environ.copy()
certutil_dir = os.path.dirname(self.binary)
if mozinfo.isMac:
env_var = "DYLD_LIBRARY_PATH"
elif mozinfo.isUnix:
env_var = "LD_LIBRARY_PATH"
else:
env_var = "PATH"
env[env_var] = (os.path.pathsep.join([certutil_dir, env[env_var]])
if env_var in env else certutil_dir).encode(
sys.getfilesystemencoding() or 'utf-8', 'replace')
def certutil(*args):
cmd = [self.certutil_binary] + list(args)
self.logger.process_output("certutil",
subprocess.check_output(cmd,
env=env,
stderr=subprocess.STDOUT),
" ".join(cmd))
pw_path = os.path.join(self.profile.profile, ".crtdbpw")
with open(pw_path, "w") as f:
# Use empty password for certificate db
f.write("\n")
cert_db_path = self.profile.profile
# Create a new certificate db
certutil("-N", "-d", cert_db_path, "-f", pw_path)
# Add the CA certificate to the database and mark as trusted to issue server certs
certutil("-A", "-d", cert_db_path, "-f", pw_path, "-t", "CT,,",
"-n", "web-platform-tests", "-i", self.ca_certificate_path)
# List all certs in the database
certutil("-L", "-d", cert_db_path)
| mpl-2.0 |
AriZuu/micropython | tests/wipy/spi.py | 69 | 3832 | '''
SPI test for the CC3200 based boards.
'''
from machine import SPI
import os
mch = os.uname().machine
if 'LaunchPad' in mch:
spi_pins = ('GP14', 'GP16', 'GP30')
elif 'WiPy' in mch:
spi_pins = ('GP14', 'GP16', 'GP30')
else:
raise Exception('Board not supported!')
spi = SPI(0, SPI.MASTER, baudrate=2000000, polarity=0, phase=0, firstbit=SPI.MSB, pins=spi_pins)
print(spi)
spi = SPI(baudrate=5000000)
print(spi)
spi = SPI(0, SPI.MASTER, baudrate=200000, bits=16, polarity=0, phase=0)
print(spi)
spi = SPI(0, SPI.MASTER, baudrate=10000000, polarity=0, phase=1)
print(spi)
spi = SPI(0, SPI.MASTER, baudrate=5000000, bits=32, polarity=1, phase=0)
print(spi)
spi = SPI(0, SPI.MASTER, baudrate=10000000, polarity=1, phase=1)
print(spi)
spi.init(baudrate=20000000, polarity=0, phase=0)
print(spi)
spi = SPI()
print(spi)
SPI(mode=SPI.MASTER)
SPI(mode=SPI.MASTER, pins=spi_pins)
SPI(id=0, mode=SPI.MASTER, polarity=0, phase=0, pins=('GP14', 'GP16', 'GP15'))
SPI(0, SPI.MASTER, polarity=0, phase=0, pins=('GP31', 'GP16', 'GP15'))
spi = SPI(0, SPI.MASTER, baudrate=10000000, polarity=0, phase=0, pins=spi_pins)
print(spi.write('123456') == 6)
buffer_r = bytearray(10)
print(spi.readinto(buffer_r) == 10)
print(spi.readinto(buffer_r, write=0x55) == 10)
read = spi.read(10)
print(len(read) == 10)
read = spi.read(10, write=0xFF)
print(len(read) == 10)
buffer_w = bytearray([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
print(spi.write_readinto(buffer_w, buffer_r) == 10)
print(buffer_w == buffer_r)
# test all polarity and phase combinations
spi.init(polarity=1, phase=0, pins=None)
buffer_r = bytearray(10)
spi.write_readinto(buffer_w, buffer_r)
print(buffer_w == buffer_r)
spi.init(polarity=1, phase=1, pins=None)
buffer_r = bytearray(10)
spi.write_readinto(buffer_w, buffer_r)
print(buffer_w == buffer_r)
spi.init(polarity=0, phase=1, pins=None)
buffer_r = bytearray(10)
spi.write_readinto(buffer_w, buffer_r)
print(buffer_w == buffer_r)
# test 16 and 32 bit transfers
buffer_w = bytearray([1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2])
buffer_r = bytearray(12)
spi.init(SPI.MASTER, baudrate=10000000, bits=16, polarity=0, phase=0, pins=None)
print(spi.write_readinto(buffer_w, buffer_r) == 12)
print(buffer_w == buffer_r)
buffer_r = bytearray(12)
spi.init(SPI.MASTER, baudrate=10000000, bits=32, polarity=0, phase=0, pins=None)
print(spi.write_readinto(buffer_w, buffer_r) == 12)
print(buffer_w == buffer_r)
# check for memory leaks...
for i in range(0, 1000):
spi = SPI(0, SPI.MASTER, baudrate=1000000)
# test deinit
spi = SPI(0, SPI.MASTER, baudrate=1000000)
spi.deinit()
print(spi)
spi = SPI(0, SPI.MASTER, baudrate=1000000)
# next ones must fail
try:
spi = SPI(0, 10, baudrate=10000000, polarity=0, phase=0)
except:
print("Exception")
try:
spi = SPI(0, mode=SPI.MASTER, baudrate=10000000, polarity=1, phase=2)
except:
print("Exception")
try:
spi = SPI(1, mode=SPI.MASTER, baudrate=10000000, polarity=1, phase=1)
except:
print("Exception")
try:
spi = SPI(0, mode=SPI.MASTER, baudrate=2000000, polarity=2, phase=0)
except:
print("Exception")
try:
spi = SPI(0, mode=SPI.MASTER, baudrate=2000000, polarity=2, phase=0, firstbit=2)
except:
print("Exception")
try:
spi = SPI(0, mode=SPI.MASTER, baudrate=2000000, polarity=2, phase=0, pins=('GP1', 'GP2'))
except:
print("Exception")
try:
spi = SPI(0, mode=SPI.MASTER, baudrate=2000000, polarity=0, phase=0, bits=9)
except:
print("Exception")
spi.deinit()
try:
spi.read(15)
except Exception:
print("Exception")
try:
    spi.readinto(buffer_r)
except Exception:
print("Exception")
try:
    spi.write('abc')
except Exception:
print("Exception")
try:
spi.write_readinto(buffer_w, buffer_r)
except Exception:
print("Exception")
# reinitialization must work
spi.init(baudrate=500000)
print(spi)
| mit |
gangadhar-kadam/smrterpshop | shopping_cart/shopping_cart/__init__.py | 12 | 3804 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import get_fullname, flt
from shopping_cart.shopping_cart.doctype.shopping_cart_settings.shopping_cart_settings import is_shopping_cart_enabled, get_default_territory
# TODO
# validate stock of each item in Website Warehouse or have a list of possible warehouses in Shopping Cart Settings
def get_quotation(user=None):
if not user:
user = frappe.session.user
if user == "Guest":
raise frappe.PermissionError
is_shopping_cart_enabled()
party = get_party(user)
values = {
"order_type": "Shopping Cart",
party.doctype.lower(): party.name,
"docstatus": 0,
"contact_email": user
}
try:
quotation = frappe.get_doc("Quotation", values)
except frappe.DoesNotExistError:
quotation = frappe.new_doc("Quotation")
quotation.update(values)
if party.doctype == "Customer":
quotation.contact_person = frappe.db.get_value("Contact", {"customer": party.name, "email_id": user})
quotation.insert(ignore_permissions=True)
return quotation
def set_item_in_cart(item_code, qty, user=None):
validate_item(item_code)
quotation = get_quotation(user=user)
qty = flt(qty)
quotation_item = quotation.get("quotation_details", {"item_code": item_code})
if qty==0:
if quotation_item:
# remove
quotation.get("quotation_details").remove(quotation_item[0])
else:
# add or update
if quotation_item:
quotation_item[0].qty = qty
else:
quotation.append("quotation_details", {
"doctype": "Quotation Item",
"item_code": item_code,
"qty": qty
})
quotation.save(ignore_permissions=True)
return quotation
def set_address_in_cart(address_fieldname, address, user=None):
quotation = get_quotation(user=user)
validate_address(quotation, address_fieldname, address)
if quotation.get(address_fieldname) != address:
quotation.set(address_fieldname, address)
if address_fieldname=="customer_address":
quotation.set("address_display", None)
else:
quotation.set("shipping_address", None)
quotation.save(ignore_permissions=True)
return quotation
def validate_item(item_code):
item = frappe.db.get_value("Item", item_code, ["item_name", "show_in_website"], as_dict=True)
if not item.show_in_website:
frappe.throw(_("{0} cannot be purchased using Shopping Cart").format(item.item_name))
def validate_address(quotation, address_fieldname, address):
party = get_party(quotation.contact_email)
address_doc = frappe.get_doc(address)
if address_doc.get(party.doctype.lower()) != party.name:
if address_fieldname=="customer_address":
frappe.throw(_("Invalid Billing Address"))
else:
frappe.throw(_("Invalid Shipping Address"))
def get_party(user):
def _get_party(user):
customer = frappe.db.get_value("Contact", {"email_id": user}, "customer")
if customer:
return frappe.get_doc("Customer", customer)
lead = frappe.db.get_value("Lead", {"email_id": user})
if lead:
return frappe.get_doc("Lead", lead)
# create a lead
lead = frappe.new_doc("Lead")
lead.update({
"email_id": user,
"lead_name": get_fullname(user),
"territory": guess_territory()
})
lead.insert(ignore_permissions=True)
return lead
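    # Memoize the party lookup on frappe.local so repeated calls within one
    # request (e.g. fetching the quotation, then validating its addresses)
    # hit the database only once; the cache lives only for the current
    # request.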
if not getattr(frappe.local, "shopping_cart_party", None):
frappe.local.shopping_cart_party = {}
if not frappe.local.shopping_cart_party.get(user):
frappe.local.shopping_cart_party[user] = _get_party(user)
return frappe.local.shopping_cart_party[user]
def guess_territory():
territory = None
if frappe.session.get("session_country"):
territory = frappe.db.get_value("Territory", frappe.session.get("session_country"))
return territory or get_default_territory()
| agpl-3.0 |
postlund/home-assistant | homeassistant/components/mikrotik/device_tracker.py | 7 | 4356 | """Support for Mikrotik routers as device tracker."""
import logging
from homeassistant.components.device_tracker.config_entry import ScannerEntity
from homeassistant.components.device_tracker.const import (
DOMAIN as DEVICE_TRACKER,
SOURCE_TYPE_ROUTER,
)
from homeassistant.core import callback
from homeassistant.helpers import entity_registry
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.dt as dt_util
from .const import ATTR_MANUFACTURER, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up device tracker for Mikrotik component."""
hub = hass.data[DOMAIN][config_entry.entry_id]
tracked = {}
registry = await entity_registry.async_get_registry(hass)
    # Restore clients that are not part of the active clients list.
for entity in registry.entities.values():
if (
entity.config_entry_id == config_entry.entry_id
and entity.domain == DEVICE_TRACKER
):
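            # Restore only clients known to the hub but absent from the
            # active device list; active clients are handled by
            # update_items, and unknown unique_ids have nothing to restore.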
if (
entity.unique_id in hub.api.devices
or entity.unique_id not in hub.api.all_devices
):
continue
hub.api.restore_device(entity.unique_id)
@callback
def update_hub():
"""Update the status of the device."""
update_items(hub, async_add_entities, tracked)
async_dispatcher_connect(hass, hub.signal_update, update_hub)
update_hub()
@callback
def update_items(hub, async_add_entities, tracked):
"""Update tracked device state from the hub."""
new_tracked = []
for mac, device in hub.api.devices.items():
if mac not in tracked:
tracked[mac] = MikrotikHubTracker(device, hub)
new_tracked.append(tracked[mac])
if new_tracked:
async_add_entities(new_tracked)
class MikrotikHubTracker(ScannerEntity):
"""Representation of network device."""
def __init__(self, device, hub):
"""Initialize the tracked device."""
self.device = device
self.hub = hub
self.unsub_dispatcher = None
@property
def is_connected(self):
"""Return true if the client is connected to the network."""
if (
self.device.last_seen
and (dt_util.utcnow() - self.device.last_seen)
< self.hub.option_detection_time
):
return True
return False
@property
def source_type(self):
"""Return the source type of the client."""
return SOURCE_TYPE_ROUTER
@property
def name(self) -> str:
"""Return the name of the client."""
return self.device.name
@property
def unique_id(self) -> str:
"""Return a unique identifier for this device."""
return self.device.mac
@property
def available(self) -> bool:
"""Return if controller is available."""
return self.hub.available
@property
def device_state_attributes(self):
"""Return the device state attributes."""
if self.is_connected:
return self.device.attrs
return None
@property
def device_info(self):
"""Return a client description for device registry."""
info = {
"connections": {(CONNECTION_NETWORK_MAC, self.device.mac)},
"manufacturer": ATTR_MANUFACTURER,
"identifiers": {(DOMAIN, self.device.mac)},
"name": self.name,
"via_device": (DOMAIN, self.hub.serial_num),
}
return info
async def async_added_to_hass(self):
"""Client entity created."""
_LOGGER.debug("New network device tracker %s (%s)", self.name, self.unique_id)
self.unsub_dispatcher = async_dispatcher_connect(
self.hass, self.hub.signal_update, self.async_write_ha_state
)
async def async_update(self):
"""Synchronize state with hub."""
_LOGGER.debug(
"Updating Mikrotik tracked client %s (%s)", self.entity_id, self.unique_id
)
await self.hub.request_update()
async def will_remove_from_hass(self):
"""Disconnect from dispatcher."""
if self.unsub_dispatcher:
self.unsub_dispatcher()
| apache-2.0 |
DirtyPiece/dancestudio | Build/Tools/Python27/Tools/Scripts/combinerefs.py | 102 | 4381 | #! /usr/bin/env python
"""
combinerefs path
A helper for analyzing PYTHONDUMPREFS output.
When the PYTHONDUMPREFS envar is set in a debug build, at Python shutdown
time Py_Finalize() prints the list of all live objects twice: first it
prints the repr() of each object while the interpreter is still fully intact.
After cleaning up everything it can, it prints all remaining live objects
again, but the second time just prints their addresses, refcounts, and type
names (because the interpreter has been torn down, calling repr methods at
this point can get into infinite loops or blow up).
Save all this output into a file, then run this script passing the path to
that file. The script finds both output chunks, combines them, then prints
a line of output for each object still alive at the end:
address refcnt typename repr
address is the address of the object, in whatever format the platform C
produces for a %p format code.
refcnt is of the form
"[" ref "]"
when the object's refcount is the same in both PYTHONDUMPREFS output blocks,
or
"[" ref_before "->" ref_after "]"
if the refcount changed.
typename is object->ob_type->tp_name, extracted from the second PYTHONDUMPREFS
output block.
repr is repr(object), extracted from the first PYTHONDUMPREFS output block.
CAUTION: If object is a container type, it may not actually contain all the
objects shown in the repr: the repr was captured from the first output block,
and some of the containees may have been released since then. For example,
it's common for the line showing the dict of interned strings to display
strings that no longer exist at the end of Py_Finalize; this can be recognized
(albeit painfully) because such containees don't have a line of their own.
The objects are listed in allocation order, with most-recently allocated
printed first, and the first object allocated printed last.
Simple examples:
00857060 [14] str '__len__'
The str object '__len__' is alive at shutdown time, and both PYTHONDUMPREFS
output blocks said there were 14 references to it. This is probably due to
C modules that intern the string "__len__" and keep a reference to it in a
file static.
00857038 [46->5] tuple ()
46-5 = 41 references to the empty tuple were removed by the cleanup actions
between the times PYTHONDUMPREFS produced output.
00858028 [1025->1456] str '<dummy key>'
The string '<dummy key>', which is used in dictobject.c to overwrite a real
key that gets deleted, grew several hundred references during cleanup. It
suggests that stuff did get removed from dicts by cleanup, but that the dicts
themselves are staying alive for some reason. """
import re
import sys
# Generate lines from fileiter. If whilematch is true, continue reading
# while the regexp object pat matches line. If whilematch is false, lines
# are read so long as pat doesn't match them. In any case, the first line
# that doesn't match pat (when whilematch is true), or that does match pat
# (when whilematch is false), is lost, and fileiter will resume at the line
# following it.
def read(fileiter, pat, whilematch):
for line in fileiter:
if bool(pat.match(line)) == whilematch:
yield line
else:
break
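# A sketch of the intended use (mirroring the first call in combine()
# below): skim past the header up to the 'Remaining objects:' marker; note
# that the marker line itself is consumed and lost.
#
#   fi = iter(file('dumprefs.txt'))  # hypothetical PYTHONDUMPREFS capture
#   for line in read(fi, re.compile(r'^Remaining objects:$'), False):
#       pass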
def combine(fname):
f = file(fname)
fi = iter(f)
for line in read(fi, re.compile(r'^Remaining objects:$'), False):
pass
crack = re.compile(r'([a-zA-Z\d]+) \[(\d+)\] (.*)')
addr2rc = {}
addr2guts = {}
before = 0
for line in read(fi, re.compile(r'^Remaining object addresses:$'), False):
m = crack.match(line)
if m:
addr, addr2rc[addr], addr2guts[addr] = m.groups()
before += 1
else:
print '??? skipped:', line
after = 0
for line in read(fi, crack, True):
after += 1
m = crack.match(line)
assert m
addr, rc, guts = m.groups() # guts is type name here
if addr not in addr2rc:
print '??? new object created while tearing down:', line.rstrip()
continue
print addr,
if rc == addr2rc[addr]:
print '[%s]' % rc,
else:
print '[%s->%s]' % (addr2rc[addr], rc),
print guts, addr2guts[addr]
f.close()
print "%d objects before, %d after" % (before, after)
if __name__ == '__main__':
combine(sys.argv[1])
| mit |
seize-the-dave/XlsxWriter | xlsxwriter/test/comparison/test_format12.py | 8 | 1456 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'format12.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test a vertical and horizontal centered format."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
top_left_bottom = workbook.add_format({
'left': 1,
'top': 1,
'bottom': 1,
})
top_bottom = workbook.add_format({
'top': 1,
'bottom': 1,
})
top_left = workbook.add_format({
'left': 1,
'top': 1,
})
worksheet.write('B2', 'test', top_left_bottom)
worksheet.write('D2', 'test', top_left)
worksheet.write('F2', 'test', top_bottom)
workbook.close()
self.assertExcelEqual()
| bsd-2-clause |
idlead/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
pdamodaran/yellowbrick | yellowbrick/model_selection/validation_curve.py | 1 | 13903 | # yellowbrick.model_selection.validation_curve
# Implements a visual validation curve for a hyperparameter.
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Sat Mar 31 06:27:28 2018 -0400
#
# ID: validation_curve.py [] benjamin@bengfort.com $
"""
Implements a visual validation curve for a hyperparameter.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from yellowbrick.base import ModelVisualizer
from yellowbrick.style import resolve_colors
from yellowbrick.exceptions import YellowbrickValueError
from sklearn.model_selection import validation_curve as sk_validation_curve
##########################################################################
## ValidationCurve visualizer
##########################################################################
class ValidationCurve(ModelVisualizer):
"""
Visualizes the validation curve for both test and training data for a
range of values for a single hyperparameter of the model. Adjusting the
value of a hyperparameter adjusts the complexity of a model. Less complex
models suffer from increased error due to bias, while more complex models
suffer from increased error due to variance. By inspecting the training
and cross-validated test score error, it is possible to estimate a good
value for a hyperparameter that balances the bias/variance trade-off.
The visualizer evaluates cross-validated training and test scores for the
different hyperparameters supplied. The curve is plotted so that the
x-axis is the value of the hyperparameter and the y-axis is the model
score. This is similar to a grid search with a single hyperparameter.
The cross-validation generator splits the dataset k times, and scores are
averaged over all k runs for the training and test subsets. The curve
plots the mean score, and the filled in area suggests the variability of
cross-validation by plotting one standard deviation above and below the
mean for each split.
Parameters
----------
model : a scikit-learn estimator
An object that implements ``fit`` and ``predict``, can be a
classifier, regressor, or clusterer so long as there is also a valid
associated scoring metric.
Note that the object is cloned for each validation.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
ax : matplotlib.Axes object, optional
The axes object to plot the figure on.
logx : boolean, optional
If True, plots the x-axis with a logarithmic scale.
groups : array-like, with shape (n_samples,)
Optional group labels for the samples used while splitting the dataset
into train/test sets.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
see the scikit-learn
`cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
for more information on the possible strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string or scorer callable object / function with signature
``scorer(estimator, X, y)``. See scikit-learn model evaluation
documentation for names of possible metrics.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
train_scores_ : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
train_scores_mean_ : array, shape (n_ticks,)
Mean training data scores for each training split
train_scores_std_ : array, shape (n_ticks,)
Standard deviation of training data scores for each training split
test_scores_ : array, shape (n_ticks, n_cv_folds)
Scores on test set.
test_scores_mean_ : array, shape (n_ticks,)
Mean test data scores for each test split
test_scores_std_ : array, shape (n_ticks,)
Standard deviation of test data scores for each test split
Examples
--------
>>> import numpy as np
>>> from yellowbrick.model_selection import ValidationCurve
>>> from sklearn.svm import SVC
>>> pr = np.logspace(-6,-1,5)
>>> model = ValidationCurve(SVC(), param_name="gamma", param_range=pr)
>>> model.fit(X, y)
>>> model.poof()
Notes
-----
This visualizer is essentially a wrapper for the
``sklearn.model_selection.validation_curve utility``, discussed in the
`validation curves <http://scikit-learn.org/stable/modules/learning_curve.html#validation-curve>`_
documentation.
.. seealso:: The documentation for the
`validation_curve <http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.validation_curve.html#sklearn.model_selection.validation_curve>`_
function, which this visualizer wraps.
"""
def __init__(self, model, param_name, param_range, ax=None, logx=False,
groups=None, cv=None, scoring=None, n_jobs=1,
pre_dispatch="all", **kwargs):
# Initialize the model visualizer
super(ValidationCurve, self).__init__(model, ax=ax, **kwargs)
# Validate the param_range
param_range = np.asarray(param_range)
if param_range.ndim != 1:
raise YellowbrickValueError(
"must specify array of param values, '{}' is not valid".format(
repr(param_range)
))
# Set the visual and validation curve parameters on the estimator
self.set_params(
param_name=param_name, param_range=param_range, logx=logx,
groups=groups, cv=cv, scoring=scoring, n_jobs=n_jobs,
pre_dispatch=pre_dispatch,
)
def fit(self, X, y=None):
"""
Fits the validation curve with the wrapped estimator and parameter
array to the specified data. Draws training and test score curves and
saves the scores to the visualizer.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
self : instance
Returns the instance of the validation curve visualizer for use in
pipelines and other sequential transformers.
"""
# arguments to pass to sk_validation_curve
skvc_kwargs = {
key: self.get_params()[key]
for key in (
'param_name', 'param_range', 'groups', 'cv', 'scoring',
'n_jobs', 'pre_dispatch',
)
}
# compute the validation curve and store scores
curve = sk_validation_curve(self.estimator, X, y, **skvc_kwargs)
self.train_scores_, self.test_scores_ = curve
# compute the mean and standard deviation of the training data
self.train_scores_mean_ = np.mean(self.train_scores_, axis=1)
self.train_scores_std_ = np.std(self.train_scores_, axis=1)
# compute the mean and standard deviation of the test data
self.test_scores_mean_ = np.mean(self.test_scores_, axis=1)
self.test_scores_std_ = np.std(self.test_scores_, axis=1)
# draw the curves on the current axes
self.draw()
return self
def draw(self, **kwargs):
"""
Renders the training and test curves.
"""
# Specify the curves to draw and their labels
labels = ("Training Score", "Cross Validation Score")
curves = (
(self.train_scores_mean_, self.train_scores_std_),
(self.test_scores_mean_, self.test_scores_std_),
)
# Get the colors for the train and test curves
colors = resolve_colors(n_colors=2)
# Plot the fill betweens first so they are behind the curves.
for idx, (mean, std) in enumerate(curves):
# Plot one standard deviation above and below the mean
self.ax.fill_between(
self.param_range, mean - std, mean+std, alpha=0.25,
color=colors[idx],
)
# Plot the mean curves so they are in front of the variance fill
for idx, (mean, _) in enumerate(curves):
self.ax.plot(
self.param_range, mean, 'd-', color=colors[idx],
label=labels[idx],
)
if self.logx:
self.ax.set_xscale('log')
return self.ax
def finalize(self, **kwargs):
"""
Add the title, legend, and other visual final touches to the plot.
"""
# Set the title of the figure
self.set_title('Validation Curve for {}'.format(self.name))
# Add the legend
self.ax.legend(frameon=True, loc='best')
# Set the axis labels
self.ax.set_xlabel(self.param_name)
self.ax.set_ylabel('score')
##########################################################################
## Quick Method
##########################################################################
def validation_curve(model, X, y, param_name, param_range, ax=None, logx=False,
groups=None, cv=None, scoring=None, n_jobs=1,
pre_dispatch="all", **kwargs):
"""
Displays a validation curve for the specified param and values, plotting
both the train and cross-validated test scores. The validation curve is a
visual, single-parameter grid search used to tune a model to find the best
balance between error due to bias and error due to variance.
This helper function is a wrapper to use the ValidationCurve in a fast,
visual analysis.
Parameters
----------
model : a scikit-learn estimator
An object that implements ``fit`` and ``predict``, can be a
classifier, regressor, or clusterer so long as there is also a valid
associated scoring metric.
Note that the object is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
ax : matplotlib.Axes object, optional
The axes object to plot the figure on.
logx : boolean, optional
If True, plots the x-axis with a logarithmic scale.
groups : array-like, with shape (n_samples,)
Optional group labels for the samples used while splitting the dataset
into train/test sets.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
see the scikit-learn
`cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
for more information on the possible strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string or scorer callable object / function with signature
``scorer(estimator, X, y)``. See scikit-learn model evaluation
documentation for names of possible metrics.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers. These arguments are
also passed to the `poof()` method, e.g. can pass a path to save the
figure to.
Returns
-------
ax : matplotlib.Axes
The axes object that the validation curves were drawn on.
"""
# Initialize the visualizer
oz = ValidationCurve(
model, param_name, param_range, ax=ax, logx=logx, groups=groups,
cv=cv, scoring=scoring, n_jobs=n_jobs, pre_dispatch=pre_dispatch
)
# Fit and poof the visualizer
oz.fit(X, y)
oz.poof(**kwargs)
return oz.ax
| apache-2.0 |
cicku/fedmsg | fedmsg/commands/hub.py | 5 | 3437 | # This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
import fedmsg
from fedmsg.utils import load_class
from fedmsg.commands import BaseCommand
class HubCommand(BaseCommand):
""" Run the fedmsg hub.
``fedmsg-hub`` is the all-purpose daemon. This should be run on every host
that has services which declare their own consumers. ``fedmsg-hub`` will
listen to every endpoint discovered by :mod:`fedmsg.config` and forward
messages in-process to the locally-declared consumers. It is a thin
wrapper over a moksha-hub.
Other commands like ``fedmsg-irc`` are just specialized, restricted
versions of ``fedmsg-hub``. ``fedmsg-hub`` is the most general/abstract.
``fedmsg-hub`` also houses the functions to run a websocket server.
"""
name = 'fedmsg-hub'
daemonizable = True
extra_args = [
(['--with-consumers'], {
'dest': 'explicit_hub_consumers',
'type': str,
            'help': 'A comma-delimited list of consumers to run.',
'default': None,
}),
(['--websocket-server-port'], {
'dest': 'moksha.livesocket.websocket.port',
'type': int,
'help': 'Port on which to host the websocket server.',
'default': None,
}),
]
def run(self):
# Check if the user wants the websocket server to run
if self.config['moksha.livesocket.websocket.port']:
self.config['moksha.livesocket.backend'] = 'websocket'
# If the user wants to override any consumers installed on the system
# and *only* run the ones they want to, they can do that.
consumers = None
if self.config['explicit_hub_consumers']:
locations = self.config['explicit_hub_consumers'].split(',')
            consumers = [load_class(location) for location in locations]
# Rephrase the fedmsg-config.py config as moksha *.ini format.
# Note that the hub we kick off here cannot send any message. You
        # should still use fedmsg.publish(...) for that.
moksha_options = dict(
zmq_subscribe_endpoints=','.join(
','.join(bunch) for bunch in self.config['endpoints'].values()
),
)
self.config.update(moksha_options)
from moksha.hub import main
main(
# Pass in our config dict
options=self.config,
# Only run the specified consumers if any are so specified.
consumers=consumers,
# Tell moksha to quiet its logging.
framework=False,
)
def hub():
command = HubCommand()
command.execute()
| lgpl-2.1 |
ax003d/openerp | openerp/addons/project/res_config.py | 55 | 4724 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class project_configuration(osv.osv_memory):
_name = 'project.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_project_mrp': fields.boolean('Generate tasks from sale orders',
help ="""This feature automatically creates project tasks from service products in sale orders.
More precisely, tasks are created for procurement lines with product of type 'Service',
procurement method 'Make to Order', and supply method 'Manufacture'.
This installs the module project_mrp."""),
'module_pad': fields.boolean("Use integrated collaborative note pads on task",
help="""Lets the company customize which Pad installation should be used to link to new pads
(by default, http://ietherpad.com/).
This installs the module pad."""),
'module_project_timesheet': fields.boolean("Record timesheet lines per tasks",
help="""This allows you to transfer the entries under tasks defined for Project Management to
the timesheet line entries for particular date and user, with the effect of creating,
editing and deleting either ways.
This installs the module project_timesheet."""),
'module_project_long_term': fields.boolean("Manage resources planning on gantt view",
help="""A long term project management module that tracks planning, scheduling, and resource allocation.
This installs the module project_long_term."""),
'module_project_issue': fields.boolean("Track issues and bugs",
help="""Provides management of issues/bugs in projects.
This installs the module project_issue."""),
'time_unit': fields.many2one('product.uom', 'Working time unit', required=True,
help="""This will set the unit of measure used in projects and tasks."""),
'module_project_issue_sheet': fields.boolean("Invoice working time on issues",
help="""Provides timesheet support for the issues/bugs management in project.
This installs the module project_issue_sheet."""),
'group_tasks_work_on_tasks': fields.boolean("Log work activities on tasks",
implied_group='project.group_tasks_work_on_tasks',
help="Allows you to compute work on tasks."),
'group_time_work_estimation_tasks': fields.boolean("Manage time estimation on tasks",
implied_group='project.group_time_work_estimation_tasks',
help="Allows you to compute Time Estimation on tasks."),
'group_manage_delegation_task': fields.boolean("Allow task delegation",
implied_group='project.group_delegate_task',
help="Allows you to delegate tasks to other users."),
}
def get_default_time_unit(self, cr, uid, fields, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return {'time_unit': user.company_id.project_time_mode_id.id}
def set_time_unit(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context)
user = self.pool.get('res.users').browse(cr, uid, uid, context)
user.company_id.write({'project_time_mode_id': config.time_unit.id})
def onchange_time_estimation_project_timesheet(self, cr, uid, ids, group_time_work_estimation_tasks, module_project_timesheet):
if group_time_work_estimation_tasks or module_project_timesheet:
return {'value': {'group_tasks_work_on_tasks': True}}
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
syrrim/werkzeug | examples/shortly/shortly.py | 32 | 4630 | # -*- coding: utf-8 -*-
"""
shortly
~~~~~~~
A simple URL shortener using Werkzeug and redis.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import redis
import urlparse
from werkzeug.wrappers import Request, Response
from werkzeug.routing import Map, Rule
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.wsgi import SharedDataMiddleware
from werkzeug.utils import redirect
from jinja2 import Environment, FileSystemLoader
def base36_encode(number):
    assert number >= 0, 'non-negative integer required'
if number == 0:
return '0'
base36 = []
while number != 0:
number, i = divmod(number, 36)
base36.append('0123456789abcdefghijklmnopqrstuvwxyz'[i])
return ''.join(reversed(base36))
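# Worked examples, easy to verify by hand against the digit string above:
#   base36_encode(0)  -> '0'
#   base36_encode(35) -> 'z'
#   base36_encode(36) -> '10'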
def is_valid_url(url):
parts = urlparse.urlparse(url)
return parts.scheme in ('http', 'https')
def get_hostname(url):
return urlparse.urlparse(url).netloc
class Shortly(object):
def __init__(self, config):
self.redis = redis.Redis(config['redis_host'], config['redis_port'])
template_path = os.path.join(os.path.dirname(__file__), 'templates')
self.jinja_env = Environment(loader=FileSystemLoader(template_path),
autoescape=True)
self.jinja_env.filters['hostname'] = get_hostname
self.url_map = Map([
Rule('/', endpoint='new_url'),
Rule('/<short_id>', endpoint='follow_short_link'),
Rule('/<short_id>+', endpoint='short_link_details')
])
def on_new_url(self, request):
error = None
url = ''
if request.method == 'POST':
url = request.form['url']
if not is_valid_url(url):
error = 'Please enter a valid URL'
else:
short_id = self.insert_url(url)
return redirect('/%s+' % short_id)
return self.render_template('new_url.html', error=error, url=url)
def on_follow_short_link(self, request, short_id):
link_target = self.redis.get('url-target:' + short_id)
if link_target is None:
raise NotFound()
self.redis.incr('click-count:' + short_id)
return redirect(link_target)
def on_short_link_details(self, request, short_id):
link_target = self.redis.get('url-target:' + short_id)
if link_target is None:
raise NotFound()
click_count = int(self.redis.get('click-count:' + short_id) or 0)
return self.render_template('short_link_details.html',
link_target=link_target,
short_id=short_id,
click_count=click_count
)
def error_404(self):
response = self.render_template('404.html')
response.status_code = 404
return response
def insert_url(self, url):
short_id = self.redis.get('reverse-url:' + url)
if short_id is not None:
return short_id
url_num = self.redis.incr('last-url-id')
short_id = base36_encode(url_num)
self.redis.set('url-target:' + short_id, url)
self.redis.set('reverse-url:' + url, short_id)
return short_id
def render_template(self, template_name, **context):
t = self.jinja_env.get_template(template_name)
return Response(t.render(context), mimetype='text/html')
def dispatch_request(self, request):
adapter = self.url_map.bind_to_environ(request.environ)
try:
endpoint, values = adapter.match()
return getattr(self, 'on_' + endpoint)(request, **values)
except NotFound, e:
return self.error_404()
except HTTPException, e:
return e
def wsgi_app(self, environ, start_response):
request = Request(environ)
response = self.dispatch_request(request)
return response(environ, start_response)
def __call__(self, environ, start_response):
return self.wsgi_app(environ, start_response)
def create_app(redis_host='localhost', redis_port=6379, with_static=True):
app = Shortly({
'redis_host': redis_host,
'redis_port': redis_port
})
if with_static:
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
'/static': os.path.join(os.path.dirname(__file__), 'static')
})
return app
if __name__ == '__main__':
from werkzeug.serving import run_simple
app = create_app()
run_simple('127.0.0.1', 5000, app, use_debugger=True, use_reloader=True)
| bsd-3-clause |
tjhei/burnman | tests/test_spin.py | 7 | 1130 | from __future__ import absolute_import
import unittest
from util import BurnManTest
import os
import sys
sys.path.insert(1, os.path.abspath('..'))
import burnman
from burnman import minerals
class spin_transition(BurnManTest):
def test_new(self):
mins = [
minerals.Murakami_etal_2012.fe_periclase(), minerals.Murakami_etal_2012.fe_periclase_HS(), minerals.Murakami_etal_2012.fe_periclase_LS()]
for p in mins:
p.set_method('slb2')
# print "HS regime: (on/high/low)"
for p in mins:
p.set_state(5e9, 300)
self.assertFloatEqual(mins[0].v_s, mins[1].v_s)
# print "LS regime: (on/high/low)"
for p in mins:
p.set_state(70e9, 300)
self.assertFloatEqual(mins[0].v_s, mins[2].v_s)
def test_no_set_state(self):
m = minerals.Murakami_etal_2012.fe_periclase()
m.set_state(5e9, 300)
self.assertArraysAlmostEqual(m.molar_fractions, [0.0, 1.0])
m.set_state(70e9, 300)
self.assertArraysAlmostEqual(m.molar_fractions, [1.0, 0.0])
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
patdoyle1/FastMath | lib/python2.7/site-packages/pip/cmdoptions.py | 361 | 9507 | """
shared options and groups
The principle here is to define options once, but *not* instantiate them globally.
One reason being that options with action='append' can carry state between parses.
pip parses general options twice internally, and shouldn't pass on state.
To be consistent, all options will follow this design.
"""
import copy
from optparse import OptionGroup, SUPPRESS_HELP, Option
from pip.locations import build_prefix, default_log_file
def make_option_group(group, parser):
"""
Return an OptionGroup object
group -- assumed to be dict with 'name' and 'options' keys
parser -- an optparse Parser
"""
option_group = OptionGroup(parser, group['name'])
for option in group['options']:
option_group.add_option(option.make())
return option_group
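# For example, a command would typically populate its parser with one of the
# groups defined at the bottom of this module:
#
#   parser.add_option_group(make_option_group(index_group, parser))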
class OptionMaker(object):
"""Class that stores the args/kwargs that would be used to make an Option,
for making them later, and uses deepcopy's to reset state."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def make(self):
args_copy = copy.deepcopy(self.args)
kwargs_copy = copy.deepcopy(self.kwargs)
return Option(*args_copy, **kwargs_copy)
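# Why the copies matter: optparse does not deep-copy an Option's 'default'
# object when building parser defaults, so an option with action='append'
# and default=[] would accumulate values across pip's two parses if a
# single instance were shared. A sketch, mirroring the find_links option
# defined below:
#
#   maker = OptionMaker('-f', '--find-links', dest='find_links',
#                       action='append', default=[], metavar='url')
#   opt_a = maker.make()  # fresh Option (and fresh default list)
#   opt_b = maker.make()  # independent of opt_a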
###########
# options #
###########
help_ = OptionMaker(
'-h', '--help',
dest='help',
action='help',
help='Show help.')
require_virtualenv = OptionMaker(
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP)
verbose = OptionMaker(
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.')
version = OptionMaker(
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.')
quiet = OptionMaker(
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help='Give less output.')
log = OptionMaker(
'--log',
dest='log',
metavar='path',
help='Path to a verbose appending log. This log is inactive by default.')
log_explicit_levels = OptionMaker(
# Writes the log levels explicitely to the log'
'--log-explicit-levels',
dest='log_explicit_levels',
action='store_true',
default=False,
help=SUPPRESS_HELP)
log_file = OptionMaker(
# The default log file
'--log-file', '--local-log',
dest='log_file',
metavar='path',
default=default_log_file,
    help='Path to a verbose non-appending log that only logs failures. This log is active by default at %default.')
no_input = OptionMaker(
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP)
proxy = OptionMaker(
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port.")
timeout = OptionMaker(
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).')
default_vcs = OptionMaker(
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=SUPPRESS_HELP)
skip_requirements_regex = OptionMaker(
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP)
exists_action = OptionMaker(
# Option when path already exist
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup.")
cert = OptionMaker(
'--cert',
dest='cert',
type='str',
default='',
metavar='path',
help = "Path to alternate CA bundle.")
index_url = OptionMaker(
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default='https://pypi.python.org/simple/',
help='Base URL of Python Package Index (default %default).')
extra_index_url = OptionMaker(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help='Extra URLs of package indexes to use in addition to --index-url.')
no_index = OptionMaker(
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).')
find_links = OptionMaker(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='url',
help="If a url or path to an html file, then parse for links to archives. If a local path or file:// url that's a directory, then look for archives in the directory listing.")
# TODO: Remove after 1.6
use_mirrors = OptionMaker(
'-M', '--use-mirrors',
dest='use_mirrors',
action='store_true',
default=False,
help=SUPPRESS_HELP)
# TODO: Remove after 1.6
mirrors = OptionMaker(
'--mirrors',
dest='mirrors',
metavar='URL',
action='append',
default=[],
help=SUPPRESS_HELP)
allow_external = OptionMaker(
"--allow-external",
dest="allow_external",
action="append",
default=[],
metavar="PACKAGE",
help="Allow the installation of externally hosted files",
)
allow_all_external = OptionMaker(
"--allow-all-external",
dest="allow_all_external",
action="store_true",
default=False,
help="Allow the installation of all externally hosted files",
)
# Remove after 1.7
no_allow_external = OptionMaker(
"--no-allow-external",
dest="allow_all_external",
action="store_false",
default=False,
help=SUPPRESS_HELP,
)
# Remove --allow-insecure after 1.7
allow_unsafe = OptionMaker(
"--allow-unverified", "--allow-insecure",
dest="allow_unverified",
action="append",
default=[],
metavar="PACKAGE",
help="Allow the installation of insecure and unverifiable files",
)
# Remove after 1.7
no_allow_unsafe = OptionMaker(
"--no-allow-insecure",
dest="allow_all_insecure",
action="store_false",
default=False,
help=SUPPRESS_HELP
)
# Remove after 1.5
process_dependency_links = OptionMaker(
"--process-dependency-links",
dest="process_dependency_links",
action="store_true",
default=False,
help="Enable the processing of dependency links.",
)
requirements = OptionMaker(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Install from the given requirements file. '
'This option can be used multiple times.')
use_wheel = OptionMaker(
'--use-wheel',
dest='use_wheel',
action='store_true',
help=SUPPRESS_HELP,
)
no_use_wheel = OptionMaker(
'--no-use-wheel',
dest='use_wheel',
action='store_false',
default=True,
    help=('Do not find and prefer wheel archives when searching indexes and '
          'find-links locations.'),
)
download_cache = OptionMaker(
'--download-cache',
dest='download_cache',
metavar='dir',
default=None,
help='Cache downloaded packages in <dir>.')
no_deps = OptionMaker(
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.")
build_dir = OptionMaker(
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
default=build_prefix,
help='Directory to unpack packages into and build in. '
'The default in a virtualenv is "<venv path>/build". '
'The default for global installs is "<OS temp dir>/pip_build_<username>".')
install_options = OptionMaker(
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/bin\"). "
"Use multiple --install-option options to pass multiple options to setup.py install. "
"If you are using an option with a directory path, be sure to use absolute path.")
global_options = OptionMaker(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.")
no_clean = OptionMaker(
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories.")
##########
# groups #
##########
general_group = {
'name': 'General Options',
'options': [
help_,
require_virtualenv,
verbose,
version,
quiet,
log_file,
log,
log_explicit_levels,
no_input,
proxy,
timeout,
default_vcs,
skip_requirements_regex,
exists_action,
cert,
]
}
index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
use_mirrors,
mirrors,
allow_external,
allow_all_external,
no_allow_external,
allow_unsafe,
no_allow_unsafe,
process_dependency_links,
]
}
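# Illustrative sketch (not part of the original module): one way a command
# could attach these group dicts to its optparse parser. It assumes
# OptionMaker exposes a make() method that builds a fresh optparse.Option;
# that method name and signature are assumptions, not confirmed by this file.
# import optparse
# def make_option_group(group, parser):
#     option_group = optparse.OptionGroup(parser, group['name'])
#     for option in group['options']:
#         option_group.add_option(option.make())
#     return option_group
# parser = optparse.OptionParser()
# parser.add_option_group(make_option_group(index_group, parser))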
| gpl-2.0 |
polyval/CNC | flask/Lib/site-packages/sqlalchemy/util/deprecations.py | 55 | 4403 | # util/deprecations.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Helpers related to deprecation of functions, methods, classes, other
functionality."""
import re
import textwrap
import warnings
from .. import exc
from .langhelpers import decorator
def warn_deprecated(msg, stacklevel=3):
warnings.warn(msg, exc.SADeprecationWarning, stacklevel=stacklevel)
def warn_pending_deprecation(msg, stacklevel=3):
warnings.warn(msg, exc.SAPendingDeprecationWarning, stacklevel=stacklevel)
def deprecated(version, message=None, add_deprecation_to_docstring=True):
"""Decorates a function and issues a deprecation warning on use.
:param message:
If provided, issue message in the warning. A sensible default
is used if not provided.
:param add_deprecation_to_docstring:
Default True. If False, the wrapped function's __doc__ is left
as-is. If True, the 'message' is prepended to the docs if
provided, or sensible default if message is omitted.
"""
if add_deprecation_to_docstring:
header = ".. deprecated:: %s %s" % \
(version, (message or ''))
else:
header = None
if message is None:
message = "Call to deprecated function %(func)s"
def decorate(fn):
return _decorate_with_warning(
fn, exc.SADeprecationWarning,
message % dict(func=fn.__name__), header)
return decorate
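# Example usage (illustrative, not part of the original module; names below
# are hypothetical). Calls then emit SADeprecationWarning, and the docstring
# gains a ".. deprecated::" header via _decorate_with_warning().
# @deprecated("0.9", message="Use new_function() instead")
# def old_function():
#     """Original docstring."""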
def pending_deprecation(version, message=None,
add_deprecation_to_docstring=True):
"""Decorates a function and issues a pending deprecation warning on use.
:param version:
An approximate future version at which point the pending deprecation
will become deprecated. Not used in messaging.
:param message:
If provided, issue message in the warning. A sensible default
is used if not provided.
:param add_deprecation_to_docstring:
Default True. If False, the wrapped function's __doc__ is left
as-is. If True, the 'message' is prepended to the docs if
provided, or sensible default if message is omitted.
"""
if add_deprecation_to_docstring:
header = ".. deprecated:: %s (pending) %s" % \
(version, (message or ''))
else:
header = None
if message is None:
message = "Call to deprecated function %(func)s"
def decorate(fn):
return _decorate_with_warning(
fn, exc.SAPendingDeprecationWarning,
message % dict(func=fn.__name__), header)
return decorate
def _sanitize_restructured_text(text):
def repl(m):
type_, name = m.group(1, 2)
if type_ in ("func", "meth"):
name += "()"
return name
return re.sub(r'\:(\w+)\:`~?\.?(.+?)`', repl, text)
def _decorate_with_warning(func, wtype, message, docstring_header=None):
"""Wrap a function with a warnings.warn and augmented docstring."""
message = _sanitize_restructured_text(message)
@decorator
def warned(fn, *args, **kwargs):
warnings.warn(message, wtype, stacklevel=3)
return fn(*args, **kwargs)
    doc = func.__doc__ or ''
if docstring_header is not None:
docstring_header %= dict(func=func.__name__)
doc = inject_docstring_text(doc, docstring_header, 1)
decorated = warned(func)
decorated.__doc__ = doc
return decorated
def _dedent_docstring(text):
split_text = text.split("\n", 1)
if len(split_text) == 1:
return text
else:
firstline, remaining = split_text
if not firstline.startswith(" "):
return firstline + "\n" + textwrap.dedent(remaining)
else:
return textwrap.dedent(text)
def inject_docstring_text(doctext, injecttext, pos):
doctext = _dedent_docstring(doctext or "")
lines = doctext.split('\n')
injectlines = textwrap.dedent(injecttext).split("\n")
if injectlines[0]:
injectlines.insert(0, "")
blanks = [num for num, line in enumerate(lines) if not line.strip()]
blanks.insert(0, 0)
inject_pos = blanks[min(pos, len(blanks) - 1)]
lines = lines[0:inject_pos] + injectlines + lines[inject_pos:]
return "\n".join(lines)
| apache-2.0 |
cecep-edu/edx-platform | lms/djangoapps/class_dashboard/tests/test_dashboard_data.py | 88 | 13672 | """
Tests for class dashboard (Metrics tab in instructor dashboard)
"""
import json
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from mock import patch
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import StringResponseXMLFactory
from courseware.tests.factories import StudentModuleFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory, AdminFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from class_dashboard.dashboard_data import (
get_problem_grade_distribution, get_sequential_open_distrib,
get_problem_set_grade_distrib, get_d3_problem_grade_distrib,
get_d3_sequential_open_distrib, get_d3_section_grade_distrib,
get_section_display_name, get_array_section_has_problem,
get_students_opened_subsection, get_students_problem_grades,
)
from class_dashboard.views import has_instructor_access_for_class
USER_COUNT = 11
@attr('shard_1')
class TestGetProblemGradeDistribution(SharedModuleStoreTestCase):
"""
Tests related to class_dashboard/dashboard_data.py
"""
@classmethod
def setUpClass(cls):
super(TestGetProblemGradeDistribution, cls).setUpClass()
cls.course = CourseFactory.create(
display_name=u"test course omega \u03a9",
)
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
section = ItemFactory.create(
parent_location=cls.course.location,
category="chapter",
display_name=u"test factory section omega \u03a9",
)
cls.sub_section = ItemFactory.create(
parent_location=section.location,
category="sequential",
display_name=u"test subsection omega \u03a9",
)
cls.unit = ItemFactory.create(
parent_location=cls.sub_section.location,
category="vertical",
metadata={'graded': True, 'format': 'Homework'},
display_name=u"test unit omega \u03a9",
)
cls.items = []
for i in xrange(USER_COUNT - 1):
item = ItemFactory.create(
parent_location=cls.unit.location,
category="problem",
data=StringResponseXMLFactory().build_xml(answer='foo'),
metadata={'rerandomize': 'always'},
display_name=u"test problem omega \u03a9 " + str(i)
)
cls.items.append(item)
cls.item = item
def setUp(self):
super(TestGetProblemGradeDistribution, self).setUp()
self.request_factory = RequestFactory()
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password='test')
self.attempts = 3
self.users = [
UserFactory.create(username="metric" + str(__))
for __ in xrange(USER_COUNT)
]
for user in self.users:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
for i, item in enumerate(self.items):
for j, user in enumerate(self.users):
StudentModuleFactory.create(
grade=1 if i < j else 0,
max_grade=1 if i < j else 0.5,
student=user,
course_id=self.course.id,
module_state_key=item.location,
state=json.dumps({'attempts': self.attempts}),
)
for j, user in enumerate(self.users):
StudentModuleFactory.create(
course_id=self.course.id,
module_type='sequential',
module_state_key=item.location,
)
def test_get_problem_grade_distribution(self):
prob_grade_distrib, total_student_count = get_problem_grade_distribution(self.course.id)
for problem in prob_grade_distrib:
max_grade = prob_grade_distrib[problem]['max_grade']
self.assertEquals(1, max_grade)
for val in total_student_count.values():
self.assertEquals(USER_COUNT, val)
    def test_get_sequential_open_distribution(self):
sequential_open_distrib = get_sequential_open_distrib(self.course.id)
for problem in sequential_open_distrib:
num_students = sequential_open_distrib[problem]
self.assertEquals(USER_COUNT, num_students)
def test_get_problemset_grade_distrib(self):
prob_grade_distrib, __ = get_problem_grade_distribution(self.course.id)
probset_grade_distrib = get_problem_set_grade_distrib(self.course.id, prob_grade_distrib)
for problem in probset_grade_distrib:
max_grade = probset_grade_distrib[problem]['max_grade']
self.assertEquals(1, max_grade)
grade_distrib = probset_grade_distrib[problem]['grade_distrib']
sum_attempts = 0
for item in grade_distrib:
sum_attempts += item[1]
self.assertEquals(USER_COUNT, sum_attempts)
def test_get_d3_problem_grade_distrib(self):
d3_data = get_d3_problem_grade_distrib(self.course.id)
for data in d3_data:
for stack_data in data['data']:
sum_values = 0
for problem in stack_data['stackData']:
sum_values += problem['value']
self.assertEquals(USER_COUNT, sum_values)
def test_get_d3_sequential_open_distrib(self):
d3_data = get_d3_sequential_open_distrib(self.course.id)
for data in d3_data:
for stack_data in data['data']:
for problem in stack_data['stackData']:
value = problem['value']
self.assertEquals(0, value)
def test_get_d3_section_grade_distrib(self):
d3_data = get_d3_section_grade_distrib(self.course.id, 0)
for stack_data in d3_data:
sum_values = 0
for problem in stack_data['stackData']:
sum_values += problem['value']
self.assertEquals(USER_COUNT, sum_values)
def test_get_students_problem_grades(self):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_problem_grades') + attributes)
response = get_students_problem_grades(request)
response_content = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
self.assertEquals(USER_COUNT, len(response_content))
self.assertEquals(False, response_max_exceeded)
for item in response_content:
if item['grade'] == 0:
self.assertEquals(0, item['percent'])
else:
self.assertEquals(100, item['percent'])
def test_get_students_problem_grades_max(self):
with patch('class_dashboard.dashboard_data.MAX_SCREEN_LIST_LENGTH', 2):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_problem_grades') + attributes)
response = get_students_problem_grades(request)
response_results = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
# Only 2 students in the list and response_max_exceeded is True
self.assertEquals(2, len(response_results))
self.assertEquals(True, response_max_exceeded)
def test_get_students_problem_grades_csv(self):
tooltip = 'P1.2.1 Q1 - 3382 Students (100%: 1/1 questions)'
attributes = '?module_id=' + self.item.location.to_deprecated_string() + '&tooltip=' + tooltip + '&csv=true'
request = self.request_factory.get(reverse('get_students_problem_grades') + attributes)
response = get_students_problem_grades(request)
# Check header and a row for each student in csv response
self.assertContains(response, '"Name","Username","Grade","Percent"')
self.assertContains(response, '"metric0","0.0","0.0"')
self.assertContains(response, '"metric1","0.0","0.0"')
self.assertContains(response, '"metric2","0.0","0.0"')
self.assertContains(response, '"metric3","0.0","0.0"')
self.assertContains(response, '"metric4","0.0","0.0"')
self.assertContains(response, '"metric5","0.0","0.0"')
self.assertContains(response, '"metric6","0.0","0.0"')
self.assertContains(response, '"metric7","0.0","0.0"')
self.assertContains(response, '"metric8","0.0","0.0"')
self.assertContains(response, '"metric9","0.0","0.0"')
self.assertContains(response, '"metric10","1.0","100.0"')
def test_get_students_opened_subsection(self):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_opened_subsection') + attributes)
response = get_students_opened_subsection(request)
response_results = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
self.assertEquals(USER_COUNT, len(response_results))
self.assertEquals(False, response_max_exceeded)
def test_get_students_opened_subsection_max(self):
with patch('class_dashboard.dashboard_data.MAX_SCREEN_LIST_LENGTH', 2):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_opened_subsection') + attributes)
response = get_students_opened_subsection(request)
response_results = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
# Only 2 students in the list and response_max_exceeded is True
self.assertEquals(2, len(response_results))
self.assertEquals(True, response_max_exceeded)
def test_get_students_opened_subsection_csv(self):
tooltip = '4162 students opened Subsection 5: Relational Algebra Exercises'
attributes = '?module_id=' + self.item.location.to_deprecated_string() + '&tooltip=' + tooltip + '&csv=true'
request = self.request_factory.get(reverse('get_students_opened_subsection') + attributes)
response = get_students_opened_subsection(request)
self.assertContains(response, '"Name","Username"')
# Check response contains 1 line for each user +1 for the header
self.assertEquals(USER_COUNT + 1, len(response.content.splitlines()))
def test_post_metrics_data_subsections_csv(self):
url = reverse('post_metrics_data_csv')
sections = json.dumps(["Introduction"])
tooltips = json.dumps([[{"subsection_name": "Pre-Course Survey", "subsection_num": 1, "type": "subsection", "num_students": 18963}]])
course_id = self.course.id
data_type = 'subsection'
data = json.dumps({'sections': sections,
'tooltips': tooltips,
'course_id': course_id.to_deprecated_string(),
'data_type': data_type,
})
response = self.client.post(url, {'data': data})
# Check response contains 1 line for header, 1 line for Section and 1 line for Subsection
self.assertEquals(3, len(response.content.splitlines()))
def test_post_metrics_data_problems_csv(self):
url = reverse('post_metrics_data_csv')
sections = json.dumps(["Introduction"])
tooltips = json.dumps([[[
{'student_count_percent': 0,
'problem_name': 'Q1',
'grade': 0,
'percent': 0,
'label': 'P1.2.1',
'max_grade': 1,
'count_grade': 26,
'type': u'problem'},
{'student_count_percent': 99,
'problem_name': 'Q1',
'grade': 1,
'percent': 100,
'label': 'P1.2.1',
'max_grade': 1,
'count_grade': 4763,
'type': 'problem'},
]]])
course_id = self.course.id
data_type = 'problem'
data = json.dumps({'sections': sections,
'tooltips': tooltips,
'course_id': course_id.to_deprecated_string(),
'data_type': data_type,
})
response = self.client.post(url, {'data': data})
# Check response contains 1 line for header, 1 line for Sections and 2 lines for problems
self.assertEquals(4, len(response.content.splitlines()))
def test_get_section_display_name(self):
section_display_name = get_section_display_name(self.course.id)
self.assertMultiLineEqual(section_display_name[0], u"test factory section omega \u03a9")
def test_get_array_section_has_problem(self):
b_section_has_problem = get_array_section_has_problem(self.course.id)
self.assertEquals(b_section_has_problem[0], True)
def test_has_instructor_access_for_class(self):
"""
Test for instructor access
"""
ret_val = bool(has_instructor_access_for_class(self.instructor, self.course.id))
self.assertEquals(ret_val, True)
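# Illustrative summary (not part of the original tests): the assertions above
# imply return shapes roughly like the following; the keys shown are sketches.
# get_problem_grade_distribution(course_id) ->
#     ({<problem>: {'max_grade': 1, ...}}, {<problem>: USER_COUNT})
# get_problem_set_grade_distrib(course_id, prob_grade_distrib) ->
#     {<problem>: {'max_grade': 1, 'grade_distrib': [(grade, num_students), ...]}}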
| agpl-3.0 |
ryfeus/lambda-packs | Tensorflow/source/setuptools/command/alias.py | 455 | 2426 | from distutils.errors import DistutilsOptionError
from setuptools.extern.six.moves import map
from setuptools.command.setopt import edit_config, option_base, config_file
def shquote(arg):
"""Quote an argument for later parsing by shlex.split()"""
for c in '"', "'", "\\", "#":
if c in arg:
return repr(arg)
if arg.split() != [arg]:
return repr(arg)
return arg
class alias(option_base):
"""Define a shortcut that invokes one or more commands"""
description = "define a shortcut to invoke one or more commands"
command_consumes_arguments = True
user_options = [
('remove', 'r', 'remove (unset) the alias'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.args = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.remove and len(self.args) != 1:
raise DistutilsOptionError(
"Must specify exactly one argument (the alias name) when "
"using --remove"
)
def run(self):
aliases = self.distribution.get_option_dict('aliases')
if not self.args:
print("Command Aliases")
print("---------------")
for alias in aliases:
print("setup.py alias", format_alias(alias, aliases))
return
elif len(self.args) == 1:
alias, = self.args
if self.remove:
command = None
elif alias in aliases:
print("setup.py alias", format_alias(alias, aliases))
return
else:
print("No alias definition found for %r" % alias)
return
else:
alias = self.args[0]
command = ' '.join(map(shquote, self.args[1:]))
edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
def format_alias(name, aliases):
source, command = aliases[name]
if source == config_file('global'):
source = '--global-config '
elif source == config_file('user'):
source = '--user-config '
elif source == config_file('local'):
source = ''
else:
source = '--filename=%r' % source
return source + name + ' ' + command
| mit |
ProjectVault/orp | third-party/qemu-orp/scripts/ordereddict.py | 1047 | 4094 | # Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
            raise TypeError('expected at most 1 argument, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
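# Illustrative usage (not part of the original backport): insertion order is
# preserved, and popitem() pops from the end unless last=False.
if __name__ == '__main__':
    d = OrderedDict([('a', 1), ('b', 2)])
    d['c'] = 3
    assert d.keys() == ['a', 'b', 'c']
    assert d.popitem() == ('c', 3)
    assert d.popitem(last=False) == ('a', 1)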
| apache-2.0 |
appleseedhq/cortex | test/IECoreScene/MeshMergeOpTest.py | 1 | 11371 | ##########################################################################
#
# Copyright (c) 2008-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import IECoreScene
import imath
import math
class MeshMergeOpTest( unittest.TestCase ) :
def verifyPrimvars( self, primitive ):
for v in primitive.keys():
self.failUnless( primitive.isPrimitiveVariableValid(primitive[v]), "invalid primvar {0}".format( v ) )
def verifyMerge( self, mesh1, mesh2, merged ) :
self.verifyPrimvars( mesh1 )
self.verifyPrimvars( mesh2 )
self.verifyPrimvars( merged )
for v in IECoreScene.PrimitiveVariable.Interpolation.values :
i = IECoreScene.PrimitiveVariable.Interpolation( v )
if i!=IECoreScene.PrimitiveVariable.Interpolation.Invalid and i!=IECoreScene.PrimitiveVariable.Interpolation.Constant :
self.assertEqual( merged.variableSize( i ), mesh1.variableSize( i ) + mesh2.variableSize( i ) )
self.verifyData( mesh1, mesh2, merged )
self.verifyData( mesh2, mesh1, merged, flipped=True )
def verifyData( self, meshA, meshB, merged, flipped=False ) :
for name in meshA.keys() :
self.failUnless( name in merged )
interpolation = meshA[name].interpolation
if merged[name].indices :
self.assertEqual( len(merged[name].indices), meshA.variableSize( interpolation ) + meshB.variableSize( interpolation ) )
else :
self.assertEqual( len(merged[name].data), meshA.variableSize( interpolation ) + meshB.variableSize( interpolation ) )
offset = meshB.variableSize( interpolation ) if flipped else 0
if merged[name].indices and meshA[name].indices :
for i in range( 0, len(meshA[name].indices) ) :
index = merged[name].indices[offset + i]
indexA = meshA[name].indices[i]
self.assertEqual( index, indexA + offset if flipped else indexA )
self.assertEqual( merged[name].data[index], meshA[name].data[indexA] )
elif merged[name].indices :
for i in range( 0, len(meshA[name].data) ) :
index = merged[name].indices[offset + i]
indexA = offset + i
self.assertEqual( index, indexA )
self.assertEqual( merged[name].data[index], meshA[name].data[i] )
elif meshA[name].indices :
for i in range( 0, len(meshA[name].indices) ) :
indexA = meshA[name].indices[i]
self.assertEqual( merged[name].data[offset + i], meshA[name].data[indexA] )
else :
for i in range( 0, len(meshA[name].data) ) :
self.assertEqual( merged[name].data[offset + i], meshA[name].data[i] )
offset = 0 if flipped else meshA.variableSize( interpolation )
if name in meshB and meshB[name].interpolation == interpolation :
if merged[name].indices and meshB[name].indices :
for i in range( 0, len(meshB[name].indices) ) :
index = merged[name].indices[offset + i]
indexB = meshB[name].indices[i]
self.assertEqual( index, indexB if flipped else indexB + offset )
self.assertEqual( merged[name].data[index], meshB[name].data[indexB] )
elif merged[name].indices :
for i in range( 0, len(meshB[name].data) ) :
index = merged[name].indices[offset + i]
indexB = offset + i
self.assertEqual( index, indexB )
self.assertEqual( merged[name].data[index], meshB[name].data[i] )
elif meshB[name].indices :
for i in range( 0, len(meshB[name].indices) ) :
indexB = meshB[name].indices[i]
self.assertEqual( merged[name].data[offset + i], meshB[name].data[indexB] )
else :
for i in range( 0, len(meshB[name].data) ) :
self.assertEqual( merged[name].data[offset + i], meshB[name].data[i] )
def testPlanes( self ) :
p1 = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 0 ) ) )
p2 = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ) )
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2 )
self.verifyMerge( p1, p2, merged )
def testDifferentPrimVars( self ) :
p1 = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 0 ) ) )
p2 = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ) )
del p2["N"]
self.assertNotEqual( p1.keys(), p2.keys() )
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2 )
self.verifyMerge( p1, p2, merged )
p2 = IECoreScene.MeshAlgo.triangulate( p2 )
p2['myInt'] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.IntVectorData( [ 0, 1, 2, 3, 4 ,5 ] ) )
uTangent, vTangent = IECoreScene.MeshAlgo.calculateTangentsFromUV( p2 )
p2["uTangent"] = uTangent
p2["vTangent"] = vTangent
self.assertNotEqual( p1.keys(), p2.keys() )
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2 )
self.verifyMerge( p1, p2, merged )
def testSamePrimVarNamesWithDifferentInterpolation( self ) :
plane = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 0 ) ) )
IECoreScene.MeshNormalsOp()( input=plane, copyInput=False )
box = IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f( 0 ), imath.V3f( 1 ) ) )
IECoreScene.MeshNormalsOp()( input=box, copyInput=False )
IECoreScene.FaceVaryingPromotionOp()( input=box, copyInput=False, primVarNames=IECore.StringVectorData( [ "N" ] ) )
self.assertEqual( plane.keys(), box.keys() )
merged = IECoreScene.MeshMergeOp()( input=plane, mesh=box )
del box["N"]
self.verifyMerge( plane, box, merged )
def testRemovePrimVars( self ) :
p1 = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 0 ) ) )
p2 = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ) )
del p2["N"]
self.assertNotEqual( p1.keys(), p2.keys() )
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2, removeNonMatchingPrimVars=False )
self.failUnless( "N" in merged )
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2, removeNonMatchingPrimVars=True )
self.failUnless( "N" not in merged )
del p1["N"]
self.verifyMerge( p1, p2, merged )
p2 = IECoreScene.MeshAlgo.triangulate( p2 )
p2['myInt'] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.IntVectorData( [ 0, 1, 2, 3, 4 ,5 ] ) )
uTangent, vTangent = IECoreScene.MeshAlgo.calculateTangentsFromUV( p2 )
p2["uTangent"] = uTangent
p2["vTangent"] = vTangent
self.assertNotEqual( p1.keys(), p2.keys() )
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2, removeNonMatchingPrimVars=False )
self.failUnless( "uTangent" in merged )
self.failUnless( "vTangent" in merged )
self.failUnless( "myInt" in merged )
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2, removeNonMatchingPrimVars=True )
self.failUnless( "uTangent" not in merged )
self.failUnless( "vTangent" not in merged )
self.failUnless( "myInt" not in merged )
del p2["uTangent"]
del p2["vTangent"]
del p2["myInt"]
self.verifyMerge( p1, p2, merged )
def testReferencedData( self ) :
p1 = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 0 ) ) )
p1["Pref"] = p1["P"]
p2 = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ) )
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2 )
self.failUnless( "Pref" in merged )
self.verifyMerge( p1, p2, merged )
del p1["Pref"]
p2["Pref"] = p2["P"]
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2 )
self.failUnless( "Pref" in merged )
self.verifyMerge( p1, p2, merged )
def testIndexedPrimVars( self ) :
p1 = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 0 ) ) )
p2 = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ) )
# both meshes have indexed UVs
p1["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, p1["uv"].data, IECore.IntVectorData( [ 0, 3, 1, 2 ] ) )
p2["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, p2["uv"].data, IECore.IntVectorData( [ 2, 1, 0, 3 ] ) )
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2 )
self.verifyMerge( p1, p2, merged )
# meshA has indexed UVs, meshB has expanded UVs
p2["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, p2["uv"].data, None )
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2 )
self.verifyMerge( p1, p2, merged )
# both meshes have expanded UVs
p1["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, p1["uv"].data, None )
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2 )
self.verifyMerge( p1, p2, merged )
# meshA has expanded UVs, meshB has indexed UVs
p2["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, p2["uv"].data, IECore.IntVectorData( [ 2, 1, 0, 3 ] ) )
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2 )
self.verifyMerge( p1, p2, merged )
# meshA has indexed UVs, meshB has no UVs
p1["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, p1["uv"].data, IECore.IntVectorData( [ 0, 3, 1, 2 ] ) )
del p2["uv"]
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2 )
self.verifyMerge( p1, p2, merged )
# meshA has no UVs, meshB has indexed UVs
del p1["uv"]
p2 = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ) )
p2["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, p2["uv"].data, IECore.IntVectorData( [ 2, 1, 0, 3 ] ) )
merged = IECoreScene.MeshMergeOp()( input=p1, mesh=p2 )
self.verifyMerge( p1, p2, merged )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
skposs/WinLibmobiledevice | libjson/makerelease.py | 4 | 15513 | """Tag the sandbox for release, make source and doc tarballs.
Requires Python 2.6
Example of invocation (use to test the script):
python makerelease.py --platform=msvc6,msvc71,msvc80,msvc90,mingw -ublep 0.6.0 0.7.0-dev
When testing this script:
python makerelease.py --force --retag --platform=msvc6,msvc71,msvc80,mingw -ublep test-0.6.0 test-0.6.1-dev
Example of invocation when doing a release:
python makerelease.py 0.5.0 0.6.0-dev
Note: This was for Subversion. Now that we are in GitHub, we do not
need to build versioned tarballs anymore, so makerelease.py is defunct.
"""
from __future__ import print_function
import os.path
import subprocess
import sys
import doxybuild
import subprocess
import xml.etree.ElementTree as ElementTree
import shutil
import urllib2
import tempfile
import os
import time
from devtools import antglob, fixeol, tarball
import amalgamate
SVN_ROOT = 'https://jsoncpp.svn.sourceforge.net/svnroot/jsoncpp/'
SVN_TAG_ROOT = SVN_ROOT + 'tags/jsoncpp'
SCONS_LOCAL_URL = 'http://sourceforge.net/projects/scons/files/scons-local/1.2.0/scons-local-1.2.0.tar.gz/download'
SOURCEFORGE_PROJECT = 'jsoncpp'
def set_version(version):
with open('version','wb') as f:
f.write(version.strip())
def rmdir_if_exist(dir_path):
if os.path.isdir(dir_path):
shutil.rmtree(dir_path)
class SVNError(Exception):
pass
def svn_command(command, *args):
cmd = ['svn', '--non-interactive', command] + list(args)
print('Running:', ' '.join(cmd))
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout = process.communicate()[0]
if process.returncode:
error = SVNError('SVN command failed:\n' + stdout)
error.returncode = process.returncode
raise error
return stdout
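# Example calls (illustrative; the arguments mirror usage later in this
# script): svn_command wraps the svn CLI and raises SVNError on a non-zero
# exit status.
# xml_status = svn_command('status', '--xml')
# svn_command('copy', '-m', 'Release 0.6.0', '.', SVN_TAG_ROOT + '/0.6.0')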
def check_no_pending_commit():
"""Checks that there is no pending commit in the sandbox."""
stdout = svn_command('status', '--xml')
etree = ElementTree.fromstring(stdout)
msg = []
for entry in etree.getiterator('entry'):
path = entry.get('path')
status = entry.find('wc-status').get('item')
if status != 'unversioned' and path != 'version':
msg.append('File "%s" has pending change (status="%s")' % (path, status))
if msg:
msg.insert(0, 'Pending change to commit found in sandbox. Commit them first!')
return '\n'.join(msg)
def svn_join_url(base_url, suffix):
if not base_url.endswith('/'):
base_url += '/'
if suffix.startswith('/'):
suffix = suffix[1:]
return base_url + suffix
def svn_check_if_tag_exist(tag_url):
"""Checks if a tag exist.
Returns: True if the tag exist, False otherwise.
"""
try:
list_stdout = svn_command('list', tag_url)
except SVNError as e:
if e.returncode != 1 or not str(e).find('tag_url'):
raise e
# otherwise ignore error, meaning tag does not exist
return False
return True
def svn_commit(message):
"""Commit the sandbox, providing the specified comment.
"""
svn_command('ci', '-m', message)
def svn_tag_sandbox(tag_url, message):
"""Makes a tag based on the sandbox revisions.
"""
svn_command('copy', '-m', message, '.', tag_url)
def svn_remove_tag(tag_url, message):
"""Removes an existing tag.
"""
svn_command('delete', '-m', message, tag_url)
def svn_export(tag_url, export_dir):
"""Exports the tag_url revision to export_dir.
Target directory, including its parent is created if it does not exist.
If the directory export_dir exist, it is deleted before export proceed.
"""
rmdir_if_exist(export_dir)
svn_command('export', tag_url, export_dir)
def fix_sources_eol(dist_dir):
"""Set file EOL for tarball distribution.
"""
print('Preparing exported source file EOL for distribution...')
prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
win_sources = antglob.glob(dist_dir,
includes = '**/*.sln **/*.vcproj',
prune_dirs = prune_dirs)
unix_sources = antglob.glob(dist_dir,
includes = '''**/*.h **/*.cpp **/*.inl **/*.txt **/*.dox **/*.py **/*.html **/*.in
sconscript *.json *.expected AUTHORS LICENSE''',
excludes = antglob.default_excludes + 'scons.py sconsign.py scons-*',
prune_dirs = prune_dirs)
for path in win_sources:
fixeol.fix_source_eol(path, is_dry_run = False, verbose = True, eol = '\r\n')
for path in unix_sources:
fixeol.fix_source_eol(path, is_dry_run = False, verbose = True, eol = '\n')
def download(url, target_path):
"""Download file represented by url to target_path.
"""
f = urllib2.urlopen(url)
try:
data = f.read()
finally:
f.close()
fout = open(target_path, 'wb')
try:
fout.write(data)
finally:
fout.close()
def check_compile(distcheck_top_dir, platform):
cmd = [sys.executable, 'scons.py', 'platform=%s' % platform, 'check']
print('Running:', ' '.join(cmd))
log_path = os.path.join(distcheck_top_dir, 'build-%s.log' % platform)
flog = open(log_path, 'wb')
try:
process = subprocess.Popen(cmd,
stdout=flog,
stderr=subprocess.STDOUT,
cwd=distcheck_top_dir)
stdout = process.communicate()[0]
status = (process.returncode == 0)
finally:
flog.close()
return (status, log_path)
def write_tempfile(content, **kwargs):
fd, path = tempfile.mkstemp(**kwargs)
f = os.fdopen(fd, 'wt')
try:
f.write(content)
finally:
f.close()
return path
class SFTPError(Exception):
pass
def run_sftp_batch(userhost, sftp, batch, retry=0):
path = write_tempfile(batch, suffix='.sftp', text=True)
# psftp -agent -C blep,jsoncpp@web.sourceforge.net -batch -b batch.sftp -bc
cmd = [sftp, '-agent', '-C', '-batch', '-b', path, '-bc', userhost]
error = None
for retry_index in range(0, max(1,retry)):
heading = retry_index == 0 and 'Running:' or 'Retrying:'
print(heading, ' '.join(cmd))
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = process.communicate()[0]
if process.returncode != 0:
error = SFTPError('SFTP batch failed:\n' + stdout)
else:
break
if error:
raise error
return stdout
def sourceforge_web_synchro(sourceforge_project, doc_dir,
user=None, sftp='sftp'):
"""Notes: does not synchronize sub-directory of doc-dir.
"""
userhost = '%s,%s@web.sourceforge.net' % (user, sourceforge_project)
stdout = run_sftp_batch(userhost, sftp, """
cd htdocs
dir
exit
""")
existing_paths = set()
collect = 0
for line in stdout.split('\n'):
line = line.strip()
if not collect and line.endswith('> dir'):
collect = True
elif collect and line.endswith('> exit'):
break
elif collect == 1:
collect = 2
elif collect == 2:
path = line.strip().split()[-1:]
if path and path[0] not in ('.', '..'):
existing_paths.add(path[0])
upload_paths = set([os.path.basename(p) for p in antglob.glob(doc_dir)])
paths_to_remove = existing_paths - upload_paths
if paths_to_remove:
print('Removing the following file from web:')
print('\n'.join(paths_to_remove))
stdout = run_sftp_batch(userhost, sftp, """cd htdocs
rm %s
exit""" % ' '.join(paths_to_remove))
print('Uploading %d files:' % len(upload_paths))
batch_size = 10
upload_paths = list(upload_paths)
start_time = time.time()
for index in range(0,len(upload_paths),batch_size):
paths = upload_paths[index:index+batch_size]
file_per_sec = (time.time() - start_time) / (index+1)
remaining_files = len(upload_paths) - index
remaining_sec = file_per_sec * remaining_files
print('%d/%d, ETA=%.1fs' % (index+1, len(upload_paths), remaining_sec))
run_sftp_batch(userhost, sftp, """cd htdocs
lcd %s
mput %s
exit""" % (doc_dir, ' '.join(paths)), retry=3)
def sourceforge_release_tarball(sourceforge_project, paths, user=None, sftp='sftp'):
userhost = '%s,%s@frs.sourceforge.net' % (user, sourceforge_project)
run_sftp_batch(userhost, sftp, """
mput %s
exit
""" % (' '.join(paths),))
def main():
usage = """%prog release_version next_dev_version
Update 'version' file to release_version and commit.
Generates the document tarball.
Tags the sandbox revision with release_version.
Update 'version' file to next_dev_version and commit.
Performs an svn export of tag release version, and build a source tarball.
Must be started in the project top directory.
Warning: --force should only be used when developing/testing the release script.
"""
from optparse import OptionParser
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('--dot', dest="dot_path", action='store', default=doxybuild.find_program('dot'),
help="""Path to GraphViz dot tool. Must be full qualified path. [Default: %default]""")
parser.add_option('--doxygen', dest="doxygen_path", action='store', default=doxybuild.find_program('doxygen'),
help="""Path to Doxygen tool. [Default: %default]""")
parser.add_option('--force', dest="ignore_pending_commit", action='store_true', default=False,
help="""Ignore pending commit. [Default: %default]""")
parser.add_option('--retag', dest="retag_release", action='store_true', default=False,
help="""Overwrite release existing tag if it exist. [Default: %default]""")
parser.add_option('-p', '--platforms', dest="platforms", action='store', default='',
help="""Comma separated list of platform passed to scons for build check.""")
parser.add_option('--no-test', dest="no_test", action='store_true', default=False,
help="""Skips build check.""")
parser.add_option('--no-web', dest="no_web", action='store_true', default=False,
help="""Do not update web site.""")
parser.add_option('-u', '--upload-user', dest="user", action='store',
help="""Sourceforge user for SFTP documentation upload.""")
parser.add_option('--sftp', dest='sftp', action='store', default=doxybuild.find_program('psftp', 'sftp'),
help="""Path of the SFTP compatible binary used to upload the documentation.""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) != 2:
parser.error('release_version missing on command-line.')
release_version = args[0]
next_version = args[1]
if not options.platforms and not options.no_test:
parser.error('You must specify either --platform or --no-test option.')
if options.ignore_pending_commit:
msg = ''
else:
msg = check_no_pending_commit()
if not msg:
print('Setting version to', release_version)
set_version(release_version)
svn_commit('Release ' + release_version)
tag_url = svn_join_url(SVN_TAG_ROOT, release_version)
if svn_check_if_tag_exist(tag_url):
if options.retag_release:
svn_remove_tag(tag_url, 'Overwriting previous tag')
else:
print('Aborting, tag %s already exist. Use --retag to overwrite it!' % tag_url)
sys.exit(1)
svn_tag_sandbox(tag_url, 'Release ' + release_version)
print('Generated doxygen document...')
## doc_dirname = r'jsoncpp-api-html-0.5.0'
## doc_tarball_path = r'e:\prg\vc\Lib\jsoncpp-trunk\dist\jsoncpp-api-html-0.5.0.tar.gz'
doc_tarball_path, doc_dirname = doxybuild.build_doc(options, make_release=True)
doc_distcheck_dir = 'dist/doccheck'
tarball.decompress(doc_tarball_path, doc_distcheck_dir)
doc_distcheck_top_dir = os.path.join(doc_distcheck_dir, doc_dirname)
export_dir = 'dist/export'
svn_export(tag_url, export_dir)
fix_sources_eol(export_dir)
source_dir = 'jsoncpp-src-' + release_version
source_tarball_path = 'dist/%s.tar.gz' % source_dir
print('Generating source tarball to', source_tarball_path)
tarball.make_tarball(source_tarball_path, [export_dir], export_dir, prefix_dir=source_dir)
amalgamation_tarball_path = 'dist/%s-amalgamation.tar.gz' % source_dir
print('Generating amalgamation source tarball to', amalgamation_tarball_path)
amalgamation_dir = 'dist/amalgamation'
amalgamate.amalgamate_source(export_dir, '%s/jsoncpp.cpp' % amalgamation_dir, 'json/json.h')
amalgamation_source_dir = 'jsoncpp-src-amalgamation' + release_version
tarball.make_tarball(amalgamation_tarball_path, [amalgamation_dir],
amalgamation_dir, prefix_dir=amalgamation_source_dir)
# Decompress source tarball, download and install scons-local
distcheck_dir = 'dist/distcheck'
distcheck_top_dir = distcheck_dir + '/' + source_dir
print('Decompressing source tarball to', distcheck_dir)
rmdir_if_exist(distcheck_dir)
tarball.decompress(source_tarball_path, distcheck_dir)
scons_local_path = 'dist/scons-local.tar.gz'
print('Downloading scons-local to', scons_local_path)
download(SCONS_LOCAL_URL, scons_local_path)
print('Decompressing scons-local to', distcheck_top_dir)
tarball.decompress(scons_local_path, distcheck_top_dir)
# Run compilation
print('Compiling decompressed tarball')
all_build_status = True
for platform in options.platforms.split(','):
print('Testing platform:', platform)
build_status, log_path = check_compile(distcheck_top_dir, platform)
print('see build log:', log_path)
print(build_status and '=> ok' or '=> FAILED')
all_build_status = all_build_status and build_status
if not build_status:
print('Testing failed on at least one platform, aborting...')
svn_remove_tag(tag_url, 'Removing tag due to failed testing')
sys.exit(1)
if options.user:
if not options.no_web:
print('Uploading documentation using user', options.user)
sourceforge_web_synchro(SOURCEFORGE_PROJECT, doc_distcheck_top_dir, user=options.user, sftp=options.sftp)
print('Completed documentation upload')
print('Uploading source and documentation tarballs for release using user', options.user)
sourceforge_release_tarball(SOURCEFORGE_PROJECT,
[source_tarball_path, doc_tarball_path],
user=options.user, sftp=options.sftp)
print('Source and doc release tarballs uploaded')
else:
        print('No upload user specified. Web site and download tarball were not uploaded.')
print('Tarball can be found at:', doc_tarball_path)
# Set next version number and commit
set_version(next_version)
svn_commit('Released ' + release_version)
else:
sys.stderr.write(msg + '\n')
if __name__ == '__main__':
main()
| lgpl-2.1 |
Tokyo-Buffalo/tokyosouth | env/lib/python3.6/site-packages/pyasn1/codec/cer/encoder.py | 160 | 4998 | # CER encoder
from pyasn1.type import univ
from pyasn1.type import useful
from pyasn1.codec.ber import encoder
from pyasn1.compat.octets import int2oct, str2octs, null
from pyasn1 import error
class BooleanEncoder(encoder.IntegerEncoder):
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
if client == 0:
substrate = int2oct(0)
else:
substrate = int2oct(255)
return substrate, 0
class BitStringEncoder(encoder.BitStringEncoder):
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
return encoder.BitStringEncoder.encodeValue(
self, encodeFun, client, defMode, 1000
)
class OctetStringEncoder(encoder.OctetStringEncoder):
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
return encoder.OctetStringEncoder.encodeValue(
self, encodeFun, client, defMode, 1000
)
class RealEncoder(encoder.RealEncoder):
def _chooseEncBase(self, value):
m, b, e = value
return self._dropFloatingPoint(m, b, e)
# specialized GeneralStringEncoder here
class GeneralizedTimeEncoder(OctetStringEncoder):
zchar = str2octs('Z')
pluschar = str2octs('+')
minuschar = str2octs('-')
zero = str2octs('0')
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
octets = client.asOctets()
# This breaks too many existing data items
# if '.' not in octets:
# raise error.PyAsn1Error('Format must include fraction of second: %r' % octets)
if len(octets) < 15:
raise error.PyAsn1Error('Bad UTC time length: %r' % octets)
if self.pluschar in octets or self.minuschar in octets:
raise error.PyAsn1Error('Must be UTC time: %r' % octets)
if octets[-1] != self.zchar[0]:
raise error.PyAsn1Error('Missing timezone specifier: %r' % octets)
return encoder.OctetStringEncoder.encodeValue(
self, encodeFun, client, defMode, 1000
)
class UTCTimeEncoder(encoder.OctetStringEncoder):
zchar = str2octs('Z')
pluschar = str2octs('+')
minuschar = str2octs('-')
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
octets = client.asOctets()
if self.pluschar in octets or self.minuschar in octets:
raise error.PyAsn1Error('Must be UTC time: %r' % octets)
if octets and octets[-1] != self.zchar[0]:
client = client.clone(octets + self.zchar)
if len(client) != 13:
raise error.PyAsn1Error('Bad UTC time length: %r' % client)
return encoder.OctetStringEncoder.encodeValue(
self, encodeFun, client, defMode, 1000
)
class SetOfEncoder(encoder.SequenceOfEncoder):
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
if isinstance(client, univ.SequenceAndSetBase):
client.setDefaultComponents()
client.verifySizeSpec()
substrate = null; idx = len(client)
# This is certainly a hack but how else do I distinguish SetOf
# from Set if they have the same tags&constraints?
if isinstance(client, univ.SequenceAndSetBase):
# Set
comps = []
while idx > 0:
idx = idx - 1
if client[idx] is None: # Optional component
continue
if client.getDefaultComponentByPosition(idx) == client[idx]:
continue
comps.append(client[idx])
comps.sort(key=lambda x: isinstance(x, univ.Choice) and \
x.getMinTagSet() or x.getTagSet())
for c in comps:
substrate += encodeFun(c, defMode, maxChunkSize)
else:
# SetOf
compSubs = []
while idx > 0:
idx = idx - 1
compSubs.append(
encodeFun(client[idx], defMode, maxChunkSize)
)
compSubs.sort() # perhaps padding's not needed
substrate = null
for compSub in compSubs:
substrate += compSub
return substrate, 1
tagMap = encoder.tagMap.copy()
tagMap.update({
univ.Boolean.tagSet: BooleanEncoder(),
univ.BitString.tagSet: BitStringEncoder(),
univ.OctetString.tagSet: OctetStringEncoder(),
univ.Real.tagSet: RealEncoder(),
useful.GeneralizedTime.tagSet: GeneralizedTimeEncoder(),
useful.UTCTime.tagSet: UTCTimeEncoder(),
    univ.SetOf().tagSet: SetOfEncoder() # conflicts with Set
})
typeMap = encoder.typeMap.copy()
typeMap.update({
univ.Set.typeId: SetOfEncoder(),
univ.SetOf.typeId: SetOfEncoder()
})
class Encoder(encoder.Encoder):
def __call__(self, client, defMode=False, maxChunkSize=0):
return encoder.Encoder.__call__(self, client, defMode, maxChunkSize)
encode = Encoder(tagMap, typeMap)
# EncoderFactory queries class instance and builds a map of tags -> encoders
| mit |
BitWriters/Zenith_project | zango/lib/python3.5/site-packages/django/core/signing.py | 149 | 6814 | """
Functions for creating and restoring url-safe signed JSON objects.
The format used looks like this:
>>> signing.dumps("hello")
'ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk'
There are two components here, separated by a ':'. The first component is a
URLsafe base64 encoded JSON of the object passed to dumps(). The second
component is a base64 encoded hmac/SHA1 hash of "$first_component:$secret"
signing.loads(s) checks the signature and returns the deserialized object.
If the signature fails, a BadSignature exception is raised.
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk")
u'hello'
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified")
...
BadSignature: Signature failed: ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified
You can optionally compress the JSON prior to base64 encoding it to save
space, using the compress=True argument. This checks if compression actually
helps and only applies compression if the result is a shorter string:
>>> signing.dumps(range(1, 20), compress=True)
'.eJwFwcERACAIwLCF-rCiILN47r-GyZVJsNgkxaFxoDgxcOHGxMKD_T7vhAml:1QaUaL:BA0thEZrp4FQVXIXuOvYJtLJSrQ'
The fact that the string is compressed is signalled by the prefixed '.' at the
start of the base64 JSON.
There are 65 url-safe characters: the 64 used by url-safe base64 and the ':'.
These functions make use of all of them.
"""
from __future__ import unicode_literals
import base64
import datetime
import json
import time
import zlib
from django.conf import settings
from django.utils import baseconv
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.module_loading import import_string
class BadSignature(Exception):
"""
Signature does not match
"""
pass
class SignatureExpired(BadSignature):
"""
Signature timestamp is older than required max_age
"""
pass
def b64_encode(s):
return base64.urlsafe_b64encode(s).strip(b'=')
def b64_decode(s):
pad = b'=' * (-len(s) % 4)
return base64.urlsafe_b64decode(s + pad)
def base64_hmac(salt, value, key):
return b64_encode(salted_hmac(salt, value, key).digest())
def get_cookie_signer(salt='django.core.signing.get_cookie_signer'):
Signer = import_string(settings.SIGNING_BACKEND)
key = force_bytes(settings.SECRET_KEY)
return Signer(b'django.http.cookies' + key, salt=salt)
class JSONSerializer(object):
"""
Simple wrapper around json to be used in signing.dumps and
signing.loads.
"""
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':')).encode('latin-1')
def loads(self, data):
return json.loads(data.decode('latin-1'))
def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False):
"""
Returns URL-safe, sha1 signed base64 compressed JSON string. If key is
None, settings.SECRET_KEY is used instead.
If compress is True (not the default) checks if compressing using zlib can
save some space. Prepends a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
Salt can be used to namespace the hash, so that a signed string is
only valid for a given namespace. Leaving this at the default
value or re-using a salt value across different parts of your
application without good cause is a security risk.
The serializer is expected to return a bytestring.
"""
data = serializer().dumps(obj)
# Flag for if it's been compressed or not
is_compressed = False
if compress:
# Avoid zlib dependency unless compress is being used
compressed = zlib.compress(data)
if len(compressed) < (len(data) - 1):
data = compressed
is_compressed = True
base64d = b64_encode(data)
if is_compressed:
base64d = b'.' + base64d
return TimestampSigner(key, salt=salt).sign(base64d)
def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None):
"""
Reverse of dumps(), raises BadSignature if signature fails.
The serializer is expected to accept a bytestring.
"""
# TimestampSigner.unsign always returns unicode but base64 and zlib
# compression operate on bytes.
base64d = force_bytes(TimestampSigner(key, salt=salt).unsign(s, max_age=max_age))
decompress = False
if base64d[:1] == b'.':
# It's compressed; uncompress it first
base64d = base64d[1:]
decompress = True
data = b64_decode(base64d)
if decompress:
data = zlib.decompress(data)
return serializer().loads(data)
class Signer(object):
def __init__(self, key=None, sep=':', salt=None):
# Use of native strings in all versions of Python
self.sep = force_str(sep)
self.key = key or settings.SECRET_KEY
self.salt = force_str(salt or
'%s.%s' % (self.__class__.__module__, self.__class__.__name__))
def signature(self, value):
signature = base64_hmac(self.salt + 'signer', value, self.key)
# Convert the signature from bytes to str only on Python 3
return force_str(signature)
def sign(self, value):
value = force_str(value)
return str('%s%s%s') % (value, self.sep, self.signature(value))
def unsign(self, signed_value):
signed_value = force_str(signed_value)
if self.sep not in signed_value:
raise BadSignature('No "%s" found in value' % self.sep)
value, sig = signed_value.rsplit(self.sep, 1)
if constant_time_compare(sig, self.signature(value)):
return force_text(value)
raise BadSignature('Signature "%s" does not match' % sig)
class TimestampSigner(Signer):
def timestamp(self):
return baseconv.base62.encode(int(time.time()))
def sign(self, value):
value = force_str(value)
value = str('%s%s%s') % (value, self.sep, self.timestamp())
return super(TimestampSigner, self).sign(value)
def unsign(self, value, max_age=None):
"""
Retrieve original value and check it wasn't signed more
than max_age seconds ago.
"""
result = super(TimestampSigner, self).unsign(value)
value, timestamp = result.rsplit(self.sep, 1)
timestamp = baseconv.base62.decode(timestamp)
if max_age is not None:
if isinstance(max_age, datetime.timedelta):
max_age = max_age.total_seconds()
# Check timestamp is not older than max_age
age = time.time() - timestamp
if age > max_age:
raise SignatureExpired(
'Signature age %s > %s seconds' % (age, max_age))
return value
| mit |
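A minimal round-trip sketch for the dumps()/loads() pair above; the only hard requirement is a configured Django settings module with a SECRET_KEY, which is set up standalone here for illustration:

from django.conf import settings
from django.core import signing

# Hypothetical standalone setup; in a real project settings already exist.
settings.configure(SECRET_KEY='not-a-real-key')

token = signing.dumps({'user': 42}, salt='example.namespace', compress=True)
data = signing.loads(token, salt='example.namespace', max_age=3600)
assert data == {'user': 42}

# A different salt (or a tampered payload) raises BadSignature:
try:
    signing.loads(token, salt='other.namespace')
except signing.BadSignature:
    pass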
noba3/KoTos | addons/plugin.video.genesis/resources/lib/resolvers/zettahost.py | 5 | 1471 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def resolve(url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://zettahost.tv/embed-%s.html' % url
result = client.request(url, mobile=True)
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
result = jsunpack.unpack(result)
url = client.parseDOM(result, 'embed', ret='src')
url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
url = [i for i in url if not i.endswith('.srt')]
url = 'http://' + url[0].split('://', 1)[-1]
return url
except:
return
| gpl-2.0 |
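The URL normalisation in resolve() above is just a string replacement plus one regex; a standalone sketch of that step (the sample URL is made up):

import re

url = 'http://zettahost.tv/embed-scz8lxrrgllr.html'   # illustrative input
url = url.replace('/embed-', '/')
video_id = re.compile(r'//.+?/(\w+)').findall(url)[0]
print('http://zettahost.tv/embed-%s.html' % video_id)
# -> http://zettahost.tv/embed-scz8lxrrgllr.html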
DylannCordel/django-categories | categories/south_migrations/0007_auto__add_field_category_active.py | 14 | 3914 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Category.active'
db.add_column('categories_category', 'active', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Category.active'
db.delete_column('categories_category', 'active')
models = {
'categories.category': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'alternate_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'alternate_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'meta_extra': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['categories.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'categories.categoryrelation': {
'Meta': {'object_name': 'CategoryRelation'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'relation_type': ('django.db.models.fields.CharField', [], {'max_length': "'200'", 'null': 'True', 'blank': 'True'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Category']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['categories']
| apache-2.0 |
OlafLee/Theano-Lights | models/ffn_bn_vat.py | 11 | 5923 | import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.nnet.conv import conv2d
from theano.tensor.signal.downsample import max_pool_2d
from theano.tensor.shared_randomstreams import RandomStreams
import numpy as np
from toolbox import *
from modelbase import *
import itertools
class FFN_bn_vat(ModelSLBase):
"""
Virtual adversarial training with batch normalization
"""
def save(self):
if not os.path.exists('savedmodels\\'):
os.makedirs('savedmodels\\')
self.params.save(self.filename)
self.shared_vars.save(self.filename + '_vars')
def __init__(self, data, hp):
super(FFN_bn_vat, self).__init__(self.__class__.__name__, data, hp)
self.epsilon = 0.001
self.params = Parameters()
self.shared_vars = Parameters()
n_x = self.data['n_x']
n_y = self.data['n_y']
n_h1 = 1200
n_h2 = 800
n_h3 = 600
n_h4 = 400
scale = hp.init_scale
dropout_rate = 0.3
if hp.load_model and os.path.isfile(self.filename):
self.params.load(self.filename)
self.shared_vars.load(self.filename + '_vars')
else:
with self.params:
w_h = shared_normal((n_x, n_h1), scale=scale)
gamma = shared_uniform((n_h1,), range=[0.95, 1.05])
b_h = shared_zeros((n_h1,))
w_h2 = shared_normal((n_h1, n_h2), scale=scale)
gamma2 = shared_uniform((n_h2,), range=[0.95, 1.05])
b_h2 = shared_zeros((n_h2,))
w_h3 = shared_normal((n_h2, n_h3), scale=scale)
gamma3 = shared_uniform((n_h3,), range=[0.95, 1.05])
b_h3 = shared_zeros((n_h3,))
w_h4 = shared_normal((n_h3, n_h4), scale=scale)
gamma4 = shared_uniform((n_h4,), range=[0.95, 1.05])
b_h4 = shared_zeros((n_h4,))
w_o = shared_normal((n_h4, n_y), scale=scale)
with self.shared_vars:
m_shared = shared_zeros((1, n_h1), broadcastable=(True, False))
v_shared = shared_zeros((1, n_h1), broadcastable=(True, False))
m_shared2 = shared_zeros((1, n_h2), broadcastable=(True, False))
v_shared2 = shared_zeros((1, n_h2), broadcastable=(True, False))
m_shared3 = shared_zeros((1, n_h3), broadcastable=(True, False))
v_shared3 = shared_zeros((1, n_h3), broadcastable=(True, False))
m_shared4 = shared_zeros((1, n_h4), broadcastable=(True, False))
v_shared4 = shared_zeros((1, n_h4), broadcastable=(True, False))
def batch_norm(X, m_shared, v_shared, test, add_updates):
            # X is flattened to 2D for the moment computation; remember whether
            # a reshape is needed afterwards, since X.ndim changes after flatten().
            needs_reshape = X.ndim > 2
            if needs_reshape:
                output_shape = X.shape
                X = X.flatten(2)
if test is False:
m = T.mean(X, axis=0, keepdims=True)
v = T.sqrt(T.var(X, axis=0, keepdims=True) + self.epsilon)
if not add_updates is None:
mulfac = 1.0/100
add_updates.append((m_shared, (1.0-mulfac)*m_shared + mulfac*m))
add_updates.append((v_shared, (1.0-mulfac)*v_shared + mulfac*v))
else:
m = m_shared
v = v_shared
X_hat = (X - m) / v
            if needs_reshape:
                X_hat = T.reshape(X_hat, output_shape)
return X_hat
def model(X, params, sv, p_drop_hidden, test, add_updates):
h = batch_norm(T.dot(X, params.w_h), sv.m_shared, sv.v_shared, test, add_updates)
h = params.gamma * h + params.b_h
h = dropout(rectify(h), p_drop_hidden)
h2 = batch_norm(T.dot(h, params.w_h2), sv.m_shared2, sv.v_shared2, test, add_updates)
h2 = params.gamma2 * h2 + params.b_h2
h2 = dropout(rectify(h2), p_drop_hidden)
h3 = batch_norm(T.dot(h2, params.w_h3), sv.m_shared3, sv.v_shared3, test, add_updates)
h3 = params.gamma3 * h3 + params.b_h3
h3 = dropout(rectify(h3), p_drop_hidden)
h4 = batch_norm(T.dot(h3, params.w_h4), sv.m_shared4, sv.v_shared4, test, add_updates)
h4 = params.gamma4 * h4 + params.b_h4
h4 = dropout(rectify(h4), p_drop_hidden)
py_x = softmax(T.dot(h4, params.w_o))
return py_x
add_updates = []
x = self.X + gaussian(self.X.shape, 0.1)
py_x = model(x, self.params, self.shared_vars, dropout_rate, False, add_updates)
cost = -T.sum(self.Y * T.log(py_x))
# VAT
adv_cost_coeff = 1.0
adv_est_noise = 1e-6
adv_noise = 3.0
adv_power_iter = 1
if adv_cost_coeff > 0:
adv_X = normalize(gaussian(self.X.shape, 1.0))
for power_it in xrange(0, adv_power_iter):
d = adv_X*adv_est_noise
adv_est_py_x = model(x + d, self.params, self.shared_vars, dropout_rate, False, None)
cost_adv = -T.sum(py_x * T.log(adv_est_py_x))
adv_X = T.grad(cost=cost_adv, wrt=d)
adv_X = normalize(theano.gradient.disconnected_grad(adv_X))
adv_py_x = model(x + adv_X*adv_noise, self.params, self.shared_vars, dropout_rate, False, None)
py_x_hat = theano.gradient.disconnected_grad(py_x)
adv_cost = -T.sum(py_x_hat * T.log(adv_py_x))
cost += adv_cost_coeff*adv_cost
# -------------------------------------------------
pyx = model(self.X, self.params, self.shared_vars, 0., True, None)
map_pyx = T.argmax(pyx, axis=1)
error_map_pyx = T.sum(T.neq(map_pyx, T.argmax(self.Y, axis=1)))
self.compile(cost, error_map_pyx, add_updates)
| mit |
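The VAT block above finds the adversarial direction as the normalised gradient of the KL divergence with respect to a tiny perturbation, one power-iteration step at a time. A framework-free numpy sketch of the same loop; predict() is a hypothetical probability model, normalize() is assumed to be row-wise L2, and a finite-difference gradient stands in for T.grad:

import numpy as np

def normalize(d):
    # Row-wise L2 normalisation, mirroring what normalize() above is assumed to do.
    return d / (np.sqrt((d ** 2).sum(axis=1, keepdims=True)) + 1e-12)

def num_grad(f, d, eps=1e-6):
    # Crude finite-difference gradient of scalar f at d (illustration only).
    g = np.zeros_like(d)
    it = np.nditer(d, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        old = d[idx]
        d[idx] = old + eps
        hi = f(d)
        d[idx] = old - eps
        lo = f(d)
        d[idx] = old
        g[idx] = (hi - lo) / (2 * eps)
        it.iternext()
    return g

def vat_direction(predict, x, xi=1e-6, power_iter=1, rng=np.random):
    # predict(x) -> class probabilities; a hypothetical stand-in for model().
    p = predict(x)
    d = normalize(rng.normal(size=x.shape))
    for _ in range(power_iter):
        kl = lambda d_: -(p * np.log(predict(x + xi * d_) + 1e-12)).sum()
        d = normalize(num_grad(kl, d))
    return d   # perturb the input as x + adv_noise * d, as in the cost above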
wfxiang08/django185 | tests/m2m_multiple/tests.py | 228 | 2386 | from __future__ import unicode_literals
from datetime import datetime
from django.test import TestCase
from .models import Article, Category
class M2MMultipleTests(TestCase):
def test_multiple(self):
c1, c2, c3, c4 = [
Category.objects.create(name=name)
for name in ["Sports", "News", "Crime", "Life"]
]
a1 = Article.objects.create(
headline="Area man steals", pub_date=datetime(2005, 11, 27)
)
a1.primary_categories.add(c2, c3)
a1.secondary_categories.add(c4)
a2 = Article.objects.create(
headline="Area man runs", pub_date=datetime(2005, 11, 28)
)
a2.primary_categories.add(c1, c2)
a2.secondary_categories.add(c4)
self.assertQuerysetEqual(
a1.primary_categories.all(), [
"Crime",
"News",
],
lambda c: c.name
)
self.assertQuerysetEqual(
a2.primary_categories.all(), [
"News",
"Sports",
],
lambda c: c.name
)
self.assertQuerysetEqual(
a1.secondary_categories.all(), [
"Life",
],
lambda c: c.name
)
self.assertQuerysetEqual(
c1.primary_article_set.all(), [
"Area man runs",
],
lambda a: a.headline
)
self.assertQuerysetEqual(
c1.secondary_article_set.all(), []
)
self.assertQuerysetEqual(
c2.primary_article_set.all(), [
"Area man steals",
"Area man runs",
],
lambda a: a.headline
)
self.assertQuerysetEqual(
c2.secondary_article_set.all(), []
)
self.assertQuerysetEqual(
c3.primary_article_set.all(), [
"Area man steals",
],
lambda a: a.headline
)
self.assertQuerysetEqual(
c3.secondary_article_set.all(), []
)
self.assertQuerysetEqual(
c4.primary_article_set.all(), []
)
self.assertQuerysetEqual(
c4.secondary_article_set.all(), [
"Area man steals",
"Area man runs",
],
lambda a: a.headline
)
| bsd-3-clause |
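The models module for this test isn't included in the row above; a plausible sketch of what it must contain: two ManyToManyFields from Article to the same Category model, disambiguated by related_name so the reverse accessors used in the assertions exist, with Meta ordering matching the sorted results the test expects:

from django.db import models

class Category(models.Model):
    name = models.CharField(max_length=20)

    class Meta:
        ordering = ('name',)

class Article(models.Model):
    headline = models.CharField(max_length=50)
    pub_date = models.DateTimeField()
    primary_categories = models.ManyToManyField(
        Category, related_name='primary_article_set')
    secondary_categories = models.ManyToManyField(
        Category, related_name='secondary_article_set')

    class Meta:
        ordering = ('pub_date',)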
davidyezsetz/kuma | vendor/packages/pylint/interfaces.py | 6 | 3111 | # This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" Copyright (c) 2002-2003 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:contact@logilab.fr
Interfaces for PyLint objects
"""
__revision__ = "$Id: interfaces.py,v 1.9 2004-04-24 12:14:53 syt Exp $"
from logilab.common.interface import Interface
class IChecker(Interface):
"""This is an base interface, not designed to be used elsewhere than for
sub interfaces definition.
"""
def open(self):
"""called before visiting project (i.e set of modules)"""
def close(self):
"""called after visiting project (i.e set of modules)"""
## def open_module(self):
## """called before visiting a module"""
## def close_module(self):
## """called after visiting a module"""
class IRawChecker(IChecker):
"""interface for checker which need to parse the raw file
"""
def process_module(self, stream):
""" process a module
the module's content is accessible via the stream object
"""
class IASTNGChecker(IChecker):
""" interface for checker which prefers receive events according to
statement type
"""
class ILinter(Interface):
"""interface for the linter class
the linter class will generate events to its registered checkers.
Each checker may interact with the linter instance using this API
"""
def register_checker(self, checker):
"""register a new checker class
checker is a class implementing IrawChecker or / and IASTNGChecker
"""
def add_message(self, msg_id, line=None, node=None, args=None):
"""add the message corresponding to the given id.
If provided, msg is expanded using args
astng checkers should provide the node argument,
raw checkers should provide the line argument.
"""
class IReporter(Interface):
""" reporter collect messages and display results encapsulated in a layout
"""
def add_message(self, msg_id, location, msg):
"""add a message of a given type
msg_id is a message identifier
location is a 3-uple (module, object, line)
msg is the actual message
"""
def display_results(self, layout):
"""display results encapsulated in the layout tree
"""
__all__ = ('IChecker', 'IRawChecker', 'IASTNGChecker', 'ILinter', 'IReporter')
| mpl-2.0 |
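A toy raw checker against the interfaces above; old pylint declared conformance through an __implements__ attribute rather than subclassing, and the message id and print-based reporting here are made up for illustration:

class TrailingWhitespaceChecker(object):
    """Flag lines that end in whitespace (illustrative only)."""
    __implements__ = IRawChecker

    def open(self):
        pass

    def close(self):
        pass

    def process_module(self, stream):
        for lineno, line in enumerate(stream):
            if line.rstrip('\n') != line.rstrip():
                # A real checker would report via ILinter.add_message with
                # the line argument, as the docstrings above prescribe.
                print 'W9999: trailing whitespace on line %d' % (lineno + 1)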
sfam/home-assistant | homeassistant/components/keyboard.py | 4 | 2759 | """
homeassistant.components.keyboard
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides functionality to emulate keyboard presses on host machine.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/keyboard/
"""
import logging
from homeassistant.const import (
SERVICE_VOLUME_UP, SERVICE_VOLUME_DOWN, SERVICE_VOLUME_MUTE,
SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_PLAY_PAUSE)
DOMAIN = "keyboard"
REQUIREMENTS = ['pyuserinput==0.1.9']
def volume_up(hass):
""" Press the keyboard button for volume up. """
hass.services.call(DOMAIN, SERVICE_VOLUME_UP)
def volume_down(hass):
""" Press the keyboard button for volume down. """
hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN)
def volume_mute(hass):
""" Press the keyboard button for muting volume. """
hass.services.call(DOMAIN, SERVICE_VOLUME_MUTE)
def media_play_pause(hass):
""" Press the keyboard button for play/pause. """
hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE)
def media_next_track(hass):
""" Press the keyboard button for next track. """
hass.services.call(DOMAIN, SERVICE_MEDIA_NEXT_TRACK)
def media_prev_track(hass):
""" Press the keyboard button for prev track. """
hass.services.call(DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK)
def setup(hass, config):
""" Listen for keyboard events. """
try:
import pykeyboard
except ImportError:
logging.getLogger(__name__).exception(
"Error while importing dependency PyUserInput.")
return False
keyboard = pykeyboard.PyKeyboard()
keyboard.special_key_assignment()
hass.services.register(DOMAIN, SERVICE_VOLUME_UP,
lambda service:
keyboard.tap_key(keyboard.volume_up_key))
hass.services.register(DOMAIN, SERVICE_VOLUME_DOWN,
lambda service:
keyboard.tap_key(keyboard.volume_down_key))
hass.services.register(DOMAIN, SERVICE_VOLUME_MUTE,
lambda service:
keyboard.tap_key(keyboard.volume_mute_key))
hass.services.register(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE,
lambda service:
keyboard.tap_key(keyboard.media_play_pause_key))
hass.services.register(DOMAIN, SERVICE_MEDIA_NEXT_TRACK,
lambda service:
keyboard.tap_key(keyboard.media_next_track_key))
hass.services.register(DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK,
lambda service:
keyboard.tap_key(keyboard.media_prev_track_key))
return True
| mit |
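Once the component is set up (a bare `keyboard:` entry in configuration.yaml), the helpers above are thin wrappers over the service registry; for example, from other Python code holding a hass reference (assumed to be a running Home Assistant core instance):

# `hass` is assumed to have the keyboard component loaded.
volume_up(hass)           # fires keyboard/volume_up -> taps the volume-up key
media_play_pause(hass)    # fires keyboard/media_play_pause

# Equivalent direct service call:
hass.services.call(DOMAIN, SERVICE_MEDIA_NEXT_TRACK)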
CUCWD/edx-platform | common/djangoapps/entitlements/tests/factories.py | 17 | 1156 | import string
from uuid import uuid4
import factory
from factory.fuzzy import FuzzyChoice, FuzzyText
from student.tests.factories import UserFactory
from course_modes.helpers import CourseMode
from entitlements.models import CourseEntitlement, CourseEntitlementPolicy
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
class CourseEntitlementPolicyFactory(factory.django.DjangoModelFactory):
"""
    Factory for a CourseEntitlementPolicy
"""
class Meta(object):
model = CourseEntitlementPolicy
site = factory.SubFactory(SiteFactory)
class CourseEntitlementFactory(factory.django.DjangoModelFactory):
class Meta(object):
model = CourseEntitlement
uuid = factory.LazyFunction(uuid4)
course_uuid = factory.LazyFunction(uuid4)
expired_at = None
mode = FuzzyChoice([CourseMode.VERIFIED, CourseMode.PROFESSIONAL])
user = factory.SubFactory(UserFactory)
order_number = FuzzyText(prefix='TEXTX', chars=string.digits)
enrollment_course_run = None
policy = factory.SubFactory(CourseEntitlementPolicyFactory)
| agpl-3.0 |
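Typical use inside a test case; the overrides below are arbitrary example values:

# Create a persisted entitlement; the SubFactories fill in user, policy and site.
entitlement = CourseEntitlementFactory.create(mode=CourseMode.VERIFIED)
assert entitlement.order_number.startswith('TEXTX')
assert entitlement.policy.site is not None

# Build an unsaved instance when no database round-trip is wanted:
unsaved = CourseEntitlementFactory.build()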
kawamon/hue | desktop/core/ext-py/Django-1.11.29/django/contrib/gis/geos/linestring.py | 136 | 6019 | from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry, LinearGeometryMixin
from django.contrib.gis.geos.point import Point
from django.contrib.gis.shortcuts import numpy
from django.utils.six.moves import range
class LineString(LinearGeometryMixin, GEOSGeometry):
_init_func = capi.create_linestring
_minlength = 2
has_cs = True
def __init__(self, *args, **kwargs):
"""
Initializes on the given sequence -- may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object.
Examples:
ls = LineString((1, 1), (2, 2))
ls = LineString([(1, 1), (2, 2)])
ls = LineString(array([(1, 1), (2, 2)]))
ls = LineString(Point(1, 1), Point(2, 2))
"""
# If only one argument provided, set the coords array appropriately
if len(args) == 1:
coords = args[0]
else:
coords = args
if not (isinstance(coords, (tuple, list)) or numpy and isinstance(coords, numpy.ndarray)):
raise TypeError('Invalid initialization input for LineStrings.')
# If SRID was passed in with the keyword arguments
srid = kwargs.get('srid')
ncoords = len(coords)
if not ncoords:
super(LineString, self).__init__(self._init_func(None), srid=srid)
return
if ncoords < self._minlength:
raise ValueError(
'%s requires at least %d points, got %s.' % (
self.__class__.__name__,
self._minlength,
ncoords,
)
)
if isinstance(coords, (tuple, list)):
# Getting the number of coords and the number of dimensions -- which
# must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
ndim = None
# Incrementing through each of the coordinates and verifying
for coord in coords:
if not isinstance(coord, (tuple, list, Point)):
raise TypeError('Each coordinate should be a sequence (list or tuple)')
if ndim is None:
ndim = len(coord)
self._checkdim(ndim)
elif len(coord) != ndim:
raise TypeError('Dimension mismatch.')
numpy_coords = False
else:
shape = coords.shape # Using numpy's shape.
if len(shape) != 2:
raise TypeError('Too many dimensions.')
self._checkdim(shape[1])
ndim = shape[1]
numpy_coords = True
# Creating a coordinate sequence object because it is easier to
# set the points using GEOSCoordSeq.__setitem__().
cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim == 3))
for i in range(ncoords):
if numpy_coords:
cs[i] = coords[i, :]
elif isinstance(coords[i], Point):
cs[i] = coords[i].tuple
else:
cs[i] = coords[i]
# Calling the base geometry initialization with the returned pointer
# from the function.
super(LineString, self).__init__(self._init_func(cs.ptr), srid=srid)
def __iter__(self):
"Allows iteration over this LineString."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Returns the number of points in this LineString."
return len(self._cs)
def _get_single_external(self, index):
return self._cs[index]
_get_single_internal = _get_single_external
def _set_list(self, length, items):
ndim = self._cs.dims
hasz = self._cs.hasz # I don't understand why these are different
# create a new coordinate sequence and populate accordingly
cs = GEOSCoordSeq(capi.create_cs(length, ndim), z=hasz)
for i, c in enumerate(items):
cs[i] = c
ptr = self._init_func(cs.ptr)
if ptr:
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(self.srid)
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._checkindex(index)
self._cs[index] = value
def _checkdim(self, dim):
if dim not in (2, 3):
raise TypeError('Dimension mismatch.')
# #### Sequence Properties ####
@property
def tuple(self):
"Returns a tuple version of the geometry from the coordinate sequence."
return self._cs.tuple
coords = tuple
def _listarr(self, func):
"""
Internal routine that returns a sequence (list) corresponding with
the given function. Will return a numpy array if possible.
"""
lst = [func(i) for i in range(len(self))]
if numpy:
return numpy.array(lst) # ARRRR!
else:
return lst
@property
def array(self):
"Returns a numpy array for the LineString."
return self._listarr(self._cs.__getitem__)
@property
def x(self):
"Returns a list or numpy array of the X variable."
return self._listarr(self._cs.getX)
@property
def y(self):
"Returns a list or numpy array of the Y variable."
return self._listarr(self._cs.getY)
@property
def z(self):
"Returns a list or numpy array of the Z variable."
if not self.hasz:
return None
else:
return self._listarr(self._cs.getZ)
# LinearRings are LineStrings used within Polygons.
class LinearRing(LineString):
_minlength = 4
_init_func = capi.create_linearring
| apache-2.0 |
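A quick sketch of the sequence protocol implemented above (requires a GeoDjango install with the GEOS library available):

from django.contrib.gis.geos import LineString

ls = LineString((0, 0), (1, 1), (2, 0), srid=4326)
print(len(ls))       # 3 -- __len__ delegates to the coordinate sequence
print(ls[1])         # (1.0, 1.0) via _get_single_external
print(list(ls.x))    # [0.0, 1.0, 2.0] (a numpy array when numpy is installed)
ls[1] = (1.0, 2.0)   # _set_single writes through to the coordinate sequence
print(ls.tuple)      # ((0.0, 0.0), (1.0, 2.0), (2.0, 0.0))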
jose36/plugin.video.ProyectoLuzDigital | servers/turbobit.py | 44 | 1401 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para turbobit
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[turbobit.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
return video_urls
# Finds videos from this server in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://turbobit.net/scz8lxrrgllr.html
# http://www.turbobit.net/uzo3gcyfmt4b.html
# http://turbobit.net/eaz9ha3gop65/deadliest.catch.s08e09-killers.mp4.html
patronvideos = '(turbobit.net/[0-9a-z]+)'
logger.info("[turbobit.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[turbobit]"
url = "http://"+match+".html"
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'turbobit' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
| apache-2.0 |
repuestos-365/server | client/node_modules/node-gyp/gyp/tools/pretty_gyp.py | 2618 | 4756 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')
# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
return matchobj.group(1) + matchobj.group(2) + '#' * len(matchobj.group(3))
def mask_comments(input):
"""Mask the quoted strings so we skip braces inside quoted strings."""
search_re = re.compile(r'(.*?)(#)(.*)')
return [search_re.sub(comment_replace, line) for line in input]
def quote_replace(matchobj):
return "%s%s%s%s" % (matchobj.group(1),
matchobj.group(2),
'x'*len(matchobj.group(3)),
matchobj.group(2))
def mask_quotes(input):
"""Mask the quoted strings so we skip braces inside quoted strings."""
search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
return [search_re.sub(quote_replace, line) for line in input]
def do_split(input, masked_input, search_re):
output = []
mask_output = []
for (line, masked_line) in zip(input, masked_input):
m = search_re.match(masked_line)
while m:
split = len(m.group(1))
line = line[:split] + r'\n' + line[split:]
masked_line = masked_line[:split] + r'\n' + masked_line[split:]
m = search_re.match(masked_line)
output.extend(line.split(r'\n'))
mask_output.extend(masked_line.split(r'\n'))
return (output, mask_output)
def split_double_braces(input):
"""Masks out the quotes and comments, and then splits appropriate
  lines (lines that match the double_*_brace re's above) before
indenting them below.
These are used to split lines which have multiple braces on them, so
that the indentation looks prettier when all laid out (e.g. closing
braces make a nice diagonal line).
"""
double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')
masked_input = mask_quotes(input)
masked_input = mask_comments(masked_input)
(output, mask_output) = do_split(input, masked_input, double_open_brace_re)
(output, mask_output) = do_split(output, mask_output, double_close_brace_re)
return output
def count_braces(line):
"""keeps track of the number of braces on a given line and returns the result.
It starts at zero and subtracts for closed braces, and adds for open braces.
"""
open_braces = ['[', '(', '{']
close_braces = [']', ')', '}']
closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
cnt = 0
stripline = COMMENT_RE.sub(r'', line)
stripline = QUOTE_RE.sub(r"''", stripline)
for char in stripline:
for brace in open_braces:
if char == brace:
cnt += 1
for brace in close_braces:
if char == brace:
cnt -= 1
after = False
if cnt > 0:
after = True
# This catches the special case of a closing brace having something
# other than just whitespace ahead of it -- we don't want to
# unindent that until after this line is printed so it stays with
# the previous indentation level.
if cnt < 0 and closing_prefix_re.match(stripline):
after = True
return (cnt, after)
def prettyprint_input(lines):
"""Does the main work of indenting the input based on the brace counts."""
indent = 0
basic_offset = 2
last_line = ""
for line in lines:
if COMMENT_RE.match(line):
print line
else:
line = line.strip('\r\n\t ') # Otherwise doesn't strip \r on Unix.
if len(line) > 0:
(brace_diff, after) = count_braces(line)
if brace_diff != 0:
if after:
print " " * (basic_offset * indent) + line
indent += brace_diff
else:
indent += brace_diff
print " " * (basic_offset * indent) + line
else:
print " " * (basic_offset * indent) + line
else:
print ""
last_line = line
def main():
if len(sys.argv) > 1:
data = open(sys.argv[1]).read().splitlines()
else:
data = sys.stdin.read().splitlines()
# Split up the double braces.
lines = split_double_braces(data)
# Indent and print the output.
prettyprint_input(lines)
return 0
if __name__ == '__main__':
sys.exit(main())
| mit |
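Fed a single-line GYP fragment, the splitter and brace counter above produce the diagonal closing-brace layout the docstrings describe; a small demonstration (output shown in the trailing comments):

lines = split_double_braces(["{'targets': [{'target_name': 'a'}]}"])
prettyprint_input(lines)
# {'targets': [
#     {'target_name': 'a'}
#   ]
# }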
estherbester/montybot | montybot/message.py | 2 | 2844 | import re
from unknown_replies import smartass_reply
from unknown_replies import greedy_reply
class Message(object):
    """
    A message comes from a user and is sent to a channel.
    The MainBot instance handles the message.
    """
    MAX_COMMANDS = 3
def __init__(self, user, channel, message, bot_instance):
"""
Initialize the Message object that will be handled smartly.
"""
self.user = user
self.channel = channel
self.message = message
self.bot_instance = bot_instance
def handle(self):
"""
Bot needs to know whether a given message has been handled
"""
self.bot_instance.handled = False
self._handle()
def run_command_plugins(self):
if self._is_command() and not self.bot_instance.handled:
self._match_command()
def run_taunt_plugins(self):
if self.bot_instance.factory.taunt_plugins:
for plugin in self.bot_instance.factory.taunt_plugins:
plugin.run(self.user, self.channel, self.message, self.bot_instance)
def run_message_plugins(self):
for plugin in self.bot_instance.factory.message_plugins:
plugin.run(self.user, self.channel, self.message, self.bot_instance)
def _handle(self):
"""
Run through the various plugins for a given message.
        The order matters: taunt plugins and command plugins
        can't be combined. Message plugins will always respond.
"""
self.run_taunt_plugins()
self.run_command_plugins()
self.run_message_plugins()
def _get_msg_content(self):
""" return the message content only.
"""
message = self.message.strip()
return re.compile(self.bot_instance.nickname + "[:,]* ?", re.I).sub('', message)
def _match_command(self):
"""
Call the command that matches anything in our command dict.
If no matches, return a smartass reply.
"""
commands = [func for command, func in self.bot_instance.commands.items() \
if self._msg_contains_cmd(command)]
# if nothing available, send a smartass reply
if len(commands) > 0:
if len(commands) <= self.MAX_COMMANDS:
for cmd in commands:
cmd.__call__(self.user, self.channel)
else:
self.bot_instance.msg(self.channel, greedy_reply())
else:
self.bot_instance.msg(self.channel, smartass_reply())
def _msg_contains_cmd(self, cmd):
""" Scrub the message """
msg = self.message.lower()
cmd = cmd.lower()
return cmd in msg
def _is_command(self):
return self.message.startswith(self.bot_instance.nickname)
| mit |
francbartoli/geonode | geonode/sitemap.py | 19 | 1549 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.contrib.sitemaps import Sitemap
from geonode.maps.models import Layer, Map
from guardian.shortcuts import get_objects_for_user
from django.contrib.auth.models import AnonymousUser
class LayerSitemap(Sitemap):
changefreq = "never"
priority = 0.5
def items(self):
permitted = get_objects_for_user(AnonymousUser(), 'base.view_resourcebase')
return Layer.objects.filter(id__in=permitted)
def lastmod(self, obj):
return obj.date
class MapSitemap(Sitemap):
changefreq = "never"
priority = 0.5
def items(self):
permitted = get_objects_for_user(AnonymousUser(), 'base.view_resourcebase')
return Map.objects.filter(id__in=permitted)
| gpl-3.0 |
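Wiring these classes into a site is the standard django.contrib.sitemaps pattern; a hypothetical urls.py fragment (the dict keys are arbitrary section labels):

from django.conf.urls import url
from django.contrib.sitemaps import views as sitemap_views
from geonode.sitemap import LayerSitemap, MapSitemap

sitemaps = {'layers': LayerSitemap, 'maps': MapSitemap}

urlpatterns = [
    url(r'^sitemap\.xml$', sitemap_views.sitemap, {'sitemaps': sitemaps}),
]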
sxjscience/tvm | tests/python/unittest/test_autotvm_index_tuner.py | 2 | 2478 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test index based tuners"""
from test_autotvm_common import DummyRunner, get_sample_task
from tvm import autotvm
from tvm.autotvm.tuner import GridSearchTuner, RandomTuner
def test_gridsearch_tuner():
"""Test GridSearchTuner"""
task, _ = get_sample_task()
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())
# When no range index, range_length should be the length of config space
tuner = autotvm.tuner.GridSearchTuner(task)
assert tuner.range_length == len(task.config_space)
assert tuner.index_offset == 0
# With range index, range_length should be the length of the specified range
tuner = autotvm.tuner.GridSearchTuner(task, range_idx=(8, 15))
assert tuner.range_length == 8
assert tuner.index_offset == 8
# Tuner should only focus on the specified range
tuner.tune(n_trial=8, measure_option=measure_option)
assert tuner.counter == 8
assert not tuner.has_next()
def test_random_tuner():
"""Test RandomTuner"""
task, _ = get_sample_task()
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())
tuner = autotvm.tuner.RandomTuner(task, range_idx=(8, 15))
assert tuner.range_length == 8
assert tuner.index_offset == 8
# Tuner should only focus on the specified range and should visit all indices
tuner.tune(n_trial=8, measure_option=measure_option)
assert tuner.counter == 8
assert not tuner.has_next()
visited = set()
for idx in tuner.visited:
assert idx not in visited
assert 8 <= idx <= 15
if __name__ == "__main__":
test_gridsearch_tuner()
test_random_tuner()
| apache-2.0 |
shubhdev/openedx | lms/djangoapps/open_ended_grading/views.py | 5 | 15392 | import logging
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from courseware.courses import get_course_with_access
from courseware.access import has_access
from courseware.tabs import EnrolledTab
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError
import json
from student.models import unique_id_for_user
from open_ended_grading import open_ended_notifications
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import search
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.exceptions import NoPathToItem
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.utils.translation import ugettext as _
from open_ended_grading.utils import (
STAFF_ERROR_MESSAGE, StudentProblemList, generate_problem_url, create_controller_query_service
)
log = logging.getLogger(__name__)
def _reverse_with_slash(url_name, course_key):
"""
Reverses the URL given the name and the course id, and then adds a trailing slash if
it does not exist yet.
@param url_name: The name of the url (eg 'staff_grading').
    @param course_key: The key of the course (eg course.id).
@returns: The reversed url with a trailing slash.
"""
ajax_url = _reverse_without_slash(url_name, course_key)
if not ajax_url.endswith('/'):
ajax_url += '/'
return ajax_url
def _reverse_without_slash(url_name, course_key):
course_id = course_key.to_deprecated_string()
ajax_url = reverse(url_name, kwargs={'course_id': course_id})
return ajax_url
DESCRIPTION_DICT = {
'Peer Grading': _("View all problems that require peer assessment in this particular course."),
'Staff Grading': _("View ungraded submissions submitted by students for the open ended problems in the course."),
'Problems you have submitted': _("View open ended problems that you have previously submitted for grading."),
'Flagged Submissions': _("View submissions that have been flagged by students as inappropriate."),
}
ALERT_DICT = {
'Peer Grading': _("New submissions to grade"),
'Staff Grading': _("New submissions to grade"),
'Problems you have submitted': _("New grades have been returned"),
'Flagged Submissions': _("Submissions have been flagged for review"),
}
class StaffGradingTab(EnrolledTab):
"""
A tab for staff grading.
"""
type = 'staff_grading'
title = _("Staff grading")
view_name = "staff_grading"
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if user and not has_access(user, 'staff', course, course.id):
return False
return "combinedopenended" in course.advanced_modules
class PeerGradingTab(EnrolledTab):
"""
A tab for peer grading.
"""
type = 'peer_grading'
# Translators: "Peer grading" appears on a tab that allows
# students to view open-ended problems that require grading
title = _("Peer grading")
view_name = "peer_grading"
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(PeerGradingTab, cls).is_enabled(course, user=user):
return False
return "combinedopenended" in course.advanced_modules
class OpenEndedGradingTab(EnrolledTab):
"""
A tab for open ended grading.
"""
type = 'open_ended'
# Translators: "Open Ended Panel" appears on a tab that, when clicked, opens up a panel that
# displays information about open-ended problems that a user has submitted or needs to grade
title = _("Open Ended Panel")
view_name = "open_ended_notifications"
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(OpenEndedGradingTab, cls).is_enabled(course, user=user):
return False
return "combinedopenended" in course.advanced_modules
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def staff_grading(request, course_id):
"""
Show the instructor grading interface.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'staff', course_key)
ajax_url = _reverse_with_slash('staff_grading', course_key)
return render_to_response('instructor/staff_grading.html', {
'course': course,
'course_id': course_id,
'ajax_url': ajax_url,
# Checked above
'staff_access': True, })
def find_peer_grading_module(course):
"""
Given a course, finds the first peer grading module in it.
@param course: A course object.
@return: boolean found_module, string problem_url
"""
# Reverse the base course url.
base_course_url = reverse('courses')
found_module = False
problem_url = ""
# Get the peer grading modules currently in the course. Explicitly specify the course id to avoid issues with different runs.
items = modulestore().get_items(course.id, qualifiers={'category': 'peergrading'})
# See if any of the modules are centralized modules (ie display info from multiple problems)
items = [i for i in items if not getattr(i, "use_for_single_location", True)]
# Loop through all potential peer grading modules, and find the first one that has a path to it.
for item in items:
# Generate a url for the first module and redirect the user to it.
try:
problem_url_parts = search.path_to_location(modulestore(), item.location)
except NoPathToItem:
# In the case of nopathtoitem, the peer grading module that was found is in an invalid state, and
# can no longer be accessed. Log an informational message, but this will not impact normal behavior.
log.info(u"Invalid peer grading module location %s in course %s. This module may need to be removed.", item.location, course.id)
continue
problem_url = generate_problem_url(problem_url_parts, base_course_url)
found_module = True
return found_module, problem_url
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def peer_grading(request, course_id):
'''
When a student clicks on the "peer grading" button in the open ended interface, link them to a peer grading
xmodule in the course.
'''
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
#Get the current course
course = get_course_with_access(request.user, 'load', course_key)
found_module, problem_url = find_peer_grading_module(course)
if not found_module:
error_message = _("""
Error with initializing peer grading.
There has not been a peer grading module created in the courseware that would allow you to grade others.
Please check back later for this.
""")
log.exception(error_message + u"Current course is: {0}".format(course_id))
return HttpResponse(error_message)
return HttpResponseRedirect(problem_url)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def student_problem_list(request, course_id):
"""
Show a list of problems they have attempted to a student.
Fetch the list from the grading controller server and append some data.
@param request: The request object for this view.
@param course_id: The id of the course to get the problem list for.
@return: Renders an HTML problem list table.
"""
assert isinstance(course_id, basestring)
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
# Load the course. Don't catch any errors here, as we want them to be loud.
course = get_course_with_access(request.user, 'load', course_key)
# The anonymous student id is needed for communication with ORA.
student_id = unique_id_for_user(request.user)
base_course_url = reverse('courses')
error_text = ""
student_problem_list = StudentProblemList(course_key, student_id)
# Get the problem list from ORA.
success = student_problem_list.fetch_from_grading_service()
# If we fetched the problem list properly, add in additional problem data.
if success:
# Add in links to problems.
valid_problems = student_problem_list.add_problem_data(base_course_url)
else:
# Get an error message to show to the student.
valid_problems = []
error_text = student_problem_list.error_text
ajax_url = _reverse_with_slash('open_ended_problems', course_key)
context = {
'course': course,
'course_id': course_key.to_deprecated_string(),
'ajax_url': ajax_url,
'success': success,
'problem_list': valid_problems,
'error_text': error_text,
# Checked above
'staff_access': False,
}
return render_to_response('open_ended_problems/open_ended_problems.html', context)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def flagged_problem_list(request, course_id):
'''
Show a student problem list
'''
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'staff', course_key)
# call problem list service
success = False
error_text = ""
problem_list = []
# Make a service that can query edX ORA.
controller_qs = create_controller_query_service()
try:
problem_list_dict = controller_qs.get_flagged_problem_list(course_key)
success = problem_list_dict['success']
if 'error' in problem_list_dict:
error_text = problem_list_dict['error']
problem_list = []
else:
problem_list = problem_list_dict['flagged_submissions']
except GradingServiceError:
#This is a staff_facing_error
error_text = STAFF_ERROR_MESSAGE
#This is a dev_facing_error
log.error("Could not get flagged problem list from external grading service for open ended.")
success = False
# catch error if if the json loads fails
except ValueError:
#This is a staff_facing_error
error_text = STAFF_ERROR_MESSAGE
#This is a dev_facing_error
log.error("Could not parse problem list from external grading service response.")
success = False
ajax_url = _reverse_with_slash('open_ended_flagged_problems', course_key)
context = {
'course': course,
'course_id': course_id,
'ajax_url': ajax_url,
'success': success,
'problem_list': problem_list,
'error_text': error_text,
# Checked above
'staff_access': True,
}
return render_to_response('open_ended_problems/open_ended_flagged_problems.html', context)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def combined_notifications(request, course_id):
"""
Gets combined notifications from the grading controller and displays them
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
user = request.user
notifications = open_ended_notifications.combined_notifications(course, user)
response = notifications['response']
notification_tuples = open_ended_notifications.NOTIFICATION_TYPES
notification_list = []
for response_num in xrange(len(notification_tuples)):
tag = notification_tuples[response_num][0]
if tag in response:
url_name = notification_tuples[response_num][1]
human_name = notification_tuples[response_num][2]
url = _reverse_without_slash(url_name, course_key)
has_img = response[tag]
# check to make sure we have descriptions and alert messages
if human_name in DESCRIPTION_DICT:
description = DESCRIPTION_DICT[human_name]
else:
description = ""
if human_name in ALERT_DICT:
alert_message = ALERT_DICT[human_name]
else:
alert_message = ""
notification_item = {
'url': url,
'name': human_name,
'alert': has_img,
'description': description,
'alert_message': alert_message
}
#The open ended panel will need to link the "peer grading" button in the panel to a peer grading
#xmodule defined in the course. This checks to see if the human name of the server notification
#that we are currently processing is "peer grading". If it is, it looks for a peer grading
#module in the course. If none exists, it removes the peer grading item from the panel.
if human_name == "Peer Grading":
found_module, problem_url = find_peer_grading_module(course)
if found_module:
notification_list.append(notification_item)
else:
notification_list.append(notification_item)
ajax_url = _reverse_with_slash('open_ended_notifications', course_key)
combined_dict = {
'error_text': "",
'notification_list': notification_list,
'course': course,
'success': True,
'ajax_url': ajax_url,
}
return render_to_response('open_ended_problems/combined_notifications.html', combined_dict)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def take_action_on_flags(request, course_id):
"""
Takes action on student flagged submissions.
Currently, only support unflag and ban actions.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
if request.method != 'POST':
raise Http404
required = ['submission_id', 'action_type', 'student_id']
for key in required:
if key not in request.POST:
error_message = u'Missing key {0} from submission. Please reload and try again.'.format(key)
response = {
'success': False,
'error': STAFF_ERROR_MESSAGE + error_message
}
return HttpResponse(json.dumps(response), mimetype="application/json")
p = request.POST
submission_id = p['submission_id']
action_type = p['action_type']
student_id = p['student_id']
student_id = student_id.strip(' \t\n\r')
submission_id = submission_id.strip(' \t\n\r')
action_type = action_type.lower().strip(' \t\n\r')
# Make a service that can query edX ORA.
controller_qs = create_controller_query_service()
try:
response = controller_qs.take_action_on_flags(course_key, student_id, submission_id, action_type)
return HttpResponse(json.dumps(response), mimetype="application/json")
except GradingServiceError:
log.exception(
u"Error taking action on flagged peer grading submissions, "
u"submission_id: {0}, action_type: {1}, grader_id: {2}"
.format(submission_id, action_type, student_id)
)
response = {
'success': False,
'error': STAFF_ERROR_MESSAGE
}
return HttpResponse(json.dumps(response), mimetype="application/json")
| agpl-3.0 |
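combined_notifications() above indexes each NOTIFICATION_TYPES entry as (response_tag, url_name, human_name); the module defining it isn't shown, but the loop implies a shape like this (the tag strings are inferred, while the url and human names come from the views above):

NOTIFICATION_TYPES = [
    ('staff_needs_to_grade', 'staff_grading', 'Staff Grading'),
    ('student_needs_to_peer_grade', 'peer_grading', 'Peer Grading'),
    ('new_student_grading_to_view', 'open_ended_problems',
     'Problems you have submitted'),
    ('flagged_submissions_exist', 'open_ended_flagged_problems',
     'Flagged Submissions'),
]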
Jonekee/chromium.src | chrome/common/extensions/docs/server2/document_renderer.py | 85 | 4614 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from document_parser import ParseDocument
from platform_util import ExtractPlatformFromURL
from third_party.json_schema_compiler.model import UnixName
class DocumentRenderer(object):
'''Performs document-level rendering such as the title, references,
and table of contents: pulling that data out of the document, then
replacing the $(title), $(ref:...) and $(table_of_contents) tokens with them.
This can be thought of as a parallel to TemplateRenderer; while
TemplateRenderer is responsible for interpreting templates and rendering files
within the template engine, DocumentRenderer is responsible for interpreting
higher-level document concepts like the title and TOC, then performing string
replacement for them. The syntax for this replacement is $(...) where ... is
the concept. Currently title and table_of_contents are supported.
'''
def __init__(self, table_of_contents_renderer, platform_bundle):
self._table_of_contents_renderer = table_of_contents_renderer
self._platform_bundle = platform_bundle
def _RenderLinks(self, document, path):
''' Replaces all $(ref:...) references in |document| with html links.
References have two forms:
$(ref:api.node) - Replaces the reference with a link to node on the
API page. The title is set to the name of the node.
$(ref:api.node Title) - Same as the previous form, but title is set
to "Title".
'''
START_REF = '$(ref:'
END_REF = ')'
MAX_REF_LENGTH = 256
new_document = []
# Keeps track of position within |document|
cursor_index = 0
start_ref_index = document.find(START_REF)
while start_ref_index != -1:
end_ref_index = document.find(END_REF, start_ref_index)
if (end_ref_index == -1 or
end_ref_index - start_ref_index > MAX_REF_LENGTH):
end_ref_index = document.find(' ', start_ref_index)
logging.error('%s:%s has no terminating ) at line %s' % (
path,
document[start_ref_index:end_ref_index],
document.count('\n', 0, end_ref_index)))
new_document.append(document[cursor_index:end_ref_index + 1])
else:
ref = document[start_ref_index:end_ref_index]
ref_parts = ref[len(START_REF):].split(None, 1)
# Guess the api name from the html name, replacing '_' with '.' (e.g.
# if the page is app_window.html, guess the api name is app.window)
api_name = os.path.splitext(os.path.basename(path))[0].replace('_', '.')
title = ref_parts[0] if len(ref_parts) == 1 else ref_parts[1]
platform = ExtractPlatformFromURL(path)
        if platform is None:
          logging.error('Cannot resolve reference without a platform.')
          # Keep the raw reference and advance past it so the scan makes
          # progress instead of looping on the same $(ref:...) token.
          new_document.append(document[cursor_index:end_ref_index + 1])
          cursor_index = end_ref_index + 1
          start_ref_index = document.find(START_REF, cursor_index)
          continue
ref_dict = self._platform_bundle.GetReferenceResolver(
platform).SafeGetLink(ref_parts[0],
namespace=api_name,
title=title,
path=path)
new_document.append(document[cursor_index:start_ref_index])
new_document.append('<a href=%s/%s>%s</a>' % (
self._platform_bundle._base_path + platform,
ref_dict['href'],
ref_dict['text']))
cursor_index = end_ref_index + 1
start_ref_index = document.find(START_REF, cursor_index)
new_document.append(document[cursor_index:])
return ''.join(new_document)
def Render(self, document, path, render_title=False):
''' |document|: document to be rendered.
|path|: request path to the document.
|render_title|: boolean representing whether or not to render a title.
'''
# Render links first so that parsing and later replacements aren't
# affected by $(ref...) substitutions
document = self._RenderLinks(document, path)
parsed_document = ParseDocument(document, expect_title=render_title)
toc_text, toc_warnings = self._table_of_contents_renderer.Render(
parsed_document.sections)
# Only 1 title and 1 table of contents substitution allowed; in the common
# case, save necessarily running over the entire file.
if parsed_document.title:
document = document.replace('$(title)', parsed_document.title, 1)
return (document.replace('$(table_of_contents)', toc_text, 1),
parsed_document.warnings + toc_warnings)
| bsd-3-clause |
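Concretely, the two $(ref:...) forms handled by _RenderLinks come out as anchors; the hrefs below are illustrative only, since the real ones come from the platform's reference resolver:

# Input in the page source          Rendered output (illustrative href)
# $(ref:storage.local)        ->    <a href=/apps/storage.html#property-local>storage.local</a>
# $(ref:storage.local Title)  ->    <a href=/apps/storage.html#property-local>Title</a>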
hifly/OpenUpgrade | addons/account/account_bank_statement.py | 24 | 56320 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools import float_is_zero
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.report import report_sxw
from openerp.tools import float_compare, float_round
import time
class account_bank_statement(osv.osv):
def create(self, cr, uid, vals, context=None):
if vals.get('name', '/') == '/':
journal_id = vals.get('journal_id', self._default_journal_id(cr, uid, context=context))
vals['name'] = self._compute_default_statement_name(cr, uid, journal_id, context=context)
if 'line_ids' in vals:
for idx, line in enumerate(vals['line_ids']):
line[2]['sequence'] = idx + 1
return super(account_bank_statement, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
res = super(account_bank_statement, self).write(cr, uid, ids, vals, context=context)
account_bank_statement_line_obj = self.pool.get('account.bank.statement.line')
for statement in self.browse(cr, uid, ids, context):
for idx, line in enumerate(statement.line_ids):
account_bank_statement_line_obj.write(cr, uid, [line.id], {'sequence': idx + 1}, context=context)
return res
def _default_journal_id(self, cr, uid, context=None):
if context is None:
context = {}
journal_pool = self.pool.get('account.journal')
journal_type = context.get('journal_type', False)
company_id = self.pool.get('res.company')._company_default_get(cr, uid, 'account.bank.statement',context=context)
if journal_type:
ids = journal_pool.search(cr, uid, [('type', '=', journal_type),('company_id','=',company_id)])
if ids:
return ids[0]
return False
def _end_balance(self, cursor, user, ids, name, attr, context=None):
res = {}
for statement in self.browse(cursor, user, ids, context=context):
res[statement.id] = statement.balance_start
for line in statement.line_ids:
res[statement.id] += line.amount
return res
def _get_period(self, cr, uid, context=None):
periods = self.pool.get('account.period').find(cr, uid, context=context)
if periods:
return periods[0]
return False
def _compute_default_statement_name(self, cr, uid, journal_id, context=None):
context = dict(context or {})
obj_seq = self.pool.get('ir.sequence')
period = self.pool.get('account.period').browse(cr, uid, self._get_period(cr, uid, context=context), context=context)
context['fiscalyear_id'] = period.fiscalyear_id.id
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, None)
return obj_seq.next_by_id(cr, uid, journal.sequence_id.id, context=context)
def _currency(self, cursor, user, ids, name, args, context=None):
res = {}
res_currency_obj = self.pool.get('res.currency')
res_users_obj = self.pool.get('res.users')
default_currency = res_users_obj.browse(cursor, user,
user, context=context).company_id.currency_id
for statement in self.browse(cursor, user, ids, context=context):
currency = statement.journal_id.currency
if not currency:
currency = default_currency
res[statement.id] = currency.id
currency_names = {}
for currency_id, currency_name in res_currency_obj.name_get(cursor,
user, [x for x in res.values()], context=context):
currency_names[currency_id] = currency_name
for statement_id in res.keys():
currency_id = res[statement_id]
res[statement_id] = (currency_id, currency_names[currency_id])
return res
def _get_statement(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('account.bank.statement.line').browse(cr, uid, ids, context=context):
result[line.statement_id.id] = True
return result.keys()
def _all_lines_reconciled(self, cr, uid, ids, name, args, context=None):
res = {}
for statement in self.browse(cr, uid, ids, context=context):
res[statement.id] = all([line.journal_entry_id.id or line.account_id.id for line in statement.line_ids])
return res
_order = "date desc, id desc"
_name = "account.bank.statement"
_description = "Bank Statement"
_inherit = ['mail.thread']
_columns = {
'name': fields.char(
'Reference', states={'draft': [('readonly', False)]},
readonly=True, # readonly for account_cash_statement
copy=False,
help='If you give a name other than /, the created accounting entries '
'will have the same name as the statement. '
'This allows the statement entries to have the same references as the '
'statement itself'),
'date': fields.date('Date', required=True, states={'confirm': [('readonly', True)]},
select=True, copy=False),
'journal_id': fields.many2one('account.journal', 'Journal', required=True,
readonly=True, states={'draft':[('readonly',False)]}),
'period_id': fields.many2one('account.period', 'Period', required=True,
states={'confirm':[('readonly', True)]}),
'balance_start': fields.float('Starting Balance', digits_compute=dp.get_precision('Account'),
states={'confirm':[('readonly',True)]}),
'balance_end_real': fields.float('Ending Balance', digits_compute=dp.get_precision('Account'),
states={'confirm': [('readonly', True)]}, help="Computed using the cash control lines"),
'balance_end': fields.function(_end_balance,
store = {
'account.bank.statement': (lambda self, cr, uid, ids, c={}: ids, ['line_ids','move_line_ids','balance_start'], 10),
'account.bank.statement.line': (_get_statement, ['amount'], 10),
},
string="Computed Balance", help='Balance as calculated based on Opening Balance and transaction lines'),
'company_id': fields.related('journal_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'line_ids': fields.one2many('account.bank.statement.line',
'statement_id', 'Statement lines',
states={'confirm':[('readonly', True)]}, copy=True),
'move_line_ids': fields.one2many('account.move.line', 'statement_id',
'Entry lines', states={'confirm':[('readonly',True)]}),
'state': fields.selection([('draft', 'New'),
('open','Open'), # used by cash statements
('confirm', 'Closed')],
'Status', required=True, readonly="1",
copy=False,
help='When a new statement is created, the status is \'Draft\'.\n'
'Once the bank confirms the statement, the status is set to \'Confirmed\'.'),
'currency': fields.function(_currency, string='Currency',
type='many2one', relation='res.currency'),
'account_id': fields.related('journal_id', 'default_debit_account_id', type='many2one', relation='account.account', string='Account used in this journal', readonly=True, help='Used in the statement reconciliation domain, but shouldn\'t be used elsewhere.'),
'cash_control': fields.related('journal_id', 'cash_control' , type='boolean', relation='account.journal',string='Cash control'),
'all_lines_reconciled': fields.function(_all_lines_reconciled, string='All lines reconciled', type='boolean'),
}
_defaults = {
'name': '/',
'date': fields.date.context_today,
'state': 'draft',
'journal_id': _default_journal_id,
'period_id': _get_period,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.bank.statement',context=c),
}
def _check_company_id(self, cr, uid, ids, context=None):
for statement in self.browse(cr, uid, ids, context=context):
if statement.company_id.id != statement.period_id.company_id.id:
return False
return True
_constraints = [
(_check_company_id, 'The journal and period chosen have to belong to the same company.', ['journal_id','period_id']),
]
def onchange_date(self, cr, uid, ids, date, company_id, context=None):
"""
Find the correct period to use for the given date and company_id, return it and set it in the context
"""
res = {}
period_pool = self.pool.get('account.period')
if context is None:
context = {}
ctx = context.copy()
ctx.update({'company_id': company_id})
pids = period_pool.find(cr, uid, dt=date, context=ctx)
if pids:
res.update({'period_id': pids[0]})
context = dict(context, period_id=pids[0])
return {
'value':res,
'context':context,
}
def button_dummy(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {}, context=context)
def _prepare_move(self, cr, uid, st_line, st_line_number, context=None):
"""Prepare the dict of values to create the move from a
statement line. This method may be overridden to implement custom
move generation (making sure to call super() to establish
a clean extension chain).
:param browse_record st_line: account.bank.statement.line record to
create the move from.
:param char st_line_number: will be used as the name of the generated account move
:return: dict of value to create() the account.move
"""
return {
'journal_id': st_line.statement_id.journal_id.id,
'period_id': st_line.statement_id.period_id.id,
'date': st_line.date,
'name': st_line_number,
'ref': st_line.ref,
}
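# Editorial sketch (not part of the original module): a custom module could
# extend these move values while keeping the extension chain intact, e.g.:
#   class my_bank_statement(osv.osv):
#       _inherit = 'account.bank.statement'
#       def _prepare_move(self, cr, uid, st_line, st_line_number, context=None):
#           vals = super(my_bank_statement, self)._prepare_move(
#               cr, uid, st_line, st_line_number, context=context)
#           vals['narration'] = st_line.note  # hypothetical extra value
#           return vals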
def _get_counter_part_account(self, cr, uid, st_line, context=None):
"""Retrieve the account to use in the counterpart move.
:param browse_record st_line: account.bank.statement.line record to create the move from.
:return: int/long of the account.account to use as counterpart
"""
if st_line.amount >= 0:
return st_line.statement_id.journal_id.default_credit_account_id.id
return st_line.statement_id.journal_id.default_debit_account_id.id
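# Sign convention (illustrative): a statement line of +100.0 (money coming in)
# is balanced by the journal's default credit account, while a line of -100.0
# (money going out) is balanced by the default debit account.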
def _get_counter_part_partner(self, cr, uid, st_line, context=None):
"""Retrieve the partner to use in the counterpart move.
:param browse_record st_line: account.bank.statement.line record to create the move from.
:return: int/long of the res.partner to use as counterpart
"""
return st_line.partner_id and st_line.partner_id.id or False
def _prepare_bank_move_line(self, cr, uid, st_line, move_id, amount, company_currency_id, context=None):
"""Compute the args to build the dict of values to create the counter part move line from a
statement line by calling the _prepare_move_line_vals.
:param browse_record st_line: account.bank.statement.line record to create the move from.
:param int/long move_id: ID of the account.move to link the move line
:param float amount: amount of the move line
:param int/long company_currency_id: ID of currency of the concerned company
:return: dict of value to create() the bank account.move.line
"""
account_id = self._get_counter_part_account(cr, uid, st_line, context=context)
partner_id = self._get_counter_part_partner(cr, uid, st_line, context=context)
debit = ((amount > 0) and amount) or 0.0
credit = ((amount < 0) and -amount) or 0.0
cur_id = False
amt_cur = False
if st_line.statement_id.currency.id != company_currency_id:
amt_cur = st_line.amount
cur_id = st_line.statement_id.currency.id
elif st_line.currency_id and st_line.amount_currency:
amt_cur = st_line.amount_currency
cur_id = st_line.currency_id.id
return self._prepare_move_line_vals(cr, uid, st_line, move_id, debit, credit,
amount_currency=amt_cur, currency_id=cur_id, account_id=account_id,
partner_id=partner_id, context=context)
def _prepare_move_line_vals(self, cr, uid, st_line, move_id, debit, credit, currency_id=False,
amount_currency=False, account_id=False, partner_id=False, context=None):
"""Prepare the dict of values to create the move line from a
statement line.
:param browse_record st_line: account.bank.statement.line record to
create the move from.
:param int/long move_id: ID of the account.move to link the move line
:param float debit: debit amount of the move line
:param float credit: credit amount of the move line
:param int/long currency_id: ID of currency of the move line to create
:param float amount_currency: amount of the debit/credit expressed in the currency_id
:param int/long account_id: ID of the account to use in the move line if different
from the statement line account ID
:param int/long partner_id: ID of the partner to put on the move line
:return: dict of value to create() the account.move.line
"""
acc_id = account_id or st_line.account_id.id
cur_id = currency_id or st_line.statement_id.currency.id
par_id = partner_id or (((st_line.partner_id) and st_line.partner_id.id) or False)
return {
'name': st_line.name,
'date': st_line.date,
'ref': st_line.ref,
'move_id': move_id,
'partner_id': par_id,
'account_id': acc_id,
'credit': credit,
'debit': debit,
'statement_id': st_line.statement_id.id,
'journal_id': st_line.statement_id.journal_id.id,
'period_id': st_line.statement_id.period_id.id,
'currency_id': amount_currency and cur_id,
'amount_currency': amount_currency,
}
def balance_check(self, cr, uid, st_id, journal_type='bank', context=None):
st = self.browse(cr, uid, st_id, context=context)
if abs((st.balance_end or 0.0) - st.balance_end_real) >= 0.0001:
raise osv.except_osv(_('Error!'),
_('The statement balance is incorrect!\nThe expected balance (%.2f) is different from the computed one (%.2f).') % (st.balance_end_real, st.balance_end))
return True
def statement_close(self, cr, uid, ids, journal_type='bank', context=None):
return self.write(cr, uid, ids, {'state':'confirm'}, context=context)
def check_status_condition(self, cr, uid, state, journal_type='bank'):
return state in ('draft','open')
def button_confirm_bank(self, cr, uid, ids, context=None):
if context is None:
context = {}
for st in self.browse(cr, uid, ids, context=context):
j_type = st.journal_id.type
if not self.check_status_condition(cr, uid, st.state, journal_type=j_type):
continue
self.balance_check(cr, uid, st.id, journal_type=j_type, context=context)
if (not st.journal_id.default_credit_account_id) \
or (not st.journal_id.default_debit_account_id):
raise osv.except_osv(_('Configuration Error!'), _('Please verify that an account is defined in the journal.'))
for line in st.move_line_ids:
if line.state != 'valid':
raise osv.except_osv(_('Error!'), _('The account entry lines are not in a valid state.'))
move_ids = []
for st_line in st.line_ids:
if not st_line.amount:
continue
if st_line.account_id and not st_line.journal_entry_id.id:
#make an account move as before
vals = {
'debit': st_line.amount < 0 and -st_line.amount or 0.0,
'credit': st_line.amount > 0 and st_line.amount or 0.0,
'account_id': st_line.account_id.id,
'name': st_line.name
}
self.pool.get('account.bank.statement.line').process_reconciliation(cr, uid, st_line.id, [vals], context=context)
elif not st_line.journal_entry_id.id:
raise osv.except_osv(_('Error!'), _('All the account entry lines must be processed in order to close the statement.'))
move_ids.append(st_line.journal_entry_id.id)
if move_ids:
self.pool.get('account.move').post(cr, uid, move_ids, context=context)
self.message_post(cr, uid, [st.id], body=_('Statement %s confirmed, journal items were created.') % (st.name,), context=context)
self.link_bank_to_partner(cr, uid, ids, context=context)
return self.write(cr, uid, ids, {'state': 'confirm', 'closing_date': time.strftime("%Y-%m-%d %H:%M:%S")}, context=context)
def button_cancel(self, cr, uid, ids, context=None):
bnk_st_line_ids = []
for st in self.browse(cr, uid, ids, context=context):
bnk_st_line_ids += [line.id for line in st.line_ids]
self.pool.get('account.bank.statement.line').cancel(cr, uid, bnk_st_line_ids, context=context)
return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def _compute_balance_end_real(self, cr, uid, journal_id, context=None):
res = False
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
if journal.with_last_closing_balance:
cr.execute('SELECT balance_end_real \
FROM account_bank_statement \
WHERE journal_id = %s AND NOT state = %s \
ORDER BY date DESC,id DESC LIMIT 1', (journal_id, 'draft'))
res = cr.fetchone()
return res and res[0] or 0.0
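# Illustrative behaviour (assumed data): if the journal is configured with
# with_last_closing_balance and its most recent non-draft statement ended at
# 250.0, a newly created statement for that journal starts at 250.0.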
def onchange_journal_id(self, cr, uid, statement_id, journal_id, context=None):
if not journal_id:
return {}
balance_start = self._compute_balance_end_real(cr, uid, journal_id, context=context)
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
currency = journal.currency or journal.company_id.currency_id
res = {'balance_start': balance_start, 'company_id': journal.company_id.id, 'currency': currency.id}
if journal.type == 'cash':
res['cash_control'] = journal.cash_control
return {'value': res}
def unlink(self, cr, uid, ids, context=None):
statement_line_obj = self.pool['account.bank.statement.line']
for item in self.browse(cr, uid, ids, context=context):
if item.state != 'draft':
raise osv.except_osv(
_('Invalid Action!'),
_('In order to delete a bank statement, you must first cancel it to delete related journal items.')
)
# Explicitly unlink bank statement lines
# so it will check that the related journal entries have
# been deleted first
statement_line_obj.unlink(cr, uid, [line.id for line in item.line_ids], context=context)
return super(account_bank_statement, self).unlink(cr, uid, ids, context=context)
def button_journal_entries(self, cr, uid, ids, context=None):
ctx = (context or {}).copy()
ctx['journal_id'] = self.browse(cr, uid, ids[0], context=context).journal_id.id
return {
'name': _('Journal Items'),
'view_type':'form',
'view_mode':'tree',
'res_model':'account.move.line',
'view_id':False,
'type':'ir.actions.act_window',
'domain':[('statement_id','in',ids)],
'context':ctx,
}
def number_of_lines_reconciled(self, cr, uid, ids, context=None):
bsl_obj = self.pool.get('account.bank.statement.line')
return bsl_obj.search_count(cr, uid, [('statement_id', 'in', ids), ('journal_entry_id', '!=', False)], context=context)
def link_bank_to_partner(self, cr, uid, ids, context=None):
for statement in self.browse(cr, uid, ids, context=context):
for st_line in statement.line_ids:
if st_line.bank_account_id and st_line.partner_id and st_line.bank_account_id.partner_id.id != st_line.partner_id.id:
# Update the partner information of the bank account, possibly overriding existing values
bank_obj = self.pool.get('res.partner.bank')
bank_vals = bank_obj.onchange_partner_id(cr, uid, [st_line.bank_account_id.id], st_line.partner_id.id, context=context)['value']
bank_vals.update({'partner_id': st_line.partner_id.id})
bank_obj.write(cr, uid, [st_line.bank_account_id.id], bank_vals, context=context)
class account_bank_statement_line(osv.osv):
def create(self, cr, uid, vals, context=None):
if vals.get('amount_currency', 0) and not vals.get('amount', 0):
raise osv.except_osv(_('Error!'), _('If "Amount Currency" is specified, then "Amount" must be as well.'))
return super(account_bank_statement_line, self).create(cr, uid, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
for item in self.browse(cr, uid, ids, context=context):
if item.journal_entry_id:
raise osv.except_osv(
_('Invalid Action!'),
_('In order to delete a bank statement line, you must first cancel it to delete related journal items.')
)
return super(account_bank_statement_line, self).unlink(cr, uid, ids, context=context)
def cancel(self, cr, uid, ids, context=None):
account_move_obj = self.pool.get('account.move')
move_ids = []
for line in self.browse(cr, uid, ids, context=context):
if line.journal_entry_id:
move_ids.append(line.journal_entry_id.id)
for aml in line.journal_entry_id.line_id:
if aml.reconcile_id:
move_lines = [l.id for l in aml.reconcile_id.line_id]
move_lines.remove(aml.id)
self.pool.get('account.move.reconcile').unlink(cr, uid, [aml.reconcile_id.id], context=context)
if len(move_lines) >= 2:
self.pool.get('account.move.line').reconcile_partial(cr, uid, move_lines, 'auto', context=context)
if move_ids:
account_move_obj.button_cancel(cr, uid, move_ids, context=context)
account_move_obj.unlink(cr, uid, move_ids, context)
def get_data_for_reconciliations(self, cr, uid, ids, excluded_ids=None, search_reconciliation_proposition=True, context=None):
""" Returns the data required to display a reconciliation, for each statement line id in ids """
ret = []
if excluded_ids is None:
excluded_ids = []
for st_line in self.browse(cr, uid, ids, context=context):
reconciliation_data = {}
if search_reconciliation_proposition:
reconciliation_proposition = self.get_reconciliation_proposition(cr, uid, st_line, excluded_ids=excluded_ids, context=context)
for mv_line in reconciliation_proposition:
excluded_ids.append(mv_line['id'])
reconciliation_data['reconciliation_proposition'] = reconciliation_proposition
else:
reconciliation_data['reconciliation_proposition'] = []
st_line = self.get_statement_line_for_reconciliation(cr, uid, st_line, context=context)
reconciliation_data['st_line'] = st_line
ret.append(reconciliation_data)
return ret
def get_statement_line_for_reconciliation(self, cr, uid, st_line, context=None):
""" Returns the data required by the bank statement reconciliation widget to display a statement line """
if context is None:
context = {}
statement_currency = st_line.journal_id.currency or st_line.journal_id.company_id.currency_id
rml_parser = report_sxw.rml_parse(cr, uid, 'reconciliation_widget_asl', context=context)
if st_line.amount_currency and st_line.currency_id:
amount = st_line.amount_currency
amount_currency = st_line.amount
amount_currency_str = amount_currency > 0 and amount_currency or -amount_currency
amount_currency_str = rml_parser.formatLang(amount_currency_str, currency_obj=statement_currency)
else:
amount = st_line.amount
amount_currency_str = ""
amount_str = amount > 0 and amount or -amount
amount_str = rml_parser.formatLang(amount_str, currency_obj=st_line.currency_id or statement_currency)
data = {
'id': st_line.id,
'ref': st_line.ref,
'note': st_line.note or "",
'name': st_line.name,
'date': st_line.date,
'amount': amount,
'amount_str': amount_str, # Amount in the statement line currency
'currency_id': st_line.currency_id.id or statement_currency.id,
'partner_id': st_line.partner_id.id,
'statement_id': st_line.statement_id.id,
'account_code': st_line.journal_id.default_debit_account_id.code,
'account_name': st_line.journal_id.default_debit_account_id.name,
'partner_name': st_line.partner_id.name,
'communication_partner_name': st_line.partner_name,
'amount_currency_str': amount_currency_str, # Amount in the statement currency
'has_no_partner': not st_line.partner_id.id,
}
if st_line.partner_id.id:
if amount > 0:
data['open_balance_account_id'] = st_line.partner_id.property_account_receivable.id
else:
data['open_balance_account_id'] = st_line.partner_id.property_account_payable.id
return data
def _domain_reconciliation_proposition(self, cr, uid, st_line, excluded_ids=None, context=None):
if excluded_ids is None:
excluded_ids = []
domain = [('ref', '=', st_line.name),
('reconcile_id', '=', False),
('state', '=', 'valid'),
('account_id.reconcile', '=', True),
('id', 'not in', excluded_ids)]
return domain
def get_reconciliation_proposition(self, cr, uid, st_line, excluded_ids=None, context=None):
""" Returns move lines that constitute the best guess to reconcile a statement line. """
mv_line_pool = self.pool.get('account.move.line')
# Look for structured communication
if st_line.name:
domain = self._domain_reconciliation_proposition(cr, uid, st_line, excluded_ids=excluded_ids, context=context)
match_id = mv_line_pool.search(cr, uid, domain, offset=0, limit=2, context=context)
if match_id and len(match_id) == 1:
mv_line_br = mv_line_pool.browse(cr, uid, match_id, context=context)
target_currency = st_line.currency_id or st_line.journal_id.currency or st_line.journal_id.company_id.currency_id
mv_line = mv_line_pool.prepare_move_lines_for_reconciliation_widget(cr, uid, mv_line_br, target_currency=target_currency, target_date=st_line.date, context=context)[0]
mv_line['has_no_partner'] = not bool(st_line.partner_id.id)
# If the structured communication matches a move line that is associated with a partner, we can safely associate the statement line with the partner
if (mv_line['partner_id']):
self.write(cr, uid, st_line.id, {'partner_id': mv_line['partner_id']}, context=context)
mv_line['has_no_partner'] = False
return [mv_line]
# How to compare statement line amount and move lines amount
precision_digits = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
currency_id = st_line.currency_id.id or st_line.journal_id.currency.id
# NB : amount can't be == 0 ; so float precision is not an issue for amount > 0 or amount < 0
amount = st_line.amount_currency or st_line.amount
domain = [('reconcile_partial_id', '=', False)]
if currency_id:
domain += [('currency_id', '=', currency_id)]
sign = 1 # correct the fact that st_line.amount is signed and debit/credit is not
amount_field = 'debit'
if currency_id == False:
if amount < 0:
amount_field = 'credit'
sign = -1
else:
amount_field = 'amount_currency'
# Look for a matching amount
domain_exact_amount = domain + [(amount_field, '=', float_round(sign * amount, precision_digits=precision_digits))]
match_id = self.get_move_lines_for_reconciliation(cr, uid, st_line, excluded_ids=excluded_ids, offset=0, limit=2, additional_domain=domain_exact_amount)
if match_id and len(match_id) == 1:
return match_id
if not st_line.partner_id.id:
return []
# Look for a set of move line whose amount is <= to the line's amount
if amount > 0: # Make sure we can't mix receivable and payable
domain += [('account_id.type', '=', 'receivable')]
else:
domain += [('account_id.type', '=', 'payable')]
if amount_field == 'amount_currency' and amount < 0:
domain += [(amount_field, '<', 0), (amount_field, '>', (sign * amount))]
else:
domain += [(amount_field, '>', 0), (amount_field, '<', (sign * amount))]
mv_lines = self.get_move_lines_for_reconciliation(cr, uid, st_line, excluded_ids=excluded_ids, limit=5, additional_domain=domain, context=context)
ret = []
total = 0
for line in mv_lines:
total += abs(line['debit'] - line['credit'])
if float_compare(total, abs(amount), precision_digits=precision_digits) != 1:
ret.append(line)
else:
break
return ret
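# Matching cascade sketch (illustrative): for a line named 'INV/2014/0042' of
# +100.0, the proposition first looks for a single move line whose ref is
# exactly 'INV/2014/0042'; failing that, for a single move line of exactly
# 100.0; failing that, for receivable lines of the partner whose amounts sum
# to at most 100.0.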
def get_move_lines_for_reconciliation_by_statement_line_id(self, cr, uid, st_line_id, excluded_ids=None, str=False, offset=0, limit=None, count=False, additional_domain=None, context=None):
""" Bridge between the web client reconciliation widget and get_move_lines_for_reconciliation (which expects a browse record) """
if excluded_ids is None:
excluded_ids = []
if additional_domain is None:
additional_domain = []
st_line = self.browse(cr, uid, st_line_id, context=context)
return self.get_move_lines_for_reconciliation(cr, uid, st_line, excluded_ids, str, offset, limit, count, additional_domain, context=context)
def _domain_move_lines_for_reconciliation(self, cr, uid, st_line, excluded_ids=None, str=False, additional_domain=None, context=None):
if excluded_ids is None:
excluded_ids = []
if additional_domain is None:
additional_domain = []
# Make domain
domain = additional_domain + [
('reconcile_id', '=', False),
('state', '=', 'valid'),
('account_id.reconcile', '=', True)
]
if st_line.partner_id.id:
domain += [('partner_id', '=', st_line.partner_id.id)]
if excluded_ids:
domain.append(('id', 'not in', excluded_ids))
if str:
domain += [
'|', ('move_id.name', 'ilike', str),
'|', ('move_id.ref', 'ilike', str),
('date_maturity', 'like', str),
]
if not st_line.partner_id.id:
domain.insert(-1, '|', )
domain.append(('partner_id.name', 'ilike', str))
if str != '/':
domain.insert(-1, '|', )
domain.append(('name', 'ilike', str))
return domain
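# Illustrative example (assumed ids and search string): for a line whose
# partner id is 42 and a search string 'INV', the returned domain is roughly:
#   [('reconcile_id', '=', False), ('state', '=', 'valid'),
#    ('account_id.reconcile', '=', True), ('partner_id', '=', 42),
#    '|', ('move_id.name', 'ilike', 'INV'),
#    '|', ('move_id.ref', 'ilike', 'INV'),
#    '|', ('date_maturity', 'like', 'INV'),
#    ('name', 'ilike', 'INV')]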
def get_move_lines_for_reconciliation(self, cr, uid, st_line, excluded_ids=None, str=False, offset=0, limit=None, count=False, additional_domain=None, context=None):
""" Find the move lines that could be used to reconcile a statement line. If count is true, only returns the count.
:param st_line: the browse record of the statement line
:param integers list excluded_ids: ids of move lines that should not be fetched
:param boolean count: just return the number of records
:param tuples list additional_domain: additional domain restrictions
"""
mv_line_pool = self.pool.get('account.move.line')
domain = self._domain_move_lines_for_reconciliation(cr, uid, st_line, excluded_ids=excluded_ids, str=str, additional_domain=additional_domain, context=context)
# Get move lines ; in case of a partial reconciliation, only keep one line (the first whose amount is greater than
# the residual amount because it is presumably the invoice, which is the relevant item in this situation)
filtered_lines = []
reconcile_partial_ids = []
actual_offset = offset
while True:
line_ids = mv_line_pool.search(cr, uid, domain, offset=actual_offset, limit=limit, order="date_maturity asc, id asc", context=context)
lines = mv_line_pool.browse(cr, uid, line_ids, context=context)
make_one_more_loop = False
for line in lines:
if line.reconcile_partial_id and \
(line.reconcile_partial_id.id in reconcile_partial_ids or \
abs(line.debit - line.credit) < abs(line.amount_residual)):
#if we filtered a line because it is partially reconciled with an already selected line, we must do one more loop
#in order to get the right number of items in the pager
make_one_more_loop = True
continue
filtered_lines.append(line)
if line.reconcile_partial_id:
reconcile_partial_ids.append(line.reconcile_partial_id.id)
if not limit or not make_one_more_loop or len(filtered_lines) >= limit:
break
actual_offset = actual_offset + limit
lines = limit and filtered_lines[:limit] or filtered_lines
# Either return number of lines
if count:
return len(lines)
# Or return list of dicts representing the formatted move lines
else:
target_currency = st_line.currency_id or st_line.journal_id.currency or st_line.journal_id.company_id.currency_id
mv_lines = mv_line_pool.prepare_move_lines_for_reconciliation_widget(cr, uid, lines, target_currency=target_currency, target_date=st_line.date, context=context)
has_no_partner = not bool(st_line.partner_id.id)
for line in mv_lines:
line['has_no_partner'] = has_no_partner
return mv_lines
def get_currency_rate_line(self, cr, uid, st_line, currency_diff, move_id, context=None):
if currency_diff < 0:
account_id = st_line.company_id.expense_currency_exchange_account_id.id
if not account_id:
raise osv.except_osv(_('Insufficient Configuration!'), _("You should configure the 'Loss Exchange Rate Account' in the accounting settings, to automatically manage the booking of accounting entries related to differences between exchange rates."))
else:
account_id = st_line.company_id.income_currency_exchange_account_id.id
if not account_id:
raise osv.except_osv(_('Insufficient Configuration!'), _("You should configure the 'Gain Exchange Rate Account' in the accounting settings, to automatically manage the booking of accounting entries related to differences between exchange rates."))
return {
'move_id': move_id,
'name': _('change') + ': ' + (st_line.name or '/'),
'period_id': st_line.statement_id.period_id.id,
'journal_id': st_line.journal_id.id,
'partner_id': st_line.partner_id.id,
'company_id': st_line.company_id.id,
'statement_id': st_line.statement_id.id,
'debit': currency_diff < 0 and -currency_diff or 0,
'credit': currency_diff > 0 and currency_diff or 0,
'amount_currency': 0.0,
'date': st_line.date,
'account_id': account_id
}
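# Sign convention (illustrative): a currency_diff of -5.0 books a 5.0 debit
# on the company's loss exchange rate account, while a currency_diff of +5.0
# books a 5.0 credit on the gain account.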
def _get_exchange_lines(self, cr, uid, st_line, mv_line, currency_diff, currency_id, move_id, context=None):
'''
Prepare the two lines in company currency due to currency rate difference.
:param line: browse record of the voucher.line for which we want to create currency rate difference accounting
entries
:param move_id: Account move where the move lines will be added.
:param currency_diff: Amount to be posted.
:param company_currency: id of currency of the company to which the voucher belongs
:param current_currency: id of currency of the voucher
:return: the account move line and its counterpart to create, depicted as mapping between fieldname and value
:rtype: tuple of dict
'''
if currency_diff > 0:
exchange_account_id = st_line.company_id.expense_currency_exchange_account_id.id
else:
exchange_account_id = st_line.company_id.income_currency_exchange_account_id.id
# Even if the amount_currency is never filled, we need to pass the foreign currency because otherwise
# the receivable/payable account may have a secondary currency, which renders this field mandatory
if mv_line.account_id.currency_id:
account_currency_id = mv_line.account_id.currency_id.id
else:
account_currency_id = st_line.company_id.currency_id.id != currency_id and currency_id or False
move_line = {
'journal_id': st_line.journal_id.id,
'period_id': st_line.statement_id.period_id.id,
'name': _('change') + ': ' + (st_line.name or '/'),
'account_id': mv_line.account_id.id,
'move_id': move_id,
'partner_id': st_line.partner_id.id,
'currency_id': account_currency_id,
'amount_currency': 0.0,
'quantity': 1,
'credit': currency_diff > 0 and currency_diff or 0.0,
'debit': currency_diff < 0 and -currency_diff or 0.0,
'date': st_line.date,
'counterpart_move_line_id': mv_line.id,
}
move_line_counterpart = {
'journal_id': st_line.journal_id.id,
'period_id': st_line.statement_id.period_id.id,
'name': _('change') + ': ' + (st_line.name or '/'),
'account_id': exchange_account_id,
'move_id': move_id,
'amount_currency': 0.0,
'partner_id': st_line.partner_id.id,
'currency_id': account_currency_id,
'quantity': 1,
'debit': currency_diff > 0 and currency_diff or 0.0,
'credit': currency_diff < 0 and -currency_diff or 0.0,
'date': st_line.date,
}
return (move_line, move_line_counterpart)
def process_reconciliations(self, cr, uid, data, context=None):
for datum in data:
self.process_reconciliation(cr, uid, datum[0], datum[1], context=context)
def process_reconciliation(self, cr, uid, id, mv_line_dicts, context=None):
""" Creates a move line for each item of mv_line_dicts and for the statement line. Reconcile a new move line with its counterpart_move_line_id if specified. Finally, mark the statement line as reconciled by putting the newly created move id in the column journal_entry_id.
:param int id: id of the bank statement line
:param list of dicts mv_line_dicts: move lines to create. If counterpart_move_line_id is specified, reconcile with it
"""
if context is None:
context = {}
st_line = self.browse(cr, uid, id, context=context)
company_currency = st_line.journal_id.company_id.currency_id
statement_currency = st_line.journal_id.currency or company_currency
bs_obj = self.pool.get('account.bank.statement')
am_obj = self.pool.get('account.move')
aml_obj = self.pool.get('account.move.line')
currency_obj = self.pool.get('res.currency')
# Checks
if st_line.journal_entry_id.id:
raise osv.except_osv(_('Error!'), _('The bank statement line was already reconciled.'))
for mv_line_dict in mv_line_dicts:
for field in ['debit', 'credit', 'amount_currency']:
if field not in mv_line_dict:
mv_line_dict[field] = 0.0
if mv_line_dict.get('counterpart_move_line_id'):
mv_line = aml_obj.browse(cr, uid, mv_line_dict.get('counterpart_move_line_id'), context=context)
if mv_line.reconcile_id:
raise osv.except_osv(_('Error!'), _('A selected move line was already reconciled.'))
# Create the move
move_name = (st_line.statement_id.name or st_line.name) + "/" + str(st_line.sequence)
move_vals = bs_obj._prepare_move(cr, uid, st_line, move_name, context=context)
move_id = am_obj.create(cr, uid, move_vals, context=context)
# Create the move line for the statement line
if st_line.statement_id.currency.id != company_currency.id:
if st_line.currency_id == company_currency:
amount = st_line.amount_currency
else:
ctx = context.copy()
ctx['date'] = st_line.date
amount = currency_obj.compute(cr, uid, st_line.statement_id.currency.id, company_currency.id, st_line.amount, context=ctx)
else:
amount = st_line.amount
bank_st_move_vals = bs_obj._prepare_bank_move_line(cr, uid, st_line, move_id, amount, company_currency.id, context=context)
aml_obj.create(cr, uid, bank_st_move_vals, context=context)
# Complete the dicts
st_line_currency = st_line.currency_id or statement_currency
st_line_currency_rate = st_line.currency_id and (st_line.amount_currency / st_line.amount) or False
to_create = []
for mv_line_dict in mv_line_dicts:
if mv_line_dict.get('is_tax_line'):
continue
mv_line_dict['ref'] = move_name
mv_line_dict['move_id'] = move_id
mv_line_dict['period_id'] = st_line.statement_id.period_id.id
mv_line_dict['journal_id'] = st_line.journal_id.id
mv_line_dict['company_id'] = st_line.company_id.id
mv_line_dict['statement_id'] = st_line.statement_id.id
if mv_line_dict.get('counterpart_move_line_id'):
mv_line = aml_obj.browse(cr, uid, mv_line_dict['counterpart_move_line_id'], context=context)
mv_line_dict['partner_id'] = mv_line.partner_id.id or st_line.partner_id.id
mv_line_dict['account_id'] = mv_line.account_id.id
if st_line_currency.id != company_currency.id:
ctx = context.copy()
ctx['date'] = st_line.date
mv_line_dict['amount_currency'] = mv_line_dict['debit'] - mv_line_dict['credit']
mv_line_dict['currency_id'] = st_line_currency.id
if st_line.currency_id and statement_currency.id == company_currency.id and st_line_currency_rate:
debit_at_current_rate = self.pool.get('res.currency').round(cr, uid, company_currency, mv_line_dict['debit'] / st_line_currency_rate)
credit_at_current_rate = self.pool.get('res.currency').round(cr, uid, company_currency, mv_line_dict['credit'] / st_line_currency_rate)
elif st_line.currency_id and st_line_currency_rate:
debit_at_current_rate = currency_obj.compute(cr, uid, statement_currency.id, company_currency.id, mv_line_dict['debit'] / st_line_currency_rate, context=ctx)
credit_at_current_rate = currency_obj.compute(cr, uid, statement_currency.id, company_currency.id, mv_line_dict['credit'] / st_line_currency_rate, context=ctx)
else:
debit_at_current_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['debit'], context=ctx)
credit_at_current_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['credit'], context=ctx)
if mv_line_dict.get('counterpart_move_line_id'):
#post an account line that uses the same currency rate as the counterpart (to balance the account) and post the difference in another line
ctx['date'] = mv_line.date
if mv_line.currency_id.id == mv_line_dict['currency_id'] \
and float_is_zero(abs(mv_line.amount_currency) - abs(mv_line_dict['amount_currency']), precision_rounding=mv_line.currency_id.rounding):
debit_at_old_rate = mv_line.credit
credit_at_old_rate = mv_line.debit
else:
debit_at_old_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['debit'], context=ctx)
credit_at_old_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['credit'], context=ctx)
mv_line_dict['credit'] = credit_at_old_rate
mv_line_dict['debit'] = debit_at_old_rate
if debit_at_old_rate - debit_at_current_rate:
currency_diff = debit_at_current_rate - debit_at_old_rate
to_create.append(self.get_currency_rate_line(cr, uid, st_line, -currency_diff, move_id, context=context))
if credit_at_old_rate - credit_at_current_rate:
currency_diff = credit_at_current_rate - credit_at_old_rate
to_create.append(self.get_currency_rate_line(cr, uid, st_line, currency_diff, move_id, context=context))
if mv_line.currency_id and mv_line_dict['currency_id'] == mv_line.currency_id.id:
amount_unreconciled = mv_line.amount_residual_currency
else:
amount_unreconciled = currency_obj.compute(cr, uid, company_currency.id, mv_line_dict['currency_id'] , mv_line.amount_residual, context=ctx)
if float_is_zero(mv_line_dict['amount_currency'] + amount_unreconciled, precision_rounding=mv_line.currency_id.rounding):
amount = mv_line_dict['debit'] or mv_line_dict['credit']
sign = -1 if mv_line_dict['debit'] else 1
currency_rate_difference = sign * (mv_line.amount_residual - amount)
if not company_currency.is_zero(currency_rate_difference):
exchange_lines = self._get_exchange_lines(cr, uid, st_line, mv_line, currency_rate_difference, mv_line_dict['currency_id'], move_id, context=context)
for exchange_line in exchange_lines:
to_create.append(exchange_line)
else:
mv_line_dict['debit'] = debit_at_current_rate
mv_line_dict['credit'] = credit_at_current_rate
elif statement_currency.id != company_currency.id:
#statement is in foreign currency but the transaction is in company currency
prorata_factor = (mv_line_dict['debit'] - mv_line_dict['credit']) / st_line.amount_currency
mv_line_dict['amount_currency'] = prorata_factor * st_line.amount
to_create.append(mv_line_dict)
# If the reconciliation is performed in a currency other than the company currency, the amounts are converted to get the right debit/credit.
# If there is more than 1 debit and 1 credit, this can induce a rounding error, which we put in the foreign exchange gain/loss account.
if st_line_currency.id != company_currency.id:
diff_amount = bank_st_move_vals['debit'] - bank_st_move_vals['credit'] \
+ sum(aml['debit'] for aml in to_create) - sum(aml['credit'] for aml in to_create)
if not company_currency.is_zero(diff_amount):
diff_aml = self.get_currency_rate_line(cr, uid, st_line, diff_amount, move_id, context=context)
diff_aml['name'] = _('Rounding error from currency conversion')
to_create.append(diff_aml)
# Create move lines
move_line_pairs_to_reconcile = []
for mv_line_dict in to_create:
counterpart_move_line_id = None # NB : this attribute is irrelevant for aml_obj.create() and needs to be removed from the dict
if mv_line_dict.get('counterpart_move_line_id'):
counterpart_move_line_id = mv_line_dict['counterpart_move_line_id']
del mv_line_dict['counterpart_move_line_id']
new_aml_id = aml_obj.create(cr, uid, mv_line_dict, context=context)
if counterpart_move_line_id != None:
move_line_pairs_to_reconcile.append([new_aml_id, counterpart_move_line_id])
# Reconcile
for pair in move_line_pairs_to_reconcile:
aml_obj.reconcile_partial(cr, uid, pair, context=context)
# Mark the statement line as reconciled
self.write(cr, uid, id, {'journal_entry_id': move_id}, context=context)
# FIXME : if it wasn't for the multicompany security settings in account_security.xml, the method would just
# return [('journal_entry_id', '=', False)]
# Unfortunately, that spawns a "no access rights" error ; it shouldn't.
def _needaction_domain_get(self, cr, uid, context=None):
user = self.pool.get("res.users").browse(cr, uid, uid)
return ['|', ('company_id', '=', False), ('company_id', 'child_of', [user.company_id.id]), ('journal_entry_id', '=', False), ('account_id', '=', False)]
_order = "statement_id desc, sequence"
_name = "account.bank.statement.line"
_description = "Bank Statement Line"
_inherit = ['ir.needaction_mixin']
_columns = {
'name': fields.char('Communication', required=True),
'date': fields.date('Date', required=True),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),
'partner_id': fields.many2one('res.partner', 'Partner'),
'bank_account_id': fields.many2one('res.partner.bank','Bank Account'),
'account_id': fields.many2one('account.account', 'Account', help="This technical field can be used at the statement line creation/import time in order to avoid the reconciliation process on it later on. The statement line will simply create a counterpart on this account"),
'statement_id': fields.many2one('account.bank.statement', 'Statement', select=True, required=True, ondelete='restrict'),
'journal_id': fields.related('statement_id', 'journal_id', type='many2one', relation='account.journal', string='Journal', store=True, readonly=True),
'partner_name': fields.char('Partner Name', help="This field is used to record the third party name when importing a bank statement in electronic format, when the partner doesn't exist yet in the database (or cannot be found)."),
'ref': fields.char('Reference'),
'note': fields.text('Notes'),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of bank statement lines."),
'company_id': fields.related('statement_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'journal_entry_id': fields.many2one('account.move', 'Journal Entry', copy=False),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency if it is a multi-currency entry.", digits_compute=dp.get_precision('Account')),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
}
_defaults = {
'name': lambda self,cr,uid,context={}: self.pool.get('ir.sequence').get(cr, uid, 'account.bank.statement.line'),
'date': lambda self,cr,uid,context={}: context.get('date', fields.date.context_today(self,cr,uid,context=context)),
}
class account_statement_operation_template(osv.osv):
_name = "account.statement.operation.template"
_description = "Preset for the lines that can be created in a bank statement reconciliation"
_columns = {
'name': fields.char('Button Label', required=True),
'account_id': fields.many2one('account.account', 'Account', ondelete='cascade', domain=[('type', 'not in', ('view', 'closed', 'consolidation'))]),
'label': fields.char('Label'),
'amount_type': fields.selection([('fixed', 'Fixed'),('percentage_of_total','Percentage of total amount'),('percentage_of_balance', 'Percentage of open balance')],
'Amount type', required=True),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account'), help="The amount will count as a debit if it is negative, as a credit if it is positive (except if amount type is 'Percentage of open balance').", required=True),
'tax_id': fields.many2one('account.tax', 'Tax', ondelete='restrict', domain=[('type_tax_use', 'in', ['purchase', 'all']), ('parent_id', '=', False)]),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', ondelete='set null', domain=[('type','!=','view'), ('state','not in',('close','cancelled'))]),
}
_defaults = {
'amount_type': 'percentage_of_balance',
'amount': 100.0
}
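# Illustrative record (assumed values): a "Bank fees" button could be defined
# with account_id pointing at an expense account, amount_type 'fixed' and
# amount -5.0, creating a 5.0 debit line during reconciliation.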
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
skwbc/numpy | numpy/testing/tests/test_decorators.py | 38 | 4305 | from __future__ import division, absolute_import, print_function
import warnings
from numpy.testing import (dec, assert_, assert_raises, run_module_suite,
SkipTest, KnownFailureException)
def test_slow():
@dec.slow
def slow_func(x, y, z):
pass
assert_(slow_func.slow)
def test_setastest():
@dec.setastest()
def f_default(a):
pass
@dec.setastest(True)
def f_istest(a):
pass
@dec.setastest(False)
def f_isnottest(a):
pass
assert_(f_default.__test__)
assert_(f_istest.__test__)
assert_(not f_isnottest.__test__)
class DidntSkipException(Exception):
pass
def test_skip_functions_hardcoded():
@dec.skipif(True)
def f1(x):
raise DidntSkipException
try:
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except SkipTest:
pass
@dec.skipif(False)
def f2(x):
raise DidntSkipException
try:
f2('a')
except DidntSkipException:
pass
except SkipTest:
raise Exception('Skipped when not expected to')
def test_skip_functions_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.skipif(skip_tester)
def f1(x):
raise DidntSkipException
try:
skip_flag = 'skip me!'
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except SkipTest:
pass
@dec.skipif(skip_tester)
def f2(x):
raise DidntSkipException
try:
skip_flag = 'five is right out!'
f2('a')
except DidntSkipException:
pass
except SkipTest:
raise Exception('Skipped when not expected to')
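# Editorial note: skip_tester reads skip_flag from the enclosing scope at call
# time (late binding), which is why assigning skip_flag just before invoking
# f1/f2 controls whether the decorated function is skipped.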
def test_skip_generators_hardcoded():
@dec.knownfailureif(True, "This test is known to fail")
def g1(x):
for i in range(x):
yield i
try:
for j in g1(10):
pass
except KnownFailureException:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(False, "This test is NOT known to fail")
def g2(x):
for i in range(x):
yield i
raise DidntSkipException('FAIL')
try:
for j in g2(10):
pass
except KnownFailureException:
raise Exception('Marked incorrectly as known failure')
except DidntSkipException:
pass
def test_skip_generators_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.knownfailureif(skip_tester, "This test is known to fail")
def g1(x):
for i in range(x):
yield i
try:
skip_flag = 'skip me!'
for j in g1(10):
pass
except KnownFailureException:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(skip_tester, "This test is NOT known to fail")
def g2(x):
for i in range(x):
yield i
raise DidntSkipException('FAIL')
try:
skip_flag = 'do not skip'
for j in g2(10):
pass
except KnownFailureException:
raise Exception('Marked incorrectly as known failure')
except DidntSkipException:
pass
def test_deprecated():
@dec.deprecated(True)
def non_deprecated_func():
pass
@dec.deprecated()
def deprecated_func():
import warnings
warnings.warn("TEST: deprecated func", DeprecationWarning)
@dec.deprecated()
def deprecated_func2():
import warnings
warnings.warn("AHHHH")
raise ValueError
@dec.deprecated()
def deprecated_func3():
import warnings
warnings.warn("AHHHH")
# marked as deprecated, but does not raise DeprecationWarning
assert_raises(AssertionError, non_deprecated_func)
# should be silent
deprecated_func()
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # do not propagate unrelated warnings
# fails if deprecated decorator just disables test. See #1453.
assert_raises(ValueError, deprecated_func2)
# warning is not a DeprecationWarning
assert_raises(AssertionError, deprecated_func3)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
2014c2g5/2014c2 | w2/static/Brython2.0.0-20140209-164925/Lib/logging/config.py | 739 | 35619 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, logging.handlers, socket, struct, traceback, re
import io
try:
import _thread as thread
import threading
except ImportError: #pragma: no cover
thread = None
from socketserver import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
if sys.platform == "win32":
RESET_ERROR = 10054 #WSAECONNRESET
else:
RESET_ERROR = 104 #ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
"""
Read the logging configuration from a ConfigParser-format file.
This can be called several times from an application, allowing an end user
the ability to select from various pre-canned configurations (if the
developer provides a mechanism to present the choices and load the chosen
configuration).
"""
import configparser
cp = configparser.ConfigParser(defaults)
if hasattr(fname, 'readline'):
cp.read_file(fname)
else:
cp.read(fname)
formatters = _create_formatters(cp)
# critical section
logging._acquireLock()
try:
logging._handlers.clear()
del logging._handlerList[:]
# Handlers add themselves to logging._handlers
handlers = _install_handlers(cp, formatters)
_install_loggers(cp, handlers, disable_existing_loggers)
finally:
logging._releaseLock()
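# Illustrative minimal file for fileConfig() (assumed example, not shipped
# with this module):
#   [loggers]
#   keys=root
#   [handlers]
#   keys=console
#   [formatters]
#   keys=simple
#   [logger_root]
#   level=INFO
#   handlers=console
#   [handler_console]
#   class=StreamHandler
#   level=INFO
#   formatter=simple
#   args=(sys.stdout,)
#   [formatter_simple]
#   format=%(levelname)s:%(name)s:%(message)s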
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = name.split('.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def _strip_spaces(alist):
return map(lambda x: x.strip(), alist)
def _create_formatters(cp):
"""Create and return formatters"""
flist = cp["formatters"]["keys"]
if not len(flist):
return {}
flist = flist.split(",")
flist = _strip_spaces(flist)
formatters = {}
for form in flist:
sectname = "formatter_%s" % form
fs = cp.get(sectname, "format", raw=True, fallback=None)
dfs = cp.get(sectname, "datefmt", raw=True, fallback=None)
c = logging.Formatter
class_name = cp[sectname].get("class")
if class_name:
c = _resolve(class_name)
f = c(fs, dfs)
formatters[form] = f
return formatters
def _install_handlers(cp, formatters):
"""Install and return handlers"""
hlist = cp["handlers"]["keys"]
if not len(hlist):
return {}
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
handlers = {}
fixups = [] #for inter-handler references
for hand in hlist:
section = cp["handler_%s" % hand]
klass = section["class"]
fmt = section.get("formatter", "")
try:
klass = eval(klass, vars(logging))
except (AttributeError, NameError):
klass = _resolve(klass)
args = section["args"]
args = eval(args, vars(logging))
h = klass(*args)
if "level" in section:
level = section["level"]
h.setLevel(logging._levelNames[level])
if len(fmt):
h.setFormatter(formatters[fmt])
if issubclass(klass, logging.handlers.MemoryHandler):
target = section.get("target", "")
if len(target): #the target handler may not be loaded yet, so keep for later...
fixups.append((h, target))
handlers[hand] = h
#now all handlers are loaded, fixup inter-handler references...
for h, t in fixups:
h.setTarget(handlers[t])
return handlers
def _handle_existing_loggers(existing, child_loggers, disable_existing):
"""
When (re)configuring logging, handle loggers which were in the previous
configuration but are not in the new configuration. There's no point
deleting them as other threads may continue to hold references to them;
and by disabling them, you stop them doing any logging.
However, don't disable children of named loggers, as that's probably not
what was intended by the user. Also, allow existing loggers to NOT be
disabled if disable_existing is false.
"""
root = logging.root
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
else:
logger.disabled = disable_existing
def _install_loggers(cp, handlers, disable_existing):
"""Create and install loggers"""
# configure the root first
llist = cp["loggers"]["keys"]
llist = llist.split(",")
llist = list(map(lambda x: x.strip(), llist))
llist.remove("root")
section = cp["logger_root"]
root = logging.root
log = root
if "level" in section:
level = section["level"]
log.setLevel(logging._levelNames[level])
for h in root.handlers[:]:
root.removeHandler(h)
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
log.addHandler(handlers[hand])
#and now the others...
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
for log in llist:
section = cp["logger_%s" % log]
qn = section["qualname"]
propagate = section.getint("propagate", fallback=1)
logger = logging.getLogger(qn)
if qn in existing:
i = existing.index(qn) + 1 # start with the entry after qn
prefixed = qn + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(qn)
if "level" in section:
level = section["level"]
logger.setLevel(logging._levelNames[level])
for h in logger.handlers[:]:
logger.removeHandler(h)
logger.propagate = propagate
logger.disabled = 0
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
logger.addHandler(handlers[hand])
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = 1
# elif disable_existing_loggers:
# logger.disabled = 1
_handle_existing_loggers(existing, child_loggers, disable_existing)
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = staticmethod(__import__)
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, str): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
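# Example (editor's sketch): resolve() walks a dotted import path, and
# cfg_convert() follows 'cfg://'-style references into the config dict.
# The sample config below is illustrative only.
#     >>> bc = BaseConfigurator({'handlers': {'h1': {'level': 'DEBUG'}}})
#     >>> bc.resolve('logging.StreamHandler')
#     <class 'logging.StreamHandler'>
#     >>> bc.cfg_convert('handlers.h1.level')
#     'DEBUG'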
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(logging._checkLevel(level))
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except Exception as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except Exception as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
deferred = []
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
if 'target not configured yet' in str(e):
deferred.append(name)
else:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Now do any that were deferred
for name in deferred:
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name) + 1 # look after name
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = True
# elif disable_existing:
# logger.disabled = True
_handle_existing_loggers(existing, child_loggers,
disable_existing)
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
style = config.get('style', '%')
result = logging.Formatter(fmt, dfmt, style)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except Exception as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
config_copy = dict(config) # for restoring in case of error
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except Exception as e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
factory = c
else:
cname = config.pop('class')
klass = self.resolve(cname)
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
th = self.config['handlers'][config['target']]
if not isinstance(th, logging.Handler):
config.update(config_copy) # restore for deferred cfg
raise TypeError('target not configured yet')
config['target'] = th
except Exception as e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError as te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(logging._checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except Exception as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(logging._checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
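# Example (editor's sketch): a minimal dictConfig() call wiring a console
# handler to the root logger. The names 'simple' and 'console' are arbitrary
# example identifiers, not required by the schema.
#
#     import logging
#     dictConfig({
#         'version': 1,
#         'formatters': {'simple': {'format': '%(levelname)s %(message)s'}},
#         'handlers': {'console': {'class': 'logging.StreamHandler',
#                                  'formatter': 'simple'}},
#         'root': {'level': 'INFO', 'handlers': ['console']},
#     })
#     logging.getLogger('example').info('configured')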
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
"""
Start up a socket server on the specified port, and listen for new
configurations.
    These will be sent as JSON, to be handled by dictConfig(), or as a file
    suitable for processing by fileConfig().
Returns a Thread object on which you can call start() to start the server,
and which you can join() when appropriate. To stop the server, call
stopListening().
"""
if not thread: #pragma: no cover
raise NotImplementedError("listen() needs threading to work")
class ConfigStreamHandler(StreamRequestHandler):
"""
Handler for a logging configuration request.
It expects a completely new logging configuration and uses fileConfig
to install it.
"""
def handle(self):
"""
Handle a request.
Each request is expected to be a 4-byte length, packed using
struct.pack(">L", n), followed by the config file.
Uses fileConfig() to do the grunt work.
"""
try:
conn = self.connection
chunk = conn.recv(4)
if len(chunk) == 4:
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
chunk = chunk.decode("utf-8")
try:
import json
                        d = json.loads(chunk)
                        assert isinstance(d, dict)
                        dictConfig(d)
                    except Exception:
                        # Not valid JSON - fall back to treating the payload
                        # as an ini-format file for fileConfig().
file = io.StringIO(chunk)
try:
fileConfig(file)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
traceback.print_exc()
if self.server.ready:
self.server.ready.set()
except socket.error as e:
if not isinstance(e.args, tuple):
raise
else:
errcode = e.args[0]
if errcode != RESET_ERROR:
raise
class ConfigSocketReceiver(ThreadingTCPServer):
"""
A simple TCP socket-based logging config receiver.
"""
allow_reuse_address = 1
def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
handler=None, ready=None):
ThreadingTCPServer.__init__(self, (host, port), handler)
logging._acquireLock()
self.abort = 0
logging._releaseLock()
self.timeout = 1
self.ready = ready
def serve_until_stopped(self):
import select
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.handle_request()
logging._acquireLock()
abort = self.abort
logging._releaseLock()
self.socket.close()
class Server(threading.Thread):
def __init__(self, rcvr, hdlr, port):
super(Server, self).__init__()
self.rcvr = rcvr
self.hdlr = hdlr
self.port = port
self.ready = threading.Event()
def run(self):
server = self.rcvr(port=self.port, handler=self.hdlr,
ready=self.ready)
if self.port == 0:
self.port = server.server_address[1]
self.ready.set()
global _listener
logging._acquireLock()
_listener = server
logging._releaseLock()
server.serve_until_stopped()
return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
def stopListening():
"""
Stop the listening server which was created with a call to listen().
"""
global _listener
logging._acquireLock()
try:
if _listener:
_listener.abort = 1
_listener = None
finally:
logging._releaseLock()
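# Example (editor's sketch): pushing a new configuration to a running
# listener. The payload is length-prefixed with struct.pack(">L", n) to
# match ConfigStreamHandler.handle() above; the port value is illustrative.
#
#     import json, socket, struct
#     t = listen(9030)
#     t.start()
#     t.ready.wait()
#     payload = json.dumps({'version': 1}).encode('utf-8')
#     s = socket.create_connection(('localhost', 9030))
#     s.sendall(struct.pack('>L', len(payload)) + payload)
#     s.close()
#     stopListening()
#     t.join()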
| gpl-2.0 |
moijes12/oh-mainline | vendor/packages/twisted/twisted/test/test_lockfile.py | 59 | 15404 | # Copyright (c) 2005 Divmod, Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.lockfile}.
"""
import os, errno
from twisted.trial import unittest
from twisted.python import lockfile
from twisted.python.runtime import platform
skipKill = None
if platform.isWindows():
try:
from win32api import OpenProcess
import pywintypes
except ImportError:
skipKill = ("On windows, lockfile.kill is not implemented in the "
"absence of win32api and/or pywintypes.")
class UtilTests(unittest.TestCase):
"""
Tests for the helper functions used to implement L{FilesystemLock}.
"""
def test_symlinkEEXIST(self):
"""
L{lockfile.symlink} raises L{OSError} with C{errno} set to L{EEXIST}
when an attempt is made to create a symlink which already exists.
"""
name = self.mktemp()
lockfile.symlink('foo', name)
exc = self.assertRaises(OSError, lockfile.symlink, 'foo', name)
self.assertEqual(exc.errno, errno.EEXIST)
def test_symlinkEIOWindows(self):
"""
L{lockfile.symlink} raises L{OSError} with C{errno} set to L{EIO} when
the underlying L{rename} call fails with L{EIO}.
Renaming a file on Windows may fail if the target of the rename is in
the process of being deleted (directory deletion appears not to be
atomic).
"""
name = self.mktemp()
def fakeRename(src, dst):
raise IOError(errno.EIO, None)
self.patch(lockfile, 'rename', fakeRename)
exc = self.assertRaises(IOError, lockfile.symlink, name, "foo")
self.assertEqual(exc.errno, errno.EIO)
if not platform.isWindows():
test_symlinkEIOWindows.skip = (
"special rename EIO handling only necessary and correct on "
"Windows.")
def test_readlinkENOENT(self):
"""
L{lockfile.readlink} raises L{OSError} with C{errno} set to L{ENOENT}
when an attempt is made to read a symlink which does not exist.
"""
name = self.mktemp()
exc = self.assertRaises(OSError, lockfile.readlink, name)
self.assertEqual(exc.errno, errno.ENOENT)
def test_readlinkEACCESWindows(self):
"""
L{lockfile.readlink} raises L{OSError} with C{errno} set to L{EACCES}
on Windows when the underlying file open attempt fails with C{EACCES}.
Opening a file on Windows may fail if the path is inside a directory
which is in the process of being deleted (directory deletion appears
not to be atomic).
"""
name = self.mktemp()
def fakeOpen(path, mode):
raise IOError(errno.EACCES, None)
self.patch(lockfile, '_open', fakeOpen)
exc = self.assertRaises(IOError, lockfile.readlink, name)
self.assertEqual(exc.errno, errno.EACCES)
if not platform.isWindows():
test_readlinkEACCESWindows.skip = (
"special readlink EACCES handling only necessary and correct on "
"Windows.")
def test_kill(self):
"""
L{lockfile.kill} returns without error if passed the PID of a
process which exists and signal C{0}.
"""
lockfile.kill(os.getpid(), 0)
test_kill.skip = skipKill
def test_killESRCH(self):
"""
L{lockfile.kill} raises L{OSError} with errno of L{ESRCH} if
passed a PID which does not correspond to any process.
"""
# Hopefully there is no process with PID 2 ** 31 - 1
exc = self.assertRaises(OSError, lockfile.kill, 2 ** 31 - 1, 0)
self.assertEqual(exc.errno, errno.ESRCH)
test_killESRCH.skip = skipKill
def test_noKillCall(self):
"""
Verify that when L{lockfile.kill} does end up as None (e.g. on Windows
without pywin32), it doesn't end up being called and raising a
L{TypeError}.
"""
self.patch(lockfile, "kill", None)
fl = lockfile.FilesystemLock(self.mktemp())
fl.lock()
self.assertFalse(fl.lock())
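# Editor's sketch of the API under test (not part of the original suite):
# FilesystemLock.lock() returns True when the lock is acquired and False
# when another live process holds it; unlock() releases it.
#
#     from twisted.python import lockfile
#     lock = lockfile.FilesystemLock('myapp.lock')   # path is illustrative
#     if lock.lock():
#         try:
#             pass  # critical section
#         finally:
#             lock.unlock()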
class LockingTestCase(unittest.TestCase):
def _symlinkErrorTest(self, errno):
def fakeSymlink(source, dest):
raise OSError(errno, None)
self.patch(lockfile, 'symlink', fakeSymlink)
lockf = self.mktemp()
lock = lockfile.FilesystemLock(lockf)
exc = self.assertRaises(OSError, lock.lock)
self.assertEqual(exc.errno, errno)
def test_symlinkError(self):
"""
An exception raised by C{symlink} other than C{EEXIST} is passed up to
the caller of L{FilesystemLock.lock}.
"""
self._symlinkErrorTest(errno.ENOSYS)
def test_symlinkErrorPOSIX(self):
"""
An L{OSError} raised by C{symlink} on a POSIX platform with an errno of
C{EACCES} or C{EIO} is passed to the caller of L{FilesystemLock.lock}.
On POSIX, unlike on Windows, these are unexpected errors which cannot
be handled by L{FilesystemLock}.
"""
self._symlinkErrorTest(errno.EACCES)
self._symlinkErrorTest(errno.EIO)
if platform.isWindows():
test_symlinkErrorPOSIX.skip = (
"POSIX-specific error propagation not expected on Windows.")
def test_cleanlyAcquire(self):
"""
If the lock has never been held, it can be acquired and the C{clean}
and C{locked} attributes are set to C{True}.
"""
lockf = self.mktemp()
lock = lockfile.FilesystemLock(lockf)
self.assertTrue(lock.lock())
self.assertTrue(lock.clean)
self.assertTrue(lock.locked)
def test_cleanlyRelease(self):
"""
If a lock is released cleanly, it can be re-acquired and the C{clean}
and C{locked} attributes are set to C{True}.
"""
lockf = self.mktemp()
lock = lockfile.FilesystemLock(lockf)
self.assertTrue(lock.lock())
lock.unlock()
self.assertFalse(lock.locked)
lock = lockfile.FilesystemLock(lockf)
self.assertTrue(lock.lock())
self.assertTrue(lock.clean)
self.assertTrue(lock.locked)
def test_cannotLockLocked(self):
"""
If a lock is currently locked, it cannot be locked again.
"""
lockf = self.mktemp()
firstLock = lockfile.FilesystemLock(lockf)
self.assertTrue(firstLock.lock())
secondLock = lockfile.FilesystemLock(lockf)
self.assertFalse(secondLock.lock())
self.assertFalse(secondLock.locked)
def test_uncleanlyAcquire(self):
"""
If a lock was held by a process which no longer exists, it can be
acquired, the C{clean} attribute is set to C{False}, and the
C{locked} attribute is set to C{True}.
"""
owner = 12345
def fakeKill(pid, signal):
if signal != 0:
raise OSError(errno.EPERM, None)
if pid == owner:
raise OSError(errno.ESRCH, None)
lockf = self.mktemp()
self.patch(lockfile, 'kill', fakeKill)
lockfile.symlink(str(owner), lockf)
lock = lockfile.FilesystemLock(lockf)
self.assertTrue(lock.lock())
self.assertFalse(lock.clean)
self.assertTrue(lock.locked)
self.assertEqual(lockfile.readlink(lockf), str(os.getpid()))
def test_lockReleasedBeforeCheck(self):
"""
If the lock is initially held but then released before it can be
examined to determine if the process which held it still exists, it is
acquired and the C{clean} and C{locked} attributes are set to C{True}.
"""
def fakeReadlink(name):
# Pretend to be another process releasing the lock.
lockfile.rmlink(lockf)
# Fall back to the real implementation of readlink.
readlinkPatch.restore()
return lockfile.readlink(name)
readlinkPatch = self.patch(lockfile, 'readlink', fakeReadlink)
def fakeKill(pid, signal):
if signal != 0:
raise OSError(errno.EPERM, None)
if pid == 43125:
raise OSError(errno.ESRCH, None)
self.patch(lockfile, 'kill', fakeKill)
lockf = self.mktemp()
lock = lockfile.FilesystemLock(lockf)
lockfile.symlink(str(43125), lockf)
self.assertTrue(lock.lock())
self.assertTrue(lock.clean)
self.assertTrue(lock.locked)
def test_lockReleasedDuringAcquireSymlink(self):
"""
If the lock is released while an attempt is made to acquire
it, the lock attempt fails and C{FilesystemLock.lock} returns
C{False}. This can happen on Windows when L{lockfile.symlink}
fails with L{IOError} of C{EIO} because another process is in
the middle of a call to L{os.rmdir} (implemented in terms of
RemoveDirectory) which is not atomic.
"""
def fakeSymlink(src, dst):
            # While another process is doing os.rmdir (which the Windows
            # implementation of rmlink uses), a rename call will fail with EIO.
raise OSError(errno.EIO, None)
self.patch(lockfile, 'symlink', fakeSymlink)
lockf = self.mktemp()
lock = lockfile.FilesystemLock(lockf)
self.assertFalse(lock.lock())
self.assertFalse(lock.locked)
if not platform.isWindows():
test_lockReleasedDuringAcquireSymlink.skip = (
"special rename EIO handling only necessary and correct on "
"Windows.")
def test_lockReleasedDuringAcquireReadlink(self):
"""
If the lock is initially held but is released while an attempt
is made to acquire it, the lock attempt fails and
L{FilesystemLock.lock} returns C{False}.
"""
def fakeReadlink(name):
            # While another process is doing os.rmdir (which the
            # Windows implementation of rmlink uses), a readlink call
# will fail with EACCES.
raise IOError(errno.EACCES, None)
readlinkPatch = self.patch(lockfile, 'readlink', fakeReadlink)
lockf = self.mktemp()
lock = lockfile.FilesystemLock(lockf)
lockfile.symlink(str(43125), lockf)
self.assertFalse(lock.lock())
self.assertFalse(lock.locked)
if not platform.isWindows():
test_lockReleasedDuringAcquireReadlink.skip = (
"special readlink EACCES handling only necessary and correct on "
"Windows.")
def _readlinkErrorTest(self, exceptionType, errno):
def fakeReadlink(name):
raise exceptionType(errno, None)
self.patch(lockfile, 'readlink', fakeReadlink)
lockf = self.mktemp()
# Make it appear locked so it has to use readlink
lockfile.symlink(str(43125), lockf)
lock = lockfile.FilesystemLock(lockf)
exc = self.assertRaises(exceptionType, lock.lock)
self.assertEqual(exc.errno, errno)
self.assertFalse(lock.locked)
def test_readlinkError(self):
"""
An exception raised by C{readlink} other than C{ENOENT} is passed up to
the caller of L{FilesystemLock.lock}.
"""
self._readlinkErrorTest(OSError, errno.ENOSYS)
self._readlinkErrorTest(IOError, errno.ENOSYS)
def test_readlinkErrorPOSIX(self):
"""
        Any L{IOError} raised by C{readlink} on a POSIX platform is passed to the
caller of L{FilesystemLock.lock}.
On POSIX, unlike on Windows, these are unexpected errors which cannot
be handled by L{FilesystemLock}.
"""
self._readlinkErrorTest(IOError, errno.ENOSYS)
self._readlinkErrorTest(IOError, errno.EACCES)
if platform.isWindows():
test_readlinkErrorPOSIX.skip = (
"POSIX-specific error propagation not expected on Windows.")
def test_lockCleanedUpConcurrently(self):
"""
If a second process cleans up the lock after a first one checks the
lock and finds that no process is holding it, the first process does
not fail when it tries to clean up the lock.
"""
def fakeRmlink(name):
rmlinkPatch.restore()
# Pretend to be another process cleaning up the lock.
lockfile.rmlink(lockf)
# Fall back to the real implementation of rmlink.
return lockfile.rmlink(name)
rmlinkPatch = self.patch(lockfile, 'rmlink', fakeRmlink)
def fakeKill(pid, signal):
if signal != 0:
raise OSError(errno.EPERM, None)
if pid == 43125:
raise OSError(errno.ESRCH, None)
self.patch(lockfile, 'kill', fakeKill)
lockf = self.mktemp()
lock = lockfile.FilesystemLock(lockf)
lockfile.symlink(str(43125), lockf)
self.assertTrue(lock.lock())
self.assertTrue(lock.clean)
self.assertTrue(lock.locked)
def test_rmlinkError(self):
"""
An exception raised by L{rmlink} other than C{ENOENT} is passed up
to the caller of L{FilesystemLock.lock}.
"""
def fakeRmlink(name):
raise OSError(errno.ENOSYS, None)
self.patch(lockfile, 'rmlink', fakeRmlink)
def fakeKill(pid, signal):
if signal != 0:
raise OSError(errno.EPERM, None)
if pid == 43125:
raise OSError(errno.ESRCH, None)
self.patch(lockfile, 'kill', fakeKill)
lockf = self.mktemp()
# Make it appear locked so it has to use readlink
lockfile.symlink(str(43125), lockf)
lock = lockfile.FilesystemLock(lockf)
exc = self.assertRaises(OSError, lock.lock)
self.assertEqual(exc.errno, errno.ENOSYS)
self.assertFalse(lock.locked)
def test_killError(self):
"""
If L{kill} raises an exception other than L{OSError} with errno set to
C{ESRCH}, the exception is passed up to the caller of
L{FilesystemLock.lock}.
"""
def fakeKill(pid, signal):
raise OSError(errno.EPERM, None)
self.patch(lockfile, 'kill', fakeKill)
lockf = self.mktemp()
# Make it appear locked so it has to use readlink
lockfile.symlink(str(43125), lockf)
lock = lockfile.FilesystemLock(lockf)
exc = self.assertRaises(OSError, lock.lock)
self.assertEqual(exc.errno, errno.EPERM)
self.assertFalse(lock.locked)
def test_unlockOther(self):
"""
L{FilesystemLock.unlock} raises L{ValueError} if called for a lock
which is held by a different process.
"""
lockf = self.mktemp()
lockfile.symlink(str(os.getpid() + 1), lockf)
lock = lockfile.FilesystemLock(lockf)
self.assertRaises(ValueError, lock.unlock)
def test_isLocked(self):
"""
L{isLocked} returns C{True} if the named lock is currently locked,
C{False} otherwise.
"""
lockf = self.mktemp()
self.assertFalse(lockfile.isLocked(lockf))
lock = lockfile.FilesystemLock(lockf)
self.assertTrue(lock.lock())
self.assertTrue(lockfile.isLocked(lockf))
lock.unlock()
self.assertFalse(lockfile.isLocked(lockf))
| agpl-3.0 |
cainiaocome/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
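# Example (editor's sketch): column-wise standardization. The constant
# second feature stays at zero because its std is reset to 1.0.
#     >>> import numpy as np
#     >>> scale(np.array([[1., 0.], [3., 0.]]))
#     array([[-1.,  0.],
#            [ 1.,  0.]])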
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
    feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
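# Example (editor's sketch): mapping each feature onto the default (0, 1)
# range.
#     >>> import numpy as np
#     >>> MinMaxScaler().fit_transform(np.array([[1., 10.],
#     ...                                        [2., 20.],
#     ...                                        [3., 30.]]))
#     array([[ 0. ,  0. ],
#            [ 0.5,  0.5],
#            [ 1. ,  1. ]])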
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
    feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
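# Example (editor's sketch): fit on training data, then reuse the stored
# mean_/std_ on unseen data via transform().
#     >>> import numpy as np
#     >>> ss = StandardScaler().fit(np.array([[0.], [2.]]))
#     >>> ss.mean_, ss.std_
#     (array([ 1.]), array([ 1.]))
#     >>> ss.transform(np.array([[4.]]))
#     array([[ 3.]])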
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
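# Example (editor's sketch): scaling by the per-feature maximum absolute
# value; zeros stay zero, so sparsity would be preserved.
#     >>> import numpy as np
#     >>> MaxAbsScaler().fit_transform(np.array([[-4., 0.], [2., 1.]]))
#     array([[-1. ,  0. ],
#            [ 0.5,  1. ]])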
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
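# Example (editor's sketch): with these five equally spaced samples the
# median is 0 and the IQR is 4, so the feature is divided by 4.
#     >>> import numpy as np
#     >>> RobustScaler().fit_transform(np.array([[-4.], [-2.], [0.],
#     ...                                        [2.], [4.]]))
#     array([[-1. ],
#            [-0.5],
#            [ 0. ],
#            [ 0.5],
#            [ 1. ]])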
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to interquartile range.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
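# Example (editor's sketch): powers_[i, j] is the exponent of input j in
# output i, matching the ordering [1, a, b, a^2, ab, b^2] for two inputs.
#     >>> import numpy as np
#     >>> PolynomialFeatures(2).fit(np.zeros((1, 2))).powers_
#     array([[0, 0],
#            [1, 0],
#            [0, 1],
#            [2, 0],
#            [1, 1],
#            [0, 2]])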
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
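# Example (editor's sketch): l2-normalizing rows; each non-zero row ends up
# with unit Euclidean length, and all-zero rows are left untouched.
#     >>> import numpy as np
#     >>> normalize(np.array([[3., 4.], [0., 0.]]))
#     array([[ 0.6,  0.8],
#            [ 0. ,  0. ]])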
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non-zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrices (use CSR format if you want to avoid the burden
of a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering. For instance, the dot product of two
l2-normalized TF-IDF vectors is the cosine similarity of the vectors
and is the base similarity metric for the Vector Space Model commonly
used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non-zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an unnecessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
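# Hedged usage sketch (illustrative): because the estimator is stateless,
# fit() only validates its input, which makes the class a drop-in pipeline
# step; transform() does the actual work:
#
#     >>> from sklearn.preprocessing import Normalizer
#     >>> Normalizer(norm='l1').fit([[1., 3.]]).transform([[1., 3.]])
#     array([[ 0.25,  0.75]])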
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
unnecessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
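# Hedged usage sketch (illustrative): values strictly above the threshold map
# to 1; everything else, including the threshold itself, maps to 0:
#
#     >>> from sklearn.preprocessing import binarize
#     >>> binarize([[1.5, -0.5, 0.0]], threshold=0.0)
#     array([[ 1.,  0.,  0.]])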
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
unnecessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
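# Hedged usage sketch (illustrative): with a non-zero threshold, a value equal
# to the threshold still maps to 0:
#
#     >>> from sklearn.preprocessing import Binarizer
#     >>> Binarizer(threshold=1.0).transform([[0.5, 1.0, 2.0]])
#     array([[ 0.,  0.,  1.]])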
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
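# Hedged verification sketch (illustrative): for a linear kernel, centering
# the kernel matrix is equivalent to centering the samples in feature space
# first and then computing the kernel:
#
#     >>> import numpy as np
#     >>> from sklearn.preprocessing import KernelCenterer
#     >>> X = np.array([[1., 2.], [2., 0.], [0., 1.]])
#     >>> K = X.dot(X.T)                       # linear kernel, phi(x) = x
#     >>> Xc = X - X.mean(axis=0)              # explicit centering of phi(x)
#     >>> np.allclose(KernelCenterer().fit(K).transform(K), Xc.dot(Xc.T))
#     True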
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
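# Hedged usage sketch (illustrative): the sparse code paths give the same
# result as the dense one; a CSR input is routed through the COO branch and
# converted back to its original class:
#
#     >>> from scipy import sparse
#     >>> from sklearn.preprocessing import add_dummy_feature
#     >>> Xs = sparse.csr_matrix([[0., 1.], [1., 0.]])
#     >>> add_dummy_feature(Xs, value=2.0).toarray()
#     array([[ 2.,  0.,  1.],
#            [ 2.,  1.,  0.]])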
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
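# Hedged usage sketch (illustrative, with a hypothetical transform callable):
# only the columns listed in `selected` pass through `transform`; the
# remaining columns are stacked, untouched, to the right of the result:
#
#     >>> import numpy as np
#     >>> X = np.array([[0., 1., 2.], [3., 4., 5.]])
#     >>> _transform_selected(X, lambda Z: Z * 10, selected=[0, 2])
#     array([[  0.,  20.,   1.],
#            [ 30.,  50.,   4.]])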
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit,
# i.e. less than n_values_, using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
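# Hedged usage sketch (illustrative): with handle_unknown='ignore', a feature
# value outside the fitted range yields an all-zero row instead of raising:
#
#     >>> from sklearn.preprocessing import OneHotEncoder
#     >>> enc = OneHotEncoder(n_values=3, handle_unknown='ignore')
#     >>> enc.fit([[0], [1]])                  # doctest: +ELLIPSIS
#     OneHotEncoder(...)
#     >>> enc.transform([[5]]).toarray()
#     array([[ 0.,  0.,  0.]])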
| bsd-3-clause |
openmotics/gateway | tools/validate_p1.py | 1 | 2517 | #!/bin/python2
# Copyright (C) 2021 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from serial.serialposix import Serial
def watch(port):
try:
previous_output = None
serial = Serial(port, 115200, timeout=2)
count = 0
while True:
count += 1
output = _read(serial).strip()
if output != previous_output:
_print_diff(previous_output if previous_output is not None else output,
output)
previous_output = output
sys.stdout.write('\rCount: {0:04d}'.format(count))
sys.stdout.flush()
except KeyboardInterrupt:
print('Exit')
def _print_diff(a_string, b_string):
output = ''
color_started = False
for i in range(max(len(a_string), len(b_string))):
a = a_string[i] if i < len(a_string) else '?'
b = b_string[i] if i < len(b_string) else '?'
if a != b:
if color_started is False:
output += '\033[101m'
color_started = True
else:
if color_started is True:
output += '\033[0m'
color_started = False
output += b
output += '\033[0m'
sys.stdout.write('\n\n{0}\n\n'.format(output))
sys.stdout.flush()
def _read(serial):
buffer = ''
new_data = serial.read(1)
while len(new_data) > 0:
buffer += new_data
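# The terminator is checked at offset -5 because a P1 telegram ends
# with '!' followed by a 4-character CRC (an assumption based on the
# DSMR P1 port specification).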
if len(buffer) > 5 and buffer[-5] == '!':
return buffer
new_data = serial.read(1)
return buffer
if __name__ == "__main__":
if len(sys.argv) != 2:
print('Validates correct communication with the P1 port')
print('Usage: ./validate_p1.py <port>')
print('Port is typically /dev/ttyO2')
sys.exit(1)
watch(sys.argv[1])
| agpl-3.0 |
thom-at-redhat/cfme_tests | cfme/tests/infrastructure/test_vm_clone.py | 1 | 5476 | # -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme.common.provider import cleanup_vm
from cfme.common.vm import VM
from cfme.services.catalogs.catalog_item import CatalogItem
from cfme.automate.service_dialogs import ServiceDialog
from cfme.services.catalogs.catalog import Catalog
from cfme.services.catalogs.service_catalogs import ServiceCatalogs
from cfme.services import requests
from cfme.web_ui import flash
from utils.wait import wait_for
from utils import testgen
from utils.log import logger
from utils import version
pytestmark = [
pytest.mark.meta(roles="+automate")
]
def pytest_generate_tests(metafunc):
# Filter out providers without provisioning data or hosts defined
argnames, argvalues, idlist = testgen.infra_providers(
metafunc, 'provisioning', template_location=["provisioning", "template"])
new_idlist = []
new_argvalues = []
for i, argvalue_tuple in enumerate(argvalues):
args = dict(zip(argnames, argvalue_tuple))
if not args['provisioning']:
# No provisioning data available
continue
# required keys should be a subset of the dict keys set
if not {'template', 'host', 'datastore'}.issubset(args['provisioning'].viewkeys()):
# Need all three for template provisioning
continue
new_idlist.append(idlist[i])
new_argvalues.append(argvalues[i])
testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.yield_fixture(scope="function")
def dialog():
dialog = "dialog_" + fauxfactory.gen_alphanumeric()
element_data = dict(
ele_label="ele_" + fauxfactory.gen_alphanumeric(),
ele_name=fauxfactory.gen_alphanumeric(),
ele_desc="my ele desc", choose_type="Text Box",
default_text_box="default value"
)
service_dialog = ServiceDialog(label=dialog, description="my dialog",
submit=True, cancel=True,
tab_label="tab_" + fauxfactory.gen_alphanumeric(), tab_desc="my tab desc",
box_label="box_" + fauxfactory.gen_alphanumeric(), box_desc="my box desc")
service_dialog.create(element_data)
flash.assert_success_message('Dialog "%s" was added' % dialog)
yield dialog
@pytest.yield_fixture(scope="function")
def catalog():
catalog = "cat_" + fauxfactory.gen_alphanumeric()
cat = Catalog(name=catalog,
description="my catalog")
cat.create()
yield catalog
@pytest.yield_fixture(scope="function")
def catalog_item(provider, provisioning, vm_name, dialog, catalog):
template, host, datastore, iso_file, catalog_item_type = map(provisioning.get,
('template', 'host', 'datastore', 'iso_file', 'catalog_item_type'))
provisioning_data = {
'vm_name': vm_name,
'host_name': {'name': [host]},
'datastore_name': {'name': [datastore]}
}
if provider.type == 'rhevm':
provisioning_data['provision_type'] = 'Native Clone'
provisioning_data['vlan'] = provisioning['vlan']
catalog_item_type = version.pick({
version.LATEST: "RHEV",
'5.3': "RHEV",
'5.2': "Redhat"
})
elif provider.type == 'virtualcenter':
provisioning_data['provision_type'] = 'VMware'
item_name = fauxfactory.gen_alphanumeric()
catalog_item = CatalogItem(item_type=catalog_item_type, name=item_name,
description="my catalog", display_in=True, catalog=catalog,
dialog=dialog, catalog_name=template,
provider=provider.name, prov_data=provisioning_data)
yield catalog_item
@pytest.fixture(scope="function")
def clone_vm_name():
clone_vm_name = 'test_cloning_{}'.format(fauxfactory.gen_alphanumeric())
return clone_vm_name
@pytest.fixture
def create_vm(provider, setup_provider, catalog_item, request):
vm_name = catalog_item.provisioning_data["vm_name"]
catalog_item.create()
service_catalogs = ServiceCatalogs("service_name")
service_catalogs.order(catalog_item.catalog, catalog_item)
flash.assert_no_errors()
logger.info('Waiting for cfme provision request for service %s' % catalog_item.name)
row_description = catalog_item.name
cells = {'Description': row_description}
row, __ = wait_for(requests.wait_for_request, [cells, True],
fail_func=requests.reload, num_sec=1400, delay=20)
assert row.last_message.text == 'Request complete'
return vm_name
@pytest.mark.meta(blockers=[1255190])
@pytest.mark.usefixtures("setup_provider")
@pytest.mark.uncollectif(version.appliance_is_downstream())
@pytest.mark.long_running
def test_vm_clone(provisioning, provider, clone_vm_name, request, create_vm):
vm_name = create_vm + "_0001"
request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
request.addfinalizer(lambda: cleanup_vm(clone_vm_name, provider))
vm = VM.factory(vm_name, provider)
if provider.type == 'rhevm':
provision_type = 'Native Clone'
elif provider.type == 'virtualcenter':
provision_type = 'VMware'
vm.clone_vm("email@xyz.com", "first", "last", clone_vm_name, provision_type)
row_description = clone_vm_name
cells = {'Description': row_description}
row, __ = wait_for(requests.wait_for_request, [cells, True],
fail_func=requests.reload, num_sec=4000, delay=20)
assert row.last_message.text == 'Vm Provisioned Successfully'
| gpl-2.0 |
uchida/selenium | py/selenium/webdriver/support/wait.py | 81 | 4070 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
POLL_FREQUENCY = 0.5 # How long to sleep in between calls to the method
IGNORED_EXCEPTIONS = (NoSuchElementException,) # exceptions ignored during calls to the method
class WebDriverWait(object):
def __init__(self, driver, timeout, poll_frequency=POLL_FREQUENCY, ignored_exceptions=None):
"""Constructor, takes a WebDriver instance and timeout in seconds.
:Args:
- driver - Instance of WebDriver (Ie, Firefox, Chrome or Remote)
- timeout - Number of seconds before timing out
- poll_frequency - sleep interval between calls
By default, it is 0.5 second.
- ignored_exceptions - iterable structure of exception classes ignored during calls.
By default, it contains NoSuchElementException only.
Example:
from selenium.webdriver.support.ui import WebDriverWait \n
element = WebDriverWait(driver, 10).until(lambda x: x.find_element_by_id("someId")) \n
is_disappeared = WebDriverWait(driver, 30, 1, (ElementNotVisibleException)).\ \n
until_not(lambda x: x.find_element_by_id("someId").is_displayed())
"""
self._driver = driver
self._timeout = timeout
self._poll = poll_frequency
# avoid the divide by zero
if self._poll == 0:
self._poll = POLL_FREQUENCY
exceptions = list(IGNORED_EXCEPTIONS)
if ignored_exceptions is not None:
try:
exceptions.extend(iter(ignored_exceptions))
except TypeError: # ignored_exceptions is not iterable
exceptions.append(ignored_exceptions)
self._ignored_exceptions = tuple(exceptions)
def __repr__(self):
return '<{0.__module__}.{0.__name__} (session="{1}")>'.format(
type(self), self._driver.session_id)
def until(self, method, message=''):
"""Calls the method provided with the driver as an argument until the \
return value is not False."""
screen = None
stacktrace = None
end_time = time.time() + self._timeout
while True:
try:
value = method(self._driver)
if value:
return value
except self._ignored_exceptions as exc:
screen = getattr(exc, 'screen', None)
stacktrace = getattr(exc, 'stacktrace', None)
time.sleep(self._poll)
if time.time() > end_time:
break
raise TimeoutException(message, screen, stacktrace)
def until_not(self, method, message=''):
"""Calls the method provided with the driver as an argument until the \
return value is False."""
end_time = time.time() + self._timeout
while True:
try:
value = method(self._driver)
if not value:
return value
except self._ignored_exceptions:
return True
time.sleep(self._poll)
if time.time() > end_time:
break
raise TimeoutException(message)
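# Hedged usage sketch (illustrative; the driver construction, URL and element
# id below are assumptions, not part of this module). until() re-invokes the
# callable every poll_frequency seconds until it returns a truthy value or
# the timeout elapses:
#
#     from selenium import webdriver
#     from selenium.webdriver.support.ui import WebDriverWait
#
#     driver = webdriver.Firefox()
#     driver.get("http://example.com")         # placeholder URL
#     element = WebDriverWait(driver, 10).until(
#         lambda d: d.find_element_by_id("main"))  # hypothetical element id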
| apache-2.0 |
DevinDewitt/pyqt5 | pyuic/uic/port_v3/string_io.py | 2 | 1060 | #############################################################################
##
## Copyright (c) 2015 Riverbank Computing Limited <info@riverbankcomputing.com>
##
## This file is part of PyQt5.
##
## This file may be used under the terms of the GNU General Public License
## version 3.0 as published by the Free Software Foundation and appearing in
## the file LICENSE included in the packaging of this file. Please review the
## following information to ensure the GNU General Public License version 3.0
## requirements will be met: http://www.gnu.org/copyleft/gpl.html.
##
## If you do not wish to use this file under the terms of the GPL version 3.0
## then you may purchase a commercial license. For more information contact
## info@riverbankcomputing.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
# Import the StringIO object.
from io import StringIO
| gpl-3.0 |
ibab/tensorflow | tensorflow/python/training/sync_replicas_optimizer.py | 8 | 22953 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Synchronize replicas for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
class SyncReplicasOptimizer(optimizer.Optimizer):
"""Class to synchronize, aggregate gradients and pass them to the optimizer.
In a typical asynchronous training environment, it's common to have some
stale gradients. For example, with N-replica asynchronous training,
gradients will be applied to the variables N times independently. Depending
on each replica's training speed, some gradients might be calculated from
copies of the variable from several steps back (N-1 steps on average). This
optimizer avoids stale gradients by collecting gradients from all replicas,
summing them, then applying them to the variables in one shot, after
which replicas can fetch the new variables and continue.
The following queues are created:
<empty line>
* N `gradient` queues, one per variable to train. Gradients are pushed to
these queues and the chief worker will dequeue_many and then sum them
before applying to variables.
* 1 `token` queue where the optimizer pushes the new global_step value after
all gradients have been applied.
The following variables are created:
* N `local_step`, one per replica. Compared against global step to check for
staleness of the gradients.
This adds nodes to the graph to collect gradients and pause the trainers until
variables are updated.
For the PS:
<empty line>
1. A queue is created for each variable, and each replica now pushes the
gradients into the queue instead of directly applying them to the
variables.
2. For each gradient_queue, pop and sum the gradients once enough
replicas (replicas_to_aggregate) have pushed gradients to the queue.
3. Apply the aggregated gradients to the variables.
4. Only after all variables have been updated, increment the global step.
5. Only after step 4, clear all the gradients in the queues as they are
stale now (could happen when replicas are restarted and push to the queues
multiple times, or from the backup replicas).
6. Only after step 5, pushes `global_step` in the `token_queue`, once for
each worker replica. The workers can now fetch it to its local_step variable
and start the next batch.
For the replicas:
<empty line>
1. Start a step: fetch variables and compute gradients.
2. Once the gradients have been computed, push them into `gradient_queue` only
if local_step equals global_step, otherwise the gradients are just dropped.
This avoids stale gradients.
3. After pushing all the gradients, dequeue an updated value of global_step
from the token queue and record that step to its local_step variable. Note
that this is effectively a barrier.
4. Start the next batch.
### Usage
```python
# Create any optimizer to update the variables, say a simple SGD:
opt = GradientDescentOptimizer(learning_rate=0.1)
# Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each
# step the optimizer collects 50 gradients before applying to variables.
opt = tf.SyncReplicasOptimizer(opt, replicas_to_aggregate=50,
replica_id=task_id, total_num_replicas=50)
# Note that if you want to have 2 backup replicas, you can change
# total_num_replicas=52 and make sure this number matches how many physical
# replicas you started in your job.
# Some models have startup_delays to help stabilize the model but when using
# sync_replicas training, set it to 0.
# Now you can call `minimize()` or `compute_gradients()` and
# `apply_gradients()` normally
grads = opt.minimize(total_loss, global_step=self.global_step)
# You can now call get_init_tokens_op() and get_chief_queue_runner().
# Note that get_init_tokens_op() must be called before creating session
# because it modifies the graph.
init_token_op = opt.get_init_tokens_op()
chief_queue_runner = opt.get_chief_queue_runner()
```
In the training program, every worker will run the train_op as if not
synchronized. But one worker (usually the chief) will need to execute the
chief_queue_runner and get_init_tokens_op generated from this optimizer.
```python
# After the session is created by the superviser and before the main while
# loop:
if is_chief and FLAGS.sync_replicas:
sv.start_queue_runners(sess, [chief_queue_runner])
# Insert initial tokens to the queue.
sess.run(init_token_op)
```
@@__init__
@@compute_gradients
@@apply_gradients
@@get_chief_queue_runner
@@get_init_tokens_op
"""
def __init__(self,
opt,
replicas_to_aggregate,
variable_averages=None,
variables_to_average=None,
replica_id=None,
total_num_replicas=0,
use_locking=False,
name="sync_replicas"):
"""Construct a sync_replicas optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be one of the Optimizer classes.
replicas_to_aggregate: number of replicas to aggregate for each variable
update.
variable_averages: Optional `ExponentialMovingAverage` object, used to
maintain moving averages for the variables passed in
`variables_to_average`.
variables_to_average: a list of variables that need to be averaged. Only
needed if variable_averages is passed in.
replica_id: This is the task/worker/replica ID. Needed as index to access
local_steps to check staleness. Must be in the interval:
[0, total_num_replicas)
total_num_replicas: Total number of tasks/workers/replicas, could be
different from replicas_to_aggregate.
If total_num_replicas > replicas_to_aggregate: it is backup_replicas +
replicas_to_aggregate.
If total_num_replicas < replicas_to_aggregate: Replicas compute
multiple batches per update to variables.
use_locking: If True use locks for update operation.
name: string. Optional name of the returned operation.
"""
if total_num_replicas == 0:
total_num_replicas = replicas_to_aggregate
super(SyncReplicasOptimizer, self).__init__(use_locking, name)
logging.info(
"SyncReplicas enabled: replicas_to_aggregate=%s; total_num_replicas=%s",
replicas_to_aggregate, total_num_replicas)
self._opt = opt
self._replicas_to_aggregate = replicas_to_aggregate
self._gradients_applied = False
self._variable_averages = variable_averages
self._variables_to_average = variables_to_average
self._replica_id = replica_id
self._total_num_replicas = total_num_replicas
self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate)
self._global_step = None
self._sync_token_queue = None
# This will be executed in a queue runner and includes the synchronization
# operations done by the chief.
self._chief_queue_runner = None
# Remember which queue is on which device for the "clear" operation.
# This list contains list of the following format: (grad_queue, device).
self._one_element_queue_list = []
# Sparse gradients queue has both value and index
self._sparse_grad_queues_and_devs = []
# clean_up_op will be executed when the chief is about to restart.
# If chief restarts, it is possible that some variables have already been
# updated before and when chief comes back, these variables will not be
# updated again as the workers have already computed the gradients for
# them.
# But chief still waits for all variables to be updated, which will hang
# the training.
# To avoid such hang, every time the chief is about to die, it will call
# abort_op to kill the PS with the token_queue so all replicas will also
# restart.
# TODO(jmchen): When training restarts, the variables are restored from the
# previous checkpoint. As such all the gradients in all the queues should be
# removed as they are computed from potentially different variables.
# Currently this is not done.
self._clean_up_op = None
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of "loss" for the variables in "var_list".
This simply wraps the compute_gradients() from the real optimizer. The
gradients will be aggregated in the apply_gradients() so that user can
modify the gradients like clipping with per replica global norm if needed.
The global norm with aggregated gradients can be bad as one replica's huge
gradients can hurt the gradients from other replicas.
Args:
*args: Arguments for compute_gradients().
**kwargs: Keyword arguments for compute_gradients().
Returns:
A list of (gradient, variable) pairs.
"""
return self._opt.compute_gradients(*args, **kwargs)
def _aggregate_sparse_grad(self, grad, var, train_ops):
"""Aggregate sparse gradients.
Args:
grad: The sparse gradient to aggregate.
var: The variable to apply this gradient to.
train_ops: The train_ops for the worker to run.
Returns:
aggregated_grad: Aggregated grad.
"""
# Sparse gradients have to be inserted as one (value, index) pair
# per element instead of the whole "IndexedSlices" because
# their shapes are not deterministic.
sparse_grad_queue = (data_flow_ops.FIFOQueue(
-1,
(grad.values.dtype, grad.indices.dtype),
shapes=(var.get_shape().as_list()[1:], ()),
shared_name="sparse_grad_q_%s" % var.name))
self._sparse_grad_queues_and_devs.append((sparse_grad_queue, var.device))
# Sparse token is inserted after the "enqueue_many" finishes. This
# is needed to make sure enough sparse gradients have been enqueued
# before applying them to the variables.
sparse_token_queue = (data_flow_ops.FIFOQueue(
self._replicas_to_aggregate * 2,
types_pb2.DT_INT32,
shapes=(),
shared_name="sparse_token_q_%s" % var.name))
self._one_element_queue_list.append((sparse_token_queue, var.device))
enqueue_sparse_op = sparse_grad_queue.enqueue_many([grad.values,
grad.indices])
with ops.control_dependencies([enqueue_sparse_op]):
train_ops.append(sparse_token_queue.enqueue((1,)))
with ops.control_dependencies([sparse_token_queue.dequeue_many(
self._replicas_to_aggregate)]):
values, indices = sparse_grad_queue.dequeue_many(sparse_grad_queue.size())
concat_grad = ops.IndexedSlices(values, indices, grad.dense_shape)
# Sum the gradients of the same variables in the sparse layers so
# that each variable is only updated once. Note that with 2
# gradients g1 and g2 from 2 replicas for the same variable,
# apply(g1+g2) is different from apply(g1) and then apply(g2) when
# the optimizer is complex like Momentum or Adagrad.
values = concat_grad.values
indices = concat_grad.indices
new_indices, indx = array_ops.unique(indices)
num_indices = array_ops.shape(new_indices)[0]
sum_values = math_ops.unsorted_segment_sum(values, indx, num_indices)
return ops.IndexedSlices(sum_values, new_indices, concat_grad.dense_shape)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This contains most of the synchronization implementation and also wraps the
apply_gradients() from the real optimizer.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the Optimizer constructor.
Returns:
train_op: The op to dequeue a token so the replicas can exit this batch
and start the next one. This is executed by each replica.
Raises:
ValueError: If the grads_and_vars is empty.
ValueError: If global step is not provided, the staleness cannot be
checked.
"""
if not grads_and_vars:
raise ValueError("Must supply at least one variable")
if global_step is None:
raise ValueError("Global step is required to check staleness")
self._global_step = global_step
train_ops = []
aggregated_grad = []
inputs = []
var_list = []
for x in grads_and_vars:
inputs.extend(list(x))
with ops.device(global_step.device):
self._local_steps = variables.Variable(
array_ops.zeros(
[self._total_num_replicas],
dtype=global_step.dtype),
trainable=False,
name="local_steps")
# Check staleness. Note that this has to be ref(), otherwise identity will
# be accessed and it will be old values.
local_step = array_ops.slice(self._local_steps.ref(),
array_ops.reshape(self._replica_id, (1,)),
[1],
name="get_local_step")
local_step = array_ops.reshape(local_step, ())
is_stale = math_ops.less(local_step, global_step)
with ops.op_scope(inputs, None, self._name):
for grad, var in grads_and_vars:
var_list.append(var)
with ops.device(var.device):
if isinstance(grad, ops.Tensor):
gradient_queue = (data_flow_ops.FIFOQueue(self._tokens_per_step * 2,
grad.dtype,
shapes=var.get_shape(),
shared_name=var.name))
self._one_element_queue_list.append((gradient_queue, var.device))
train_ops.append(gradient_queue.enqueue([grad]))
# Aggregate all gradients
gradients = gradient_queue.dequeue_many(
self._replicas_to_aggregate)
aggregated_grad.append(math_ops.reduce_sum(gradients, [0]))
elif grad is None:
aggregated_grad.append(None) # pass-through.
else:
if not isinstance(grad, ops.IndexedSlices):
raise ValueError("Unknown grad type!")
aggregated_grad.append(self._aggregate_sparse_grad(grad, var,
train_ops))
aggregated_grads_and_vars = zip(aggregated_grad, var_list)
# sync_op will be assigned to the same device as the global step.
with ops.device(global_step.device), ops.name_scope(""):
update_op = self._opt.apply_gradients(aggregated_grads_and_vars,
global_step)
# Create token queue.
with ops.device(global_step.device), ops.name_scope(""):
sync_token_queue = (
data_flow_ops.FIFOQueue(-1,
global_step.dtype.base_dtype,
shapes=(),
shared_name="sync_token_q"))
self._sync_token_queue = sync_token_queue
# dummy_queue is passed to the queue runner. Don't use the real queues
# because the queue runner doesn't automatically reopen it once it
# closed queues in PS devices.
dummy_queue = (
data_flow_ops.FIFOQueue(1,
types_pb2.DT_INT32,
shapes=(),
shared_name="dummy_queue"))
# Clear all the gradients queues in case there are stale gradients.
clear_queue_ops = []
with ops.control_dependencies([update_op]):
for queue, dev in self._one_element_queue_list:
with ops.device(dev):
stale_grads = queue.dequeue_many(queue.size())
clear_queue_ops.append(stale_grads)
for queue, dev in self._sparse_grad_queues_and_devs:
with ops.device(dev):
_, stale_indices = queue.dequeue_many(queue.size())
clear_queue_ops.append(stale_indices)
with ops.device(global_step.device):
self._clean_up_op = control_flow_ops.abort(
error_msg="From sync_replicas")
# According to the staleness, select between the enqueue op (real_grad)
# or no-op (no_op_grad). Effectively dropping all the stale gradients.
no_op_grad = lambda: [control_flow_ops.no_op(name="no_grad_enqueue")]
real_grad = lambda: [control_flow_ops.group(*train_ops)]
final_train_ops = control_flow_ops.cond(is_stale, no_op_grad, real_grad)
with ops.device(global_step.device), ops.name_scope(""):
# Replicas have to wait until they can get a token from the token queue.
with ops.control_dependencies([final_train_ops]):
token = sync_token_queue.dequeue()
train_op = state_ops.scatter_update(self._local_steps,
self._replica_id, token)
with ops.control_dependencies(clear_queue_ops):
# Sync_op needs to insert tokens to the token queue at the end of the
# step so the replicas can fetch them to start the next step.
# Note that ref() is used to avoid reading from the identity with old
# the step.
tokens = array_ops.fill([self._tokens_per_step], global_step.ref())
sync_op = sync_token_queue.enqueue_many((tokens,))
if self._variable_averages is not None:
with ops.control_dependencies([sync_op]), ops.name_scope(""):
sync_op = self._variable_averages.apply(
self._variables_to_average)
self._chief_queue_runner = queue_runner.QueueRunner(dummy_queue,
[sync_op])
self._gradients_applied = True
return train_op
def get_chief_queue_runner(self):
"""Returns the QueueRunner for the chief to execute.
This includes the operations to synchronize replicas: aggregate gradients,
apply to variables, increment global step, insert tokens to token queue.
Note that this can only be called after calling apply_gradients() which
actually generates this queuerunner.
Returns:
A `QueueRunner` for chief to execute.
Raises:
ValueError: If this is called before apply_gradients().
"""
if self._gradients_applied is False:
raise ValueError("Should be called after apply_gradients().")
return self._chief_queue_runner
def get_slot(self, *args, **kwargs):
"""Return a slot named "name" created for "var" by the Optimizer.
This simply wraps the get_slot() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
return self._opt.get_slot(*args, **kwargs)
def get_slot_names(self, *args, **kwargs):
"""Return a list of the names of slots created by the `Optimizer`.
This simply wraps the get_slot_names() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
A list of strings.
"""
return self._opt.get_slot_names(*args, **kwargs)
def get_clean_up_op(self):
"""Returns the clean up op for the chief to execute before exit.
This includes the operation to abort the device with the token queue so all
other replicas can also restart. This can avoid potential hang when chief
restarts.
Note that this can only be called after calling apply_gradients().
Returns:
A clean_up_op for chief to execute before exits.
Raises:
ValueError: If this is called before apply_gradients().
"""
if self._gradients_applied is False:
raise ValueError(
"get_clean_up_op() should be called after apply_gradients().")
return self._clean_up_op
def get_init_tokens_op(self, num_tokens=-1):
"""Returns the op to fill the sync_token_queue with the tokens.
This is supposed to be executed in the beginning of the chief/sync thread
so that even if the total_num_replicas is less than replicas_to_aggregate,
the model can still proceed as the replicas can compute multiple steps per
variable update. Make sure:
`num_tokens >= replicas_to_aggregate - total_num_replicas`.
Args:
num_tokens: Number of tokens to add to the queue.
Returns:
An op for the chief/sync replica to fill the token queue.
Raises:
ValueError: If this is called before apply_gradients().
ValueError: If num_tokens are smaller than replicas_to_aggregate -
total_num_replicas.
"""
if self._gradients_applied is False:
raise ValueError(
"get_init_tokens_op() should be called after apply_gradients().")
tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
if num_tokens == -1:
num_tokens = self._replicas_to_aggregate
elif num_tokens < tokens_needed:
raise ValueError(
"Too few tokens to finish the first step: %d (given) vs %d (needed)" %
(num_tokens, tokens_needed))
if num_tokens > 0:
with ops.device(self._global_step.device), ops.name_scope(""):
tokens = array_ops.fill([num_tokens],
self._global_step.ref())
init_tokens = self._sync_token_queue.enqueue_many((tokens,))
else:
init_tokens = control_flow_ops.no_op(name="no_init_tokens")
return init_tokens
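# Worked example of the token arithmetic above (illustrative): with
# replicas_to_aggregate=50 and total_num_replicas=40, tokens_needed is
# 50 - 40 = 10, so num_tokens=10 is the smallest value that lets the first
# step complete, while the default num_tokens=-1 enqueues all 50 tokens.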
| apache-2.0 |
bravominski/PennApps2015-HeartMates | venv/lib/python2.7/site-packages/requests/packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified based on win1251BulgarianCharToOrderMap, so
# only numbers < 64 are surely valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences: 3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
Latin5BulgarianModel = {
'charToOrderMap': Latin5_BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
Win1251BulgarianModel = {
'charToOrderMap': win1251BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
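# Both models above share the same precedence matrix (BulgarianLangModel);
# they differ only in the character-to-order map used for their respective
# encodings (ISO-8859-5 vs. windows-1251).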
# flake8: noqa
| apache-2.0 |
korzeniewskipl/python-mode | pymode/libs3/rope/base/oi/doa.py | 32 | 5029 | import pickle
import marshal
import os
import socket
import subprocess
import sys
import tempfile
import threading
class PythonFileRunner(object):
"""A class for running python project files"""
def __init__(self, pycore, file_, args=None, stdin=None,
stdout=None, analyze_data=None):
self.pycore = pycore
self.file = file_
self.analyze_data = analyze_data
self.observers = []
self.args = args
self.stdin = stdin
self.stdout = stdout
def run(self):
"""Execute the process"""
env = dict(os.environ)
file_path = self.file.real_path
path_folders = self.pycore.get_source_folders() + \
self.pycore.get_python_path_folders()
env['PYTHONPATH'] = os.pathsep.join(folder.real_path
for folder in path_folders)
runmod_path = self.pycore.find_module('rope.base.oi.runmod').real_path
self.receiver = None
self._init_data_receiving()
send_info = '-'
if self.receiver:
send_info = self.receiver.get_send_info()
args = [sys.executable, runmod_path, send_info,
self.pycore.project.address, self.file.real_path]
if self.analyze_data is None:
del args[1:4]
if self.args is not None:
args.extend(self.args)
self.process = subprocess.Popen(
executable=sys.executable, args=args, env=env,
cwd=os.path.split(file_path)[0], stdin=self.stdin,
stdout=self.stdout, stderr=self.stdout, close_fds=os.name != 'nt')
def _init_data_receiving(self):
if self.analyze_data is None:
return
# Disabling FIFO data transfer due to blocking when running
# unittests in the GUI.
# XXX: Handle FIFO data transfer for `rope.ui.testview`
if True or os.name == 'nt':
self.receiver = _SocketReceiver()
else:
self.receiver = _FIFOReceiver()
self.receiving_thread = threading.Thread(target=self._receive_information)
self.receiving_thread.setDaemon(True)
self.receiving_thread.start()
def _receive_information(self):
#temp = open('/dev/shm/info', 'w')
for data in self.receiver.receive_data():
self.analyze_data(data)
#temp.write(str(data) + '\n')
#temp.close()
for observer in self.observers:
observer()
def wait_process(self):
"""Wait for the process to finish"""
self.process.wait()
if self.analyze_data:
self.receiving_thread.join()
def kill_process(self):
"""Stop the process"""
if self.process.poll() is not None:
return
try:
if hasattr(self.process, 'terminate'):
self.process.terminate()
elif os.name != 'nt':
os.kill(self.process.pid, 9)
else:
import ctypes
handle = int(self.process._handle)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
except OSError:
pass
def add_finishing_observer(self, observer):
"""Notify this observer when execution finishes"""
self.observers.append(observer)
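# A minimal usage sketch for PythonFileRunner (assumes an existing rope
# `pycore` and a project file resource; the argument values are illustrative):
#
#     runner = PythonFileRunner(pycore, file_, args=['--verbose'])
#     runner.run()
#     runner.wait_process()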
class _MessageReceiver(object):
def receive_data(self):
pass
def get_send_info(self):
pass
class _SocketReceiver(_MessageReceiver):
def __init__(self):
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.data_port = 3037
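# Probe ports upward from 3037 until one can be bound; the loop below
# gives up once the port number reaches 4000.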
while self.data_port < 4000:
try:
self.server_socket.bind(('', self.data_port))
break
except socket.error as e:
self.data_port += 1
self.server_socket.listen(1)
def get_send_info(self):
return str(self.data_port)
def receive_data(self):
conn, addr = self.server_socket.accept()
self.server_socket.close()
my_file = conn.makefile('rb')
while True:
try:
yield pickle.load(my_file)
except EOFError:
break
my_file.close()
conn.close()
class _FIFOReceiver(_MessageReceiver):
def __init__(self):
# XXX: this is insecure and might cause race conditions
self.file_name = self._get_file_name()
os.mkfifo(self.file_name)
def _get_file_name(self):
prefix = tempfile.gettempdir() + '/__rope_'
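# Scan for the first unused name of the form <tempdir>/__rope_NNNN,
# zero-padded to four digits.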
i = 0
while os.path.exists(prefix + str(i).rjust(4, '0')):
i += 1
return prefix + str(i).rjust(4, '0')
def get_send_info(self):
return self.file_name
def receive_data(self):
my_file = open(self.file_name, 'rb')
while True:
try:
yield marshal.load(my_file)
except EOFError:
break
my_file.close()
os.remove(self.file_name)
| lgpl-3.0 |
allisony/pyspeckit | ah_bootstrap.py | 16 | 35044 | """
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``; the presence of
that section, and the options therein, determines the next step taken: if it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken (however,
``ah_bootstrap.use_astropy_helpers`` may be called manually from within the
setup.py script).
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
if sys.version_info[0] < 3:
_str_types = (str, unicode)
_text_type = unicode
PY3 = False
else:
_str_types = (str, bytes)
_text_type = str
PY3 = True
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
try:
import pkg_resources
_setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
# This may raise a DistributionNotFound in which case no version of
# setuptools or distribute is properly installed
_setuptools = pkg_resources.get_distribution('setuptools')
if _setuptools not in _setuptools_req:
# Older version of setuptools; check if we have distribute; again if
# this results in DistributionNotFound we want to give up
_distribute = pkg_resources.get_distribution('distribute')
if _setuptools != _distribute:
# It's possible on some pathological systems to have an old version
# of setuptools and distribute on sys.path simultaneously; make
# sure distribute is the one that's used
sys.path.insert(1, _distribute.location)
_distribute.activate()
imp.reload(pkg_resources)
except:
# There are several types of exceptions that can occur here; if all else
# fails bootstrap and use the bootstrapped version
from ez_setup import use_setuptools
use_setuptools()
# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
# initializing submodule with ah_bootstrap.py
# See discussion and references in
# https://github.com/astropy/astropy-helpers/issues/302
try:
import typing # noqa
except ImportError:
pass
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat # noqa
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
# Ignore if this fails for *any* reason
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
def __init__(self, path=None, index_url=None, use_git=None, offline=None,
download_if_needed=None, auto_upgrade=None):
if path is None:
path = PACKAGE_NAME
if not (isinstance(path, _str_types) or path is False):
raise TypeError('path must be a string or False')
if PY3 and not isinstance(path, _text_type):
fs_encoding = sys.getfilesystemencoding()
path = path.decode(fs_encoding) # path to unicode
self.path = path
# Set other option attributes, using defaults where necessary
self.index_url = index_url if index_url is not None else INDEX_URL
self.offline = offline if offline is not None else OFFLINE
# If offline=True, override download and auto-upgrade
if self.offline:
download_if_needed = False
auto_upgrade = False
self.download = (download_if_needed
if download_if_needed is not None
else DOWNLOAD_IF_NEEDED)
self.auto_upgrade = (auto_upgrade
if auto_upgrade is not None else AUTO_UPGRADE)
# If this is a release then the .git directory will not exist so we
# should not use git.
git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
if use_git is None and not git_dir_exists:
use_git = False
self.use_git = use_git if use_git is not None else USE_GIT
# Declared as False by default--later we check if astropy-helpers can be
# upgraded from PyPI, but only if not using a source distribution (as in
# the case of import from a git submodule)
self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
try:
cfg.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
return {}
if not cfg.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not cfg.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = cfg.getboolean('ah_bootstrap', option)
else:
value = cfg.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
# of the same name then we will break that. However, there's a catch-22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
return config
def run(self):
strategies = ['local_directory', 'local_file', 'index']
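# Each name in `strategies` maps to a get_<strategy>_dist method below;
# the strategies are tried in order--local directory/submodule first, then
# a local source archive, then the package index--stopping at the first
# one that yields a distribution.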
dist = None
# First, remove any previously imported versions of astropy_helpers;
# this is necessary for nested installs where one package's installer
# is installing another package via setuptools.sandbox.run_setup, as in
# the case of setup_requires
for key in list(sys.modules):
try:
if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
del sys.modules[key]
except AttributeError:
# Sometimes mysterious non-string things can turn up in
# sys.modules
continue
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
for strategy in strategies:
method = getattr(self, 'get_{0}_dist'.format(strategy))
dist = method()
if dist is not None:
break
else:
raise _AHBootstrapSystemExit(
"No source found for the {0!r} package; {0} must be "
"available and importable as a prerequisite to building "
"or installing this package.".format(PACKAGE_NAME))
# This is a bit hacky, but if astropy_helpers was loaded from a
# directory/submodule its Distribution object gets a "precedence" of
# "DEVELOP_DIST". However, in other cases it gets a precedence of
# "EGG_DIST". However, when activing the distribution it will only be
# placed early on sys.path if it is treated as an EGG_DIST, so always
# do that
dist = dist.clone(precedence=pkg_resources.EGG_DIST)
# Otherwise we found a version of astropy-helpers, so we're done.
# Just activate the found distribution on sys.path--if we did a
# download this usually happens automatically but it doesn't hurt to
# do it again
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
# Some (much) older versions of setuptools do not have the
# replace=True option here. These versions are old enough that all
# bets may be off anyways, but it's easy enough to work around just
# in case...
if dist.key in pkg_resources.working_set.by_key:
del pkg_resources.working_set.by_key[dist.key]
pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
Returns a Distribution object for astropy_helpers if one was found at
the given path, or None otherwise.
"""
path = os.path.abspath(self.path)
# Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will invoke a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
with _silence():
run_setup(os.path.join(path, 'setup.py'),
['egg_info'])
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist
def _do_download(self, version='', find_links=None):
if find_links:
allow_hosts = ''
index_url = None
else:
allow_hosts = None
index_url = self.index_url
# Annoyingly, setuptools will not handle other arguments to
# Distribution (such as options) before handling setup_requires, so it
# is not straightforward to programmatically augment the arguments which
# are passed to easy_install
class _Distribution(Distribution):
def get_option_dict(self, command_name):
opts = Distribution.get_option_dict(self, command_name)
if command_name == 'easy_install':
if find_links is not None:
opts['find_links'] = ('setup script', find_links)
if index_url is not None:
opts['index_url'] = ('setup script', index_url)
if allow_hosts is not None:
opts['allow_hosts'] = ('setup script', allow_hosts)
return opts
if version:
req = '{0}=={1}'.format(DIST_NAME, version)
else:
req = DIST_NAME
attrs = {'setup_requires': [req]}
try:
if DEBUG:
_Distribution(attrs=attrs)
else:
with _silence():
_Distribution(attrs=attrs)
# If the setup_requires succeeded it will have added the new dist to
# the main working_set
return pkg_resources.working_set.by_key.get(DIST_NAME)
except Exception as e:
if DEBUG:
raise
msg = 'Error retrieving {0} from {1}:\n{2}'
if find_links:
source = find_links[0]
elif index_url != INDEX_URL:
source = index_url
else:
source = 'PyPI'
raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
# Build up a requirement for a higher bugfix release but a lower minor
# release (so API compatibility is guaranteed)
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
``_check_submodule_no_git`` method uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
# This is a warning that occurs in perl (from running git submodule)
# which only occurs with a malformatted locale setting which can
# happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
# 1. Status indicator: '-' if the submodule is uninitialized, '+' if
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
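# For example, an initialized submodule that is ahead of the recorded
# commit might produce a status line like this (the hash and describe
# output here are purely illustrative):
#
# +a94a8fe5ccb19ba61c4c0873d391e987982fbbd3 astropy_helpers (heads/master)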
_git_submodule_status_re = re.compile(
'^(?P<status>[+-U ])(?P<commit>[0-9a-f]{40}) '
'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not execute any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
if line and line[0] in ('#', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
# Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, _text_type):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, _text_type):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
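# A usage sketch for run_cmd (the command shown is illustrative):
#
#     returncode, stdout, stderr = run_cmd(['git', '--version'])
#     if returncode != 0:
#         log.warn(stderr)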
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
def __init__(self, *args):
if not args:
msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
else:
msg = args[0]
msg += '\n' + _err_help_msg
super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
If `True`, disable all actions that require an internet connection,
including downloading packages from the package index and fetching
updates to any git submodule. Defaults to `False`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
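# As noted in the module docstring, the bootstrapper can also be invoked
# manually from a project's setup.py; a minimal sketch, with illustrative
# argument values:
#
#     import ah_bootstrap
#     ah_bootstrap.use_astropy_helpers(path='astropy_helpers', offline=False)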
| mit |
SSL-Roots/CON-SAI | decision_making/scripts/plays/play_inplay_our_defence.py | 1 | 2025 |
from play_base import Play
from tactics.tactic_keep import TacticKeep
from tactics.tactic_intersection import TacticIntersection
from tactics.tactic_interpose import TacticInterpose
from tactics.tactic_clear import TacticClear
from tactics.tactic_position import TacticPosition
from consai_msgs.msg import Pose
import constants
class PlayInPlayOurDefence(Play):
def __init__(self):
super(PlayInPlayOurDefence, self).__init__('PlayInPlayOurDefence')
self.applicable = "BALL_IN_OUR_DEFENCE"
self.done_aborted = "BALL_IN_OUR_DEFENCE"
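# Role layout configured below: role 0 clears the ball, role 1 holds the
# field center, role 2 interposes at a fixed distance (to_dist = 1.5),
# roles 3 and 4 keep along x = -2.0 over the upper and lower halves of the
# field, and role 5 covers the line between two poses near the side lines.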
self.roles[0].loop_enable = True
self.roles[0].behavior.add_child(
TacticClear('TacticClear', self.roles[0].my_role)
)
self.roles[1].loop_enable = True
self.roles[1].behavior.add_child(
TacticPosition('TacticPosition', self.roles[1].my_role,
0, 0, 0)
)
self.roles[2].loop_enable = True
self.roles[2].behavior.add_child(
TacticInterpose('TacticInterpose', self.roles[2].my_role,
to_dist = 1.5)
)
range_y = constants.FieldHalfY - 0.7
self.roles[3].loop_enable = True
self.roles[3].behavior.add_child(
TacticKeep('TacticKeep', self.roles[3].my_role, keep_x = -2.0,
range_high = range_y,
range_low = 0.5)
)
self.roles[4].loop_enable = True
self.roles[4].behavior.add_child(
TacticKeep('TacticKeep', self.roles[4].my_role, keep_x = -2.0,
range_high = -0.5,
range_low = -range_y)
)
pose1 = Pose(-2.5, range_y, 0)
pose2 = Pose(-2.5, -range_y, 0)
self.roles[5].loop_enable = True
self.roles[5].behavior.add_child(
TacticIntersection('TacticIntersection', self.roles[5].my_role,
pose1 = pose1, pose2 = pose2)
)
| mit |