code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
# Support for exporting Qt's MenuBars/Menus over DBUS. The API is defined in
# dbus-menu.xml from the libdbusmenu project https://launchpad.net/libdbusmenu
import dbus, sip
from PyQt5.Qt import (
QApplication, QMenu, QIcon, QKeySequence, QObject, QEvent, QTimer, pyqtSignal, Qt)
from calibre.utils.dbus_service import Object, BusName, method as dbus_method, dbus_property, signal as dbus_signal
from calibre.gui2.dbus_export.utils import (
setup_for_cli_run, swap_mnemonic_char, key_sequence_to_dbus_shortcut, icon_to_dbus_menu_icon)
# Sentinel distinct from every real property value, used for absent-key
# comparisons (see actions_changed).
null = object()


def PropDict(mapping=()):
    """Build a DBus dictionary of string keys mapped to variant values ('sv')."""
    ans = dbus.Dictionary(mapping, signature='sv')
    return ans
def create_properties_for_action(ac, previous=None):
    '''Build the com.canonical.dbusmenu property dictionary for a QAction.

    :param ac: the QAction to describe
    :param previous: the previously computed PropDict for this action, if
        any; used to avoid re-encoding the icon when its cache key has not
        changed
    :return: a PropDict of dbusmenu item properties
    '''
    ans = PropDict()
    if ac.isSeparator():
        ans['type'] = 'separator'
        if not ac.isVisible():
            ans['visible'] = False
        # Separators carry no further properties
        return ans
    text = ac.text() or ac.iconText()
    if text:
        ans['label'] = swap_mnemonic_char(text)
    if not ac.isEnabled():
        ans['enabled'] = False
    # The 'blocked' dynamic property is set by DBusMenu.set_visible() to hide
    # all items without touching each action's own visibility flag
    if not ac.isVisible() or ac.property('blocked') is True:
        ans['visible'] = False
    if ac.menu() is not None:
        ans['children-display'] = 'submenu'
    if ac.isCheckable():
        exclusive = ac.actionGroup() is not None and ac.actionGroup().isExclusive()
        ans['toggle-type'] = 'radio' if exclusive else 'checkmark'
        ans['toggle-state'] = int(ac.isChecked())
    shortcuts = ac.shortcuts()
    if shortcuts:
        sc = dbus.Array(signature='as')
        for s in shortcuts:
            if not s.isEmpty():
                for x in key_sequence_to_dbus_shortcut(s):
                    sc.append(dbus.Array(x, signature='s'))
        if sc:
            ans['shortcut'] = sc[:1]  # Unity fails to display the shortcuts at all if more than one is specified
    if ac.isIconVisibleInMenu():
        icon = ac.icon()
        # Re-use the previously encoded icon data when the icon is unchanged
        if previous and previous.get('x-qt-icon-cache-key') == icon.cacheKey():
            for x in 'icon-data x-qt-icon-cache-key'.split():
                ans[x] = previous[x]
        else:
            data = icon_to_dbus_menu_icon(ac.icon())
            if data is not None:
                ans['icon-data'] = data
                ans['x-qt-icon-cache-key'] = icon.cacheKey()
    return ans
def menu_actions(menu):
    '''Return the actions of *menu*, working around a sip TypeError that can
    occur when calling actions() on a wrapped QMenu (call the unbound
    QMenu.actions in that case).'''
    try:
        ans = menu.actions()
    except TypeError:
        if not isinstance(menu, QMenu):
            raise
        ans = QMenu.actions(menu)
    return ans
class DBusMenu(QObject):

    '''Export a QMenu hierarchy as a com.canonical.dbusmenu object tree.

    Maps every QAction to an integer dbusmenu id (0 is the root), watches
    the menus via an event filter and batches property/layout change
    notifications with zero-interval single-shot timers.
    '''

    handle_event_signal = pyqtSignal(object, object, object, object)

    def __init__(self, object_path, parent=None, bus=None):
        QObject.__init__(self, parent)
        # Unity barfs if the Event DBUS method does not return immediately, so
        # handle it asynchronously
        self.handle_event_signal.connect(self.handle_event, type=Qt.QueuedConnection)
        self.dbus_api = DBusMenuAPI(self, object_path, bus=bus)
        self.set_status = self.dbus_api.set_status
        self._next_id = 0
        # Coalesce bursts of changes into single DBUS signals
        self.action_changed_timer = t = QTimer(self)
        t.setInterval(0), t.setSingleShot(True), t.timeout.connect(self.actions_changed)
        self.layout_changed_timer = t = QTimer(self)
        t.setInterval(0), t.setSingleShot(True), t.timeout.connect(self.layouts_changed)
        self.init_maps()

    @property
    def object_path(self):
        return self.dbus_api._object_path

    def init_maps(self, qmenu=None):
        # Reset all id <-> action maps and pending change sets for a new menu
        self.action_changes = set()
        self.layout_changes = set()
        self.qmenu = qmenu
        self._id_to_action, self._action_to_id = {}, {}
        self._action_properties = {}

    @property
    def next_id(self):
        # Monotonically increasing ids; 0 is reserved for the root action
        self._next_id += 1
        return self._next_id

    def id_to_action(self, action_id):
        if self.qmenu is None:
            return None
        return self._id_to_action.get(action_id)

    def action_to_id(self, action):
        if self.qmenu is None:
            return None
        return self._action_to_id.get(action)

    def action_properties(self, action_id, restrict_to=None):
        '''Return the cached PropDict for action_id, optionally restricted to
        the property names in restrict_to.'''
        if self.qmenu is None:
            return {}
        ans = self._action_properties.get(action_id, PropDict())
        if restrict_to:
            ans = PropDict({k:v for k, v in ans.iteritems() if k in restrict_to})
        return ans

    def publish_new_menu(self, qmenu=None):
        '''Export qmenu (or clear the exported menu if None) and notify
        listeners that the layout changed from the root.'''
        self.init_maps(qmenu)
        if qmenu is not None:
            # Re-publish an empty menu if the underlying QMenu is destroyed
            qmenu.destroyed.connect(lambda obj=None:self.publish_new_menu())
            ac = qmenu.menuAction()
            self.add_action(ac)
        self.dbus_api.LayoutUpdated(self.dbus_api.revision, 0)

    def set_visible(self, visible):
        # Show/hide the whole menu by setting the 'blocked' dynamic property
        # on the top-level actions (see create_properties_for_action)
        ac = self.id_to_action(0)
        if ac is not None and self.qmenu is not None:
            changed = False
            blocked = not visible
            for ac in menu_actions(ac.menu()):
                ac_id = self.action_to_id(ac)
                if ac_id is not None:
                    old = ac.property('blocked')
                    if old is not blocked:
                        ac.setProperty('blocked', blocked)
                        self.action_changes.add(ac_id)
                        changed = True
            if changed:
                self.action_changed_timer.start()

    def add_action(self, ac):
        ac_id = 0 if ac.menu() is self.qmenu else self.next_id
        self._id_to_action[ac_id] = ac
        self._action_to_id[ac] = ac_id
        self._action_properties[ac_id] = create_properties_for_action(ac)
        if ac.menu() is not None:
            self.add_menu(ac.menu())

    def add_menu(self, menu):
        # Watch the menu for Action{Changed,Added,Removed} events
        menu.installEventFilter(self)
        for ac in menu_actions(menu):
            self.add_action(ac)

    def eventFilter(self, obj, ev):
        ac = getattr(obj, 'menuAction', lambda : None)()
        ac_id = self.action_to_id(ac)
        if ac_id is not None:
            etype = ev.type()
            if etype == QEvent.ActionChanged:
                ac_id = self.action_to_id(ev.action())
                self.action_changes.add(ac_id)
                self.action_changed_timer.start()
            elif etype == QEvent.ActionAdded:
                self.layout_changes.add(ac_id)
                self.layout_changed_timer.start()
                self.add_action(ev.action())
            elif etype == QEvent.ActionRemoved:
                self.layout_changes.add(ac_id)
                self.layout_changed_timer.start()
                self.action_removed(ev.action())
        return False

    def actions_changed(self):
        '''Recompute properties for all changed actions and emit
        ItemsPropertiesUpdated with the delta (updated and removed props).'''
        updated_props = dbus.Array(signature='(ia{sv})')
        removed_props = dbus.Array(signature='(ias)')
        for ac_id in self.action_changes:
            ac = self.id_to_action(ac_id)
            if ac is None:
                continue
            old_props = self.action_properties(ac_id)
            new_props = self._action_properties[ac_id] = create_properties_for_action(ac, old_props)
            removed = set(old_props) - set(new_props)
            if removed:
                removed_props.append((ac_id, dbus.Array(removed, signature='as')))
            updated = PropDict({k:v for k, v in new_props.iteritems() if v != old_props.get(k, null)})
            if updated:
                updated_props.append((ac_id, updated))
        self.action_changes = set()
        if updated_props or removed_props:
            self.dbus_api.ItemsPropertiesUpdated(updated_props, removed_props)
        return updated_props, removed_props

    def layouts_changed(self):
        '''Emit LayoutUpdated (with a bumped revision) for every changed
        parent that is still present in the menu.'''
        changes = set()
        for ac_id in self.layout_changes:
            if ac_id in self._id_to_action:
                changes.add(ac_id)
        self.layout_changes = set()
        if changes:
            self.dbus_api.revision += 1
            for change in changes:
                self.dbus_api.LayoutUpdated(self.dbus_api.revision, change)
        return changes

    def action_is_in_a_menu(self, ac):
        # An action can belong to several menus; only forget it when it is in
        # none of the menus we are exporting
        if sip.isdeleted(ac):
            return False
        all_menus = {a.menu() for a in self._action_to_id if not sip.isdeleted(a)}
        all_menus.discard(None)
        return bool(set(ac.associatedWidgets()).intersection(all_menus))

    def action_removed(self, ac):
        if not self.action_is_in_a_menu(ac):
            ac_id = self._action_to_id.pop(ac, None)
            self._id_to_action.pop(ac_id, None)
            self._action_properties.pop(ac_id, None)

    def get_layout(self, parent_id, depth, property_names):
        # Ensure any pending updates are done, as they are needed now
        self.actions_changed()
        self.layouts_changed()
        property_names = property_names or None
        props = self.action_properties(parent_id, property_names)
        return parent_id, props, self.get_layout_children(parent_id, depth, property_names)

    def get_layout_children(self, parent_id, depth, property_names):
        # Recursive helper for get_layout(); depth == -1 means unlimited
        ans = dbus.Array(signature='(ia{sv}av)')
        ac = self.id_to_action(parent_id)
        if ac is not None and depth != 0 and ac.menu() is not None:
            for child in menu_actions(ac.menu()):
                child_id = self.action_to_id(child)
                if child_id is not None:
                    props = self.action_properties(child_id, property_names)
                    ans.append((child_id, props, self.get_layout_children(child_id, depth - 1, property_names)))
        return ans

    def get_properties(self, ids=None, property_names=None):
        property_names = property_names or None
        ans = dbus.Array(signature='(ia{sv})')
        for action_id in (ids or self._id_to_action):
            ans.append((action_id, self.action_properties(action_id, property_names)))
        return ans

    def handle_event(self, action_id, event, data, timestamp):
        ac = self.id_to_action(action_id)
        # This runs via a queued connection, so the action validated in
        # DBusMenuAPI.Event() may have been removed/destroyed meanwhile
        if ac is None:
            return
        if event == 'clicked':
            if ac.isCheckable():
                ac.toggle()
            ac.triggered.emit(ac.isCheckable() and ac.isChecked())

    def handle_about_to_show(self, ac):
        '''Emit aboutToShow on the submenu and report whether its layout or
        any of its children changed as a result.'''
        child_ids = {self.action_to_id(x) for x in menu_actions(ac.menu())}
        child_ids.discard(None)
        ac_id = self.action_to_id(ac)
        ac.menu().aboutToShow.emit()
        if ac_id in self.layout_changes or child_ids.intersection(self.action_changes):
            return True
        return False
class DBusMenuAPI(Object):

    '''The actual com.canonical.dbusmenu DBUS object. Delegates all state
    queries and event handling to its owning DBusMenu instance.'''

    IFACE = 'com.canonical.dbusmenu'

    def __init__(self, menu, object_path, bus=None):
        if bus is None:
            bus = dbus.SessionBus()
        Object.__init__(self, bus, object_path)
        self.status = 'normal'
        self.menu = menu
        self.revision = 0

    @dbus_property(IFACE, signature='u')
    def Version(self):
        return 3  # GTK 3 uses 3, KDE 4 uses 2

    @dbus_property(IFACE, signature='s', emits_changed_signal=True)
    def Status(self):
        return self.status

    def set_status(self, normal=True):
        self.status = 'normal' if normal else 'notice'
        self.PropertiesChanged(self.IFACE, {'Status': self.status}, [])

    @dbus_property(IFACE, signature='s')
    def TextDirection(self):
        return 'ltr' if QApplication.instance().isLeftToRight() else 'rtl'

    @dbus_property(IFACE, signature='as')
    def IconThemePath(self):
        return dbus.Array(signature='s')

    @dbus_method(IFACE, in_signature='iias', out_signature='u(ia{sv}av)')
    def GetLayout(self, parentId, recursionDepth, propertyNames):
        layout = self.menu.get_layout(parentId, recursionDepth, propertyNames)
        return self.revision, layout

    @dbus_method(IFACE, in_signature='aias', out_signature='a(ia{sv})')
    def GetGroupProperties(self, ids, propertyNames):
        return self.menu.get_properties(ids, propertyNames)

    @dbus_method(IFACE, in_signature='is', out_signature='v')
    def GetProperty(self, id, name):
        return self.menu.action_properties(id).get(name, '')

    @dbus_method(IFACE, in_signature='isvu', out_signature='')
    def Event(self, id, eventId, data, timestamp):
        ''' This is called by the applet to notify the application an event happened on a
        menu item. eventId can be one of the following::
            * "clicked"
            * "hovered"
            * "opened"
            * "closed"
        Vendor specific events can be added by prefixing them with "x-<vendor>-"'''
        if self.menu.id_to_action(id) is not None:
            self.menu.handle_event_signal.emit(id, eventId, data, timestamp)

    @dbus_method(IFACE, in_signature='a(isvu)', out_signature='ai')
    def EventGroup(self, events):
        ''' Used to pass a set of events as a single message for possibly
        several different menuitems. This is done to optimize DBus traffic.
        Should return a list of ids that are not found. events is a list of
        events in the same format as used for the Event method.'''
        # Element signature 'i' to match the declared out_signature='ai'
        # (was 'u', which contradicted the declaration)
        missing = dbus.Array(signature='i')
        for id, eventId, data, timestamp in events:
            if self.menu.id_to_action(id) is not None:
                self.menu.handle_event_signal.emit(id, eventId, data, timestamp)
            else:
                missing.append(id)
        return missing

    @dbus_method(IFACE, in_signature='i', out_signature='b')
    def AboutToShow(self, id):
        ac = self.menu.id_to_action(id)
        if ac is not None and ac.menu() is not None:
            return self.menu.handle_about_to_show(ac)
        return False

    @dbus_method(IFACE, in_signature='ai', out_signature='aiai')
    def AboutToShowGroup(self, ids):
        updates_needed = dbus.Array(signature='i')
        id_errors = dbus.Array(signature='i')
        for ac_id in ids:
            # Bug fix: look up ac_id, not the builtin id() function, which
            # always returned None and put every entry into id_errors
            ac = self.menu.id_to_action(ac_id)
            if ac is not None and ac.menu() is not None:
                if self.menu.handle_about_to_show(ac):
                    updates_needed.append(ac_id)
            else:
                id_errors.append(ac_id)
        return updates_needed, id_errors

    @dbus_signal(IFACE, 'a(ia{sv})a(ias)')
    def ItemsPropertiesUpdated(self, updatedProps, removedProps):
        pass

    @dbus_signal(IFACE, 'ui')
    def LayoutUpdated(self, revision, parent):
        pass

    @dbus_signal(IFACE, 'iu')
    def ItemActivationRequested(self, id, timestamp):
        pass
def test():
    # Manual smoke test: export a menu with a single Quit action over DBUS.
    # Inspect the exported tree with a dbusmenu consumer (e.g. Unity, d-feet).
    setup_for_cli_run()
    app = QApplication([])
    bus = dbus.SessionBus()
    # do_not_queue so a second instance fails fast instead of waiting for the name
    dbus_name = BusName('com.calibre-ebook.TestDBusMenu', bus=bus, do_not_queue=True)
    m = QMenu()
    ac = m.addAction(QIcon(I('window-close.png')), 'Quit', app.quit)
    ac.setShortcut(QKeySequence('Ctrl+Q'))
    menu = DBusMenu('/Menu', bus=bus)
    menu.publish_new_menu(m)
    app.exec_()
    # Keep the bus name alive until the event loop exits
    del dbus_name

if __name__ == '__main__':
    test()
| drxaero/calibre | src/calibre/gui2/dbus_export/menu.py | Python | gpl-3.0 | 14,935 |
from ..broker import Broker
class DeviceServiceServiceBroker(Broker):
controller = "device_service_services"
    def show(self, **kwargs):
        """Shows the details for the specified device service service.

        **Inputs**

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` True
        |  ``default:`` None

        :param DeviceServiceServiceID: The internal NetMRI identifier of this usage relationship between service objects.
        :type DeviceServiceServiceID: Integer

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param methods: A list of device service service methods. The listed methods will be called on each device service service returned and included in the output. Available methods are: parent_device_service, child_device_service, data_source, device.
        :type methods: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: parent_device_service, child_device_service, data_source, device.
        :type include: Array of String

        **Outputs**

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :return device_service_service: The device service service identified by the specified DeviceServiceServiceID.
        :rtype device_service_service: DeviceServiceService
        """
        # Single-record lookup: delegates to the generic API request machinery
        # using this broker's controller ("device_service_services").
        return self.api_request(self._get_method_fullname("show"), kwargs)
    def index(self, **kwargs):
        """Lists the available device service services. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.

        **Inputs**

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param DeviceID: The internal NetMRI identifier for the device to which belongs this services.
        :type DeviceID: Array of Integer

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param DeviceServiceServiceID: The internal NetMRI identifier of this usage relationship between service objects.
        :type DeviceServiceServiceID: Array of Integer

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
        :type DeviceGroupID: Array of Integer

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param timestamp: The data returned will represent the device service services as of this date and time. If omitted, the result will indicate the most recently collected data.
        :type timestamp: DateTime

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param methods: A list of device service service methods. The listed methods will be called on each device service service returned and included in the output. Available methods are: parent_device_service, child_device_service, data_source, device.
        :type methods: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: parent_device_service, child_device_service, data_source, device.
        :type include: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` 0

        :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
        :type start: Integer

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` 1000

        :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
        :type limit: Integer

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` DeviceServiceServiceID

        :param sort: The data field(s) to use for sorting the output. Default is DeviceServiceServiceID. Valid values are DeviceServiceServiceID, DeviceID, DataSourceID, ParentDeviceServiceID, ChildDeviceServiceID, SvsvFirstSeenTime, SvsvStartTime, SvsvEndTime, SvsvTimestamp, SvsvChangedCols, SvsvUsage, SvsvProvisionData.
        :type sort: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` asc

        :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
        :type dir: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param select: The list of attributes to return for each DeviceServiceService. Valid values are DeviceServiceServiceID, DeviceID, DataSourceID, ParentDeviceServiceID, ChildDeviceServiceID, SvsvFirstSeenTime, SvsvStartTime, SvsvEndTime, SvsvTimestamp, SvsvChangedCols, SvsvUsage, SvsvProvisionData. If empty or omitted, all attributes will be returned.
        :type select: Array

        |  ``api version min:`` 2.8
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
        :type goto_field: String

        |  ``api version min:`` 2.8
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
        :type goto_value: String

        **Outputs**

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :return device_service_services: An array of the DeviceServiceService objects that match the specified input criteria.
        :rtype device_service_services: Array of DeviceServiceService
        """
        # List query: api_list_request handles pagination of the result set.
        return self.api_list_request(self._get_method_fullname("index"), kwargs)
    def search(self, **kwargs):
        """Lists the available device service services matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.

        **Inputs**

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param ChildDeviceServiceID: The internal NetMRI identifier of the child service (the used service).
        :type ChildDeviceServiceID: Array of Integer

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
        :type DataSourceID: Array of Integer

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param DeviceID: The internal NetMRI identifier for the device to which belongs this services.
        :type DeviceID: Array of Integer

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param DeviceServiceServiceID: The internal NetMRI identifier of this usage relationship between service objects.
        :type DeviceServiceServiceID: Array of Integer

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param ParentDeviceServiceID: The internal NetMRI identifier of the parent service (the user).
        :type ParentDeviceServiceID: Array of Integer

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param SvsvChangedCols: The fields that changed between this revision of the record and the previous revision.
        :type SvsvChangedCols: Array of String

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param SvsvEndTime: The ending effective time of this record, or empty if still in effect.
        :type SvsvEndTime: Array of DateTime

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param SvsvFirstSeenTime: The timestamp of when NetMRI saw for the first time this relationship.
        :type SvsvFirstSeenTime: Array of DateTime

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param SvsvProvisionData: Internal data - do not modify, may change without warning.
        :type SvsvProvisionData: Array of String

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param SvsvStartTime: The starting effective time of this record.
        :type SvsvStartTime: Array of DateTime

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param SvsvTimestamp: The date and time this record was collected or calculated.
        :type SvsvTimestamp: Array of DateTime

        |  ``api version min:`` 2.6
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param SvsvUsage: An indicator of the kind of relationship. One of : child, protID, srcPrtID, dstPrtID, protDstID. The regular indicator is 'child'.
        :type SvsvUsage: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
        :type DeviceGroupID: Array of Integer

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param timestamp: The data returned will represent the device service services as of this date and time. If omitted, the result will indicate the most recently collected data.
        :type timestamp: DateTime

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param methods: A list of device service service methods. The listed methods will be called on each device service service returned and included in the output. Available methods are: parent_device_service, child_device_service, data_source, device.
        :type methods: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: parent_device_service, child_device_service, data_source, device.
        :type include: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` 0

        :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
        :type start: Integer

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` 1000

        :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
        :type limit: Integer

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` DeviceServiceServiceID

        :param sort: The data field(s) to use for sorting the output. Default is DeviceServiceServiceID. Valid values are DeviceServiceServiceID, DeviceID, DataSourceID, ParentDeviceServiceID, ChildDeviceServiceID, SvsvFirstSeenTime, SvsvStartTime, SvsvEndTime, SvsvTimestamp, SvsvChangedCols, SvsvUsage, SvsvProvisionData.
        :type sort: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` asc

        :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
        :type dir: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param select: The list of attributes to return for each DeviceServiceService. Valid values are DeviceServiceServiceID, DeviceID, DataSourceID, ParentDeviceServiceID, ChildDeviceServiceID, SvsvFirstSeenTime, SvsvStartTime, SvsvEndTime, SvsvTimestamp, SvsvChangedCols, SvsvUsage, SvsvProvisionData. If empty or omitted, all attributes will be returned.
        :type select: Array

        |  ``api version min:`` 2.8
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
        :type goto_field: String

        |  ``api version min:`` 2.8
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
        :type goto_value: String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param query: This value will be matched against device service services, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: ChildDeviceServiceID, DataSourceID, DeviceID, DeviceServiceServiceID, ParentDeviceServiceID, SvsvChangedCols, SvsvEndTime, SvsvFirstSeenTime, SvsvProvisionData, SvsvStartTime, SvsvTimestamp, SvsvUsage.
        :type query: String

        |  ``api version min:`` 2.3
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
        :type xml_filter: String

        **Outputs**

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :return device_service_services: An array of the DeviceServiceService objects that match the specified input criteria.
        :rtype device_service_services: Array of DeviceServiceService
        """
        # Flexible (slower) search: same pagination handling as index().
        return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
    """Lists the available device service services matching the input specification.

    This provides the most flexible search specification of all the query
    mechanisms, enabling searching using comparison operations other than
    equality. However, it is more complex to use and will not perform as
    efficiently as the index or search methods.

    Filterable fields: ChildDeviceServiceID, DataSourceID, DeviceID,
    DeviceServiceServiceID, ParentDeviceServiceID, SvsvChangedCols,
    SvsvEndTime, SvsvFirstSeenTime, SvsvProvisionData, SvsvStartTime,
    SvsvTimestamp, SvsvUsage.

    For each field above, three optional inputs are recognized:

    :param op_<field>: The operator to apply to the field. Valid values are:
        =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null,
        is not null, between. For the between operator the value will be
        treated as an Array if a comma delimited string is passed, and it
        must contain an even number of values. (String)
    :param val_f_<field>: If op_<field> is specified, the field named in
        this input will be compared to the value in <field> using the
        specified operator; the value is treated as another field name, not
        a constant. Either this or val_c_<field> must be specified when
        op_<field> is specified. (String)
    :param val_c_<field>: If op_<field> is specified, this explicit constant
        value will be compared to the value in <field> using the specified
        operator. Either this or val_f_<field> must be specified when
        op_<field> is specified. (String)

    General inputs (all optional):

    :param DeviceGroupID: The internal NetMRI identifier of the device
        groups to which to limit the results. (Array of Integer)
    :param timestamp: The data returned will represent the device service
        services as of this date and time; if omitted, the most recently
        collected data is used. (DateTime)
    :param methods: Device service service methods to call on each returned
        record and include in the output. Available: parent_device_service,
        child_device_service, data_source, device. (Array of String)
    :param include: Associated object types to include in the output.
        Available: parent_device_service, child_device_service, data_source,
        device. (Array of String)
    :param start: The record number to return in the selected page of data.
        Default 0. (Integer)
    :param limit: The size of the page of data, maximum 10000. Default 1000.
        (Integer)
    :param sort: The data field(s) to use for sorting the output. Default is
        DeviceServiceServiceID. (Array of String)
    :param dir: The direction(s) in which to sort the data; 'asc' or 'desc'.
        Default 'asc'. (Array of String)
    :param select: The list of attributes to return for each
        DeviceServiceService; if empty or omitted, all attributes are
        returned. (Array)
    :param goto_field: The field name for NIOS GOTO that is used for
        locating a row position of records. (String)
    :param goto_value: The value of goto_field for NIOS GOTO that is used
        for locating a row position of records. (String)
    :param xml_filter: A SetFilter XML structure to further refine the
        search; applied AFTER any search query or field values, but before
        any limit options. This kind of filter may be costly and inefficient
        if not associated with a database filtering. (String)

    **Outputs**

    :return device_service_services: An array of the DeviceServiceService
        objects that match the specified input criteria.
    :rtype device_service_services: Array of DeviceServiceService
    """
    method_name = self._get_method_fullname("find")
    return self.api_list_request(method_name, kwargs)
def parent_device_service(self, **kwargs):
    """The parent service object of this relationship.

    **Inputs**

    :param DeviceServiceServiceID: The internal NetMRI identifier of this
        usage relationship between service objects. (Integer, required)

    **Outputs**

    :return: The parent service object of this relationship.
    :rtype: DeviceService
    """
    method_name = self._get_method_fullname("parent_device_service")
    return self.api_request(method_name, kwargs)
def child_device_service(self, **kwargs):
    """The child service object of this relationship.

    **Inputs**

    :param DeviceServiceServiceID: The internal NetMRI identifier of this
        usage relationship between service objects. (Integer, required)

    **Outputs**

    :return: The child service object of this relationship.
    :rtype: DeviceService
    """
    method_name = self._get_method_fullname("child_device_service")
    return self.api_request(method_name, kwargs)
def data_source(self, **kwargs):
    """The collector NetMRI that collected this data record.

    **Inputs**

    :param DeviceServiceServiceID: The internal NetMRI identifier of this
        usage relationship between service objects. (Integer, required)

    **Outputs**

    :return: The collector NetMRI that collected this data record.
    :rtype: DataSource
    """
    method_name = self._get_method_fullname("data_source")
    return self.api_request(method_name, kwargs)
def device(self, **kwargs):
    """The device from which this data was collected.

    **Inputs**

    :param DeviceServiceServiceID: The internal NetMRI identifier of this
        usage relationship between service objects. (Integer, required)

    **Outputs**

    :return: The device from which this data was collected.
    :rtype: Device
    """
    method_name = self._get_method_fullname("device")
    return self.api_request(method_name, kwargs)
| infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_8_0/device_service_service_broker.py | Python | apache-2.0 | 49,305 |
#! /usr/bin/env python
#
# See README for usage instructions.
import glob
import os
import subprocess
import sys
import platform
# We must use setuptools, not distutils, because we need to use the
# namespace_packages option for the "google" package.
from setuptools import setup, Extension, find_packages
from distutils.command.clean import clean as _clean
if sys.version_info[0] == 3:
# Python 3
from distutils.command.build_py import build_py_2to3 as _build_py
else:
# Python 2
from distutils.command.build_py import build_py as _build_py
from distutils.spawn import find_executable
# Find the Protocol Compiler.  Preference order: an explicit PROTOC
# environment variable, a freshly built compiler in the source tree
# (Unix or Windows layouts), and finally whatever is on PATH.
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
  protoc = os.environ['PROTOC']
else:
  for _candidate in ("../src/protoc",
                     "../src/protoc.exe",
                     "../vsprojects/Debug/protoc.exe",
                     "../vsprojects/Release/protoc.exe"):
    if os.path.exists(_candidate):
      protoc = _candidate
      break
  else:
    # No in-tree build found; fall back to the system-wide binary (or None).
    protoc = find_executable("protoc")
def GetVersion():
  """Gets the version from google/protobuf/__init__.py.

  Do not import google.protobuf.__init__ directly, because an installed
  protobuf library may be loaded instead.

  Returns:
    The version string defined as __version__ in the source tree's
    google/protobuf/__init__.py.
  """
  with open(os.path.join('google', 'protobuf', '__init__.py')) as version_file:
    # Exec into a private namespace: exec'ing into globals() (as the old code
    # did) lets the exec'd file silently clobber this script's module-level
    # names (os, sys, protoc, ...).
    namespace = {}
    exec(version_file.read(), namespace)
    return namespace['__version__']
def generate_proto(source, require = True):
  """Invokes the Protocol Compiler to generate a _pb2.py from the given
  .proto file.  Does nothing if the output already exists and is newer than
  the input.  If require is False, a missing source file is silently
  skipped."""
  source_exists = os.path.exists(source)
  if not require and not source_exists:
    return

  output = source.replace(".proto", "_pb2.py").replace("../src/", "")

  # Skip regeneration when the output exists and is not older than a
  # present source file.
  if os.path.exists(output) and not (
      source_exists and os.path.getmtime(source) > os.path.getmtime(output)):
    return

  print("Generating %s..." % output)

  if not source_exists:
    sys.stderr.write("Can't find required file: %s\n" % source)
    sys.exit(-1)

  if protoc is None:
    sys.stderr.write(
        "protoc is not installed nor found in ../src. Please compile it "
        "or install the binary package.\n")
    sys.exit(-1)

  protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
  if subprocess.call(protoc_command) != 0:
    sys.exit(-1)
def GenerateUnittestProtos():
  """Generates the _pb2 modules for all test-only .proto files.

  Every file is optional (require=False): protos absent from this source
  layout are silently skipped by generate_proto."""
  # Protos that live in the C++ source tree.
  for name in ("any_test",
               "map_proto2_unittest",
               "map_unittest",
               "test_messages_proto3",
               "test_messages_proto2",
               "unittest_arena",
               "unittest_no_arena",
               "unittest_no_arena_import",
               "unittest",
               "unittest_custom_options",
               "unittest_import",
               "unittest_import_public",
               "unittest_mset",
               "unittest_mset_wire_format",
               "unittest_no_generic_services",
               "unittest_proto3_arena",
               "util/json_format_proto3"):
    generate_proto("../src/google/protobuf/%s.proto" % name, False)
  # Protos that live in the Python package itself.
  for name in ("internal/any_test",
               "internal/descriptor_pool_test1",
               "internal/descriptor_pool_test2",
               "internal/factory_test1",
               "internal/factory_test2",
               "internal/file_options_test",
               "internal/import_test_package/inner",
               "internal/import_test_package/outer",
               "internal/missing_enum_values",
               "internal/message_set_extensions",
               "internal/more_extensions",
               "internal/more_extensions_dynamic",
               "internal/more_messages",
               "internal/packed_field_test",
               "internal/test_bad_identifiers",
               "pyext/python"):
    generate_proto("google/protobuf/%s.proto" % name, False)
class clean(_clean):
  """Custom 'clean' command that also removes generated files."""

  # Suffixes / path tails identifying files that this build generates.
  _GENERATED_ENDINGS = (
      "_pb2.py", ".pyc", ".so", ".o",
      'google/protobuf/compiler/__init__.py',
      'google/protobuf/util/__init__.py')

  def run(self):
    # Delete generated files in the code tree.
    for dirpath, dirnames, filenames in os.walk("."):
      for filename in filenames:
        filepath = os.path.join(dirpath, filename)
        if filepath.endswith(self._GENERATED_ENDINGS):
          os.remove(filepath)
    # _clean is an old-style class, so super() doesn't work.
    _clean.run(self)
class build_py(_build_py):
  """Custom 'build_py' that generates _pb2 modules and package markers
  before the normal build runs."""

  def run(self):
    # Generate each necessary well-known-type _pb2 module if it doesn't
    # exist (or is stale).
    for name in ("descriptor",
                 "compiler/plugin",
                 "any",
                 "api",
                 "duration",
                 "empty",
                 "field_mask",
                 "source_context",
                 "struct",
                 "timestamp",
                 "type",
                 "wrappers"):
      generate_proto("../src/google/protobuf/%s.proto" % name)
    GenerateUnittestProtos()
    # Make sure google.protobuf/** are valid packages.
    for path in ['', 'internal/', 'compiler/', 'pyext/', 'util/']:
      try:
        open('google/protobuf/%s__init__.py' % path, 'a').close()
      except EnvironmentError:
        pass
    # _build_py is an old-style class, so super() doesn't work.
    _build_py.run(self)
class test_conformance(_build_py):
  """Custom command that runs the cross-language conformance suite via make.

  Subclasses _build_py for distutils command plumbing; ``target`` is the
  make target under ../conformance and is overridden to 'test_python_cpp'
  when the C++ extension is built (see the __main__ block).
  """

  target = 'test_python'

  def run(self):
    if sys.version_info >= (2, 7):
      # Python 2.6 dodges these extra failures.
      os.environ["CONFORMANCE_PYTHON_EXTRA_FAILURES"] = (
          "--failure_list failure_list_python-post26.txt")
    cmd = 'cd ../conformance && make %s' % (test_conformance.target)
    # check_call raises CalledProcessError on failure, so the previously
    # captured-but-unused `status` local has been dropped.
    subprocess.check_call(cmd, shell=True)
def get_option_from_sys_argv(option_str):
  """Consume a command-line flag.

  If *option_str* appears in sys.argv it is removed (so distutils never
  sees it) and True is returned; otherwise sys.argv is left untouched and
  False is returned.
  """
  try:
    sys.argv.remove(option_str)
  except ValueError:
    # Flag not present: nothing to consume.
    return False
  return True
if __name__ == '__main__':
  ext_module_list = []
  warnings_as_errors = '--warnings_as_errors'
  # Opt-in C++-backed implementation: builds the native _message extension
  # instead of relying on the pure-Python runtime.
  if get_option_from_sys_argv('--cpp_implementation'):
    # Link libprotobuf.a and libprotobuf-lite.a statically with the
    # extension. Note that those libraries have to be compiled with
    # -fPIC for this to work.
    compile_static_ext = get_option_from_sys_argv('--compile_static_extension')
    extra_compile_args = ['-Wno-write-strings',
                          '-Wno-invalid-offsetof',
                          '-Wno-sign-compare']
    libraries = ['protobuf']
    extra_objects = None
    if compile_static_ext:
      # Static linking: pass the archives as objects, not as -l libraries.
      libraries = None
      extra_objects = ['../src/.libs/libprotobuf.a',
                       '../src/.libs/libprotobuf-lite.a']
    test_conformance.target = 'test_python_cpp'

    # $CC may be unset; stderr is discarded so a missing compiler simply
    # yields an empty string and the clang-specific flag is skipped.
    if "clang" in os.popen('$CC --version 2> /dev/null').read():
      extra_compile_args.append('-Wno-shorten-64-to-32')

    # mac_ver() returns e.g. ('10.12.6', ...); keep only major.minor.
    v, _, _ = platform.mac_ver()
    if v:
      v = float('.'.join(v.split('.')[:2]))
      if v >= 10.12:
        # NOTE(review): presumably the macOS 10.12 toolchain requires
        # C++11 to be enabled explicitly — confirm against build logs.
        extra_compile_args.append('-std=c++11')

    if warnings_as_errors in sys.argv:
      extra_compile_args.append('-Werror')
      sys.argv.remove(warnings_as_errors)

    # C++ implementation extension
    ext_module_list.extend([
        Extension(
            "google.protobuf.pyext._message",
            glob.glob('google/protobuf/pyext/*.cc'),
            include_dirs=[".", "../src"],
            libraries=libraries,
            extra_objects=extra_objects,
            library_dirs=['../src/.libs'],
            extra_compile_args=extra_compile_args,
        ),
        Extension(
            "google.protobuf.internal._api_implementation",
            glob.glob('google/protobuf/internal/api_implementation.cc'),
            extra_compile_args=['-DPYTHON_PROTO2_CPP_IMPL_V2'],
        ),
    ])
    os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'

  # Keep this list of dependencies in sync with tox.ini.
  install_requires = ['six>=1.9', 'setuptools']
  # Backports needed only on Python <= 2.6 era interpreters.
  if sys.version_info <= (2,7):
    install_requires.append('ordereddict')
    install_requires.append('unittest2')

  setup(
      name='protobuf',
      version=GetVersion(),
      description='Protocol Buffers',
      download_url='https://github.com/google/protobuf/releases',
      long_description="Protocol Buffers are Google's data interchange format",
      url='https://developers.google.com/protocol-buffers/',
      maintainer='protobuf@googlegroups.com',
      maintainer_email='protobuf@googlegroups.com',
      license='3-Clause BSD License',
      classifiers=[
          "Programming Language :: Python",
          "Programming Language :: Python :: 2",
          "Programming Language :: Python :: 2.6",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3",
          "Programming Language :: Python :: 3.3",
          "Programming Language :: Python :: 3.4",
      ],
      namespace_packages=['google'],
      packages=find_packages(
          exclude=[
              'import_test_package',
          ],
      ),
      test_suite='google.protobuf.internal',
      # Wire up the custom commands defined above.
      cmdclass={
          'clean': clean,
          'build_py': build_py,
          'test_conformance': test_conformance,
      },
      install_requires=install_requires,
      ext_modules=ext_module_list,
  )
| sunghan-chang/TizenRT | external/protobuf/python/setup.py | Python | apache-2.0 | 10,629 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_banner
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage multiline banners on VyOS devices
description:
- This will configure both pre-login and post-login banners on remote
devices running VyOS. It allows playbooks to add or remote
banner text from the active running configuration.
notes:
- Tested against VYOS 1.1.7
options:
banner:
description:
- Specifies which banner that should be
configured on the remote device.
required: true
default: null
choices: ['pre-login', 'post-login']
text:
description:
- The banner text that should be
present in the remote device running configuration. This argument
accepts a multiline string, with no empty lines. Requires I(state=present).
default: null
state:
description:
- Specifies whether or not the configuration is present in the current
devices active running configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: vyos
"""
EXAMPLES = """
- name: configure the pre-login banner
vyos_banner:
banner: pre-login
text: |
this is my pre-login banner
that contains a multiline
string
state: present
- name: remove the post-login banner
vyos_banner:
banner: post-login
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- banner pre-login
- this is my pre-login banner
- that contains a multiline
- string
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.vyos.vyos import get_config, load_config
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def spec_to_commands(updates, module):
    """Build the VyOS CLI commands that move the device from `have` to `want`.

    :param updates: (want, have) pair of banner-state dicts as produced by
        map_params_to_obj() and config_to_dict().
    :param module: object exposing ``params`` with 'state' and 'banner' keys.
    :returns: list of configuration-mode command strings (possibly empty).
    """
    commands = []
    want, have = updates
    state = module.params['state']

    if state == 'absent':
        # Fixed: the original condition was `A or (A and B)`, which by the
        # absorption law is just `A` — delete whenever a banner is present.
        if have.get('state') != 'absent':
            commands.append('delete system login banner %s'
                            % module.params['banner'])

    elif state == 'present':
        # want['text'] holds a repr()-escaped string (see map_params_to_obj);
        # unescape it before comparing with the text read from the device.
        if want['text'] and want['text'].encode().decode('unicode_escape') != have.get('text'):
            banner_cmd = 'set system login banner %s ' % module.params['banner']
            banner_cmd += want['text'].strip()
            commands.append(banner_cmd)

    return commands
def config_to_dict(module):
    """Read the device's running configuration and extract the current
    state of the requested banner.

    Returns a dict with 'banner', 'state' ('present'/'absent') and, when
    present, the unescaped banner 'text'.
    """
    running = get_config(module)
    obj = {'banner': module.params['banner'], 'state': 'absent'}

    matched = None
    prefix = 'set system login banner %s' % obj['banner']
    for cfg_line in running.split('\n'):
        if cfg_line.startswith(prefix):
            matched = re.findall(r'%s (.*)' % obj['banner'], cfg_line, re.M)

    if matched:
        # The device stores escaped text; decode it back to real newlines.
        obj['text'] = matched[0].encode().decode('unicode_escape')
        obj['state'] = 'present'

    return obj
def map_params_to_obj(module):
    """Translate module parameters into the internal banner-state dict."""
    text = module.params['text']
    # repr()-escape the stripped text so multiline banners survive the CLI;
    # falsy values (None / empty) pass through unchanged.
    text = "%r" % str(text).strip() if text else text
    return {
        'banner': module.params['banner'],
        'text': text,
        'state': module.params['state'],
    }
def main():
    """ main entry point for module execution
    """
    # Module argument schema; 'text' is only required when state=present
    # (enforced via required_if below).
    argument_spec = dict(
        banner=dict(required=True, choices=['pre-login', 'post-login']),
        text=dict(),
        state=dict(default='present', choices=['present', 'absent'])
    )

    # Merge in the connection/provider arguments shared by all vyos modules.
    argument_spec.update(vyos_argument_spec)

    required_if = [('state', 'present', ('text',))]

    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)

    # NOTE(review): `warnings` is never populated here, so the branch below
    # is currently dead; it appears kept for parity with sibling modules.
    warnings = list()

    result = {'changed': False}

    if warnings:
        result['warnings'] = warnings

    # Diff desired state (playbook params) against device state (config).
    want = map_params_to_obj(module)
    have = config_to_dict(module)

    commands = spec_to_commands((want, have), module)
    result['commands'] = commands

    if commands:
        # Honour check mode: compute the commands but do not commit them.
        commit = not module.check_mode
        load_config(module, commands, commit=commit)
        result['changed'] = True

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| hkariti/ansible | lib/ansible/modules/network/vyos/vyos_banner.py | Python | gpl-3.0 | 5,186 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Ansible CloudStack external inventory script.
=============================================
Generates Ansible inventory from CloudStack. Configuration is read from
'cloudstack.ini'. If you need to pass the project, write a simple wrapper
script, e.g. project_cloudstack.sh:
#!/bin/bash
cloudstack.py --project <your_project> $@
When run against a specific host, this script returns the following attributes
based on the data obtained from CloudStack API:
"web01": {
"cpu_number": 2,
"nic": [
{
"ip": "10.102.76.98",
"mac": "02:00:50:99:00:01",
"type": "Isolated",
"netmask": "255.255.255.0",
"gateway": "10.102.76.1"
},
{
"ip": "10.102.138.63",
"mac": "06:b7:5a:00:14:84",
"type": "Shared",
"netmask": "255.255.255.0",
"gateway": "10.102.138.1"
}
],
"default_ip": "10.102.76.98",
"zone": "ZUERICH",
"created": "2014-07-02T07:53:50+0200",
"hypervisor": "VMware",
"memory": 2048,
"state": "Running",
"tags": [],
"cpu_speed": 1800,
"affinity_group": [],
"service_offering": "Small",
"cpu_used": "62%"
}
usage: cloudstack.py [--list] [--host HOST] [--project PROJECT] [--domain DOMAIN]
"""
from __future__ import print_function
import os
import sys
import argparse
try:
import json
except:
import simplejson as json
try:
from cs import CloudStack, CloudStackException, read_config
except ImportError:
print("Error: CloudStack library must be installed: pip install cs.",
file=sys.stderr)
sys.exit(1)
class CloudStackInventory(object):
    """Build an Ansible dynamic inventory from the CloudStack API.

    The constructor parses command-line options, connects using the
    credentials resolved by ``cs.read_config()`` and prints JSON for either
    a single host (``--host``) or the full inventory (``--list``).
    """

    def __init__(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('--host')
        parser.add_argument('--list', action='store_true')
        parser.add_argument('--project')
        parser.add_argument('--domain')

        options = parser.parse_args()
        try:
            self.cs = CloudStack(**read_config())
        except CloudStackException:
            # Fixed: the bound exception variable was unused.
            print("Error: Could not connect to CloudStack API", file=sys.stderr)
            sys.exit(1)

        domain_id = None
        if options.domain:
            domain_id = self.get_domain_id(options.domain)

        project_id = None
        if options.project:
            project_id = self.get_project_id(options.project, domain_id)

        if options.host:
            data = self.get_host(options.host, project_id, domain_id)
            print(json.dumps(data, indent=2))
        elif options.list:
            data = self.get_list(project_id, domain_id)
            print(json.dumps(data, indent=2))
        else:
            print("usage: --list | --host <hostname> [--project <project>] [--domain <domain_path>]",
                  file=sys.stderr)
            sys.exit(1)

    def get_domain_id(self, domain):
        """Return the id of the domain whose full path equals *domain*
        (case-insensitive), or exit with an error."""
        domains = self.cs.listDomains(listall=True)
        if domains:
            for d in domains['domain']:
                if d['path'].lower() == domain.lower():
                    return d['id']
        print("Error: Domain %s not found." % domain, file=sys.stderr)
        sys.exit(1)

    def get_project_id(self, project, domain_id=None):
        """Return the id of the project matched by name or id, or exit."""
        projects = self.cs.listProjects(domainid=domain_id)
        if projects:
            for p in projects['project']:
                if p['name'] == project or p['id'] == project:
                    return p['id']
        print("Error: Project %s not found." % project, file=sys.stderr)
        sys.exit(1)

    def get_host(self, name, project_id=None, domain_id=None):
        """Return the hostvars dict for the VM whose display name is *name*;
        empty dict when no VMs exist or none matches."""
        hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id)
        data = {}
        if not hosts:
            return data
        for host in hosts['virtualmachine']:
            host_name = host['displayname']
            if name == host_name:
                data['zone'] = host['zonename']
                if 'group' in host:
                    data['group'] = host['group']
                data['state'] = host['state']
                data['service_offering'] = host['serviceofferingname']
                data['affinity_group'] = host['affinitygroup']
                data['security_group'] = host['securitygroup']
                data['cpu_number'] = host['cpunumber']
                data['cpu_speed'] = host['cpuspeed']
                if 'cpuused' in host:
                    data['cpu_used'] = host['cpuused']
                data['memory'] = host['memory']
                data['tags'] = host['tags']
                data['hypervisor'] = host['hypervisor']
                data['created'] = host['created']
                data['nic'] = []
                for nic in host['nic']:
                    data['nic'].append({
                        'ip': nic['ipaddress'],
                        'mac': nic['macaddress'],
                        'netmask': nic['netmask'],
                        'gateway': nic['gateway'],
                        'type': nic['type'],
                    })
                    if nic['isdefault']:
                        data['default_ip'] = nic['ipaddress']
                # Fixed: dropped the stray trailing semicolon; stop scanning
                # once the requested host has been found.
                break
        return data

    def get_list(self, project_id=None, domain_id=None):
        """Return the full inventory: the 'all' group, one group per zone,
        CloudStack instance groups, and per-host hostvars under '_meta'."""
        data = {
            'all': {
                'hosts': [],
            },
            '_meta': {
                'hostvars': {},
            },
        }

        groups = self.cs.listInstanceGroups(projectid=project_id, domainid=domain_id)
        if groups:
            for group in groups['instancegroup']:
                group_name = group['name']
                if group_name and group_name not in data:
                    data[group_name] = {
                        'hosts': []
                    }

        hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id)
        if not hosts:
            return data
        for host in hosts['virtualmachine']:
            host_name = host['displayname']
            data['all']['hosts'].append(host_name)
            data['_meta']['hostvars'][host_name] = {}

            # Make a group per zone
            data['_meta']['hostvars'][host_name]['zone'] = host['zonename']
            group_name = host['zonename']
            if group_name not in data:
                data[group_name] = {
                    'hosts': []
                }
            data[group_name]['hosts'].append(host_name)

            if 'group' in host:
                data['_meta']['hostvars'][host_name]['group'] = host['group']
            data['_meta']['hostvars'][host_name]['state'] = host['state']
            data['_meta']['hostvars'][host_name]['service_offering'] = host['serviceofferingname']
            data['_meta']['hostvars'][host_name]['affinity_group'] = host['affinitygroup']
            data['_meta']['hostvars'][host_name]['security_group'] = host['securitygroup']
            data['_meta']['hostvars'][host_name]['cpu_number'] = host['cpunumber']
            data['_meta']['hostvars'][host_name]['cpu_speed'] = host['cpuspeed']
            if 'cpuused' in host:
                data['_meta']['hostvars'][host_name]['cpu_used'] = host['cpuused']
            # Fixed: 'created' was assigned twice with the same value.
            data['_meta']['hostvars'][host_name]['created'] = host['created']
            data['_meta']['hostvars'][host_name]['memory'] = host['memory']
            data['_meta']['hostvars'][host_name]['tags'] = host['tags']
            data['_meta']['hostvars'][host_name]['hypervisor'] = host['hypervisor']
            data['_meta']['hostvars'][host_name]['nic'] = []
            for nic in host['nic']:
                data['_meta']['hostvars'][host_name]['nic'].append({
                    'ip': nic['ipaddress'],
                    'mac': nic['macaddress'],
                    'netmask': nic['netmask'],
                    'gateway': nic['gateway'],
                    'type': nic['type'],
                })
                if nic['isdefault']:
                    data['_meta']['hostvars'][host_name]['default_ip'] = nic['ipaddress']

            group_name = ''
            if 'group' in host:
                group_name = host['group']

            if group_name and group_name in data:
                data[group_name]['hosts'].append(host_name)
        return data
| crafty78/ansible | contrib/inventory/cloudstack.py | Python | gpl-3.0 | 9,221 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova import compute
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
UUID = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'

# Module-level recorders: the compute API stubs below store their arguments
# here so individual tests can assert on what the API was called with.
last_add_fixed_ip = (None, None)
last_remove_fixed_ip = (None, None)


def compute_api_add_fixed_ip(self, context, instance, network_id):
    """Stub for compute.api.API.add_fixed_ip; records (uuid, network_id)."""
    global last_add_fixed_ip

    last_add_fixed_ip = (instance['uuid'], network_id)


def compute_api_remove_fixed_ip(self, context, instance, address):
    """Stub for compute.api.API.remove_fixed_ip; records (uuid, address)."""
    global last_remove_fixed_ip

    last_remove_fixed_ip = (instance['uuid'], address)
def compute_api_get(self, context, instance_id, expected_attrs=None,
                    want_objects=False):
    """Stub for compute.api.API.get: fabricate a minimal instance dict."""
    return dict(id=1, uuid=instance_id)
class FixedIpTest(test.NoDBTestCase):
    """Tests for the v3 os-multinic server actions (add/remove fixed IP)."""

    def setUp(self):
        super(FixedIpTest, self).setUp()
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        # Route compute API calls to the module-level recording stubs.
        self.stubs.Set(compute.api.API, "add_fixed_ip",
                       compute_api_add_fixed_ip)
        self.stubs.Set(compute.api.API, "remove_fixed_ip",
                       compute_api_remove_fixed_ip)
        self.stubs.Set(compute.api.API, 'get', compute_api_get)
        self.app = fakes.wsgi_app_v3(init_only=('servers', 'os-multinic'))

    def test_add_fixed_ip(self):
        # Valid request: expect 202 and the stub to record (uuid, network_id).
        global last_add_fixed_ip
        last_add_fixed_ip = (None, None)

        body = dict(add_fixed_ip=dict(network_id='test_net'))
        req = webob.Request.blank('/v3/servers/%s/action' % UUID)
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 202)
        self.assertEqual(last_add_fixed_ip, (UUID, 'test_net'))

    def test_add_fixed_ip_empty_network_id(self):
        # Schema validation: empty network_id is rejected with 400.
        body = {'add_fixed_ip': {'network_id': ''}}
        req = webob.Request.blank('/v3/servers/%s/action' % UUID)
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(self.app)
        self.assertEqual(400, resp.status_int)

    def test_add_fixed_ip_network_id_bigger_than_36(self):
        # Schema validation: network_id longer than 36 chars is rejected.
        body = {'add_fixed_ip': {'network_id': 'a' * 37}}
        req = webob.Request.blank('/v3/servers/%s/action' % UUID)
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(self.app)
        self.assertEqual(400, resp.status_int)

    def test_add_fixed_ip_no_network(self):
        # Missing network_id: 400 and the compute API must not be called.
        global last_add_fixed_ip
        last_add_fixed_ip = (None, None)

        body = dict(add_fixed_ip=dict())
        req = webob.Request.blank('/v3/servers/%s/action' % UUID)
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 400)
        self.assertEqual(last_add_fixed_ip, (None, None))

    def test_remove_fixed_ip(self):
        # Valid request: expect 202 and the stub to record (uuid, address).
        global last_remove_fixed_ip
        last_remove_fixed_ip = (None, None)

        body = dict(remove_fixed_ip=dict(address='10.10.10.1'))
        req = webob.Request.blank('/v3/servers/%s/action' % UUID)
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 202)
        self.assertEqual(last_remove_fixed_ip, (UUID, '10.10.10.1'))

    def test_remove_fixed_ip_invalid_address(self):
        # Schema validation: empty address is rejected with 400.
        body = {'remove_fixed_ip': {'address': ''}}
        req = webob.Request.blank('/v3/servers/%s/action' % UUID)
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'
        resp = req.get_response(self.app)
        self.assertEqual(400, resp.status_int)

    def test_remove_fixed_ip_no_address(self):
        # Missing address: 400 and the compute API must not be called.
        global last_remove_fixed_ip
        last_remove_fixed_ip = (None, None)

        body = dict(remove_fixed_ip=dict())
        req = webob.Request.blank('/v3/servers/%s/action' % UUID)
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 400)
        self.assertEqual(last_remove_fixed_ip, (None, None))
| eharney/nova | nova/tests/api/openstack/compute/plugins/v3/test_multinic.py | Python | apache-2.0 | 5,154 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
from webkitpy.common.system.deprecated_logging import error
class CheckStyle(AbstractStep):
    """Patch-upload step that runs check-webkit-style over the changed files.

    NOTE: this file is Python 2 (``except ScriptError, e`` syntax).
    """

    @classmethod
    def options(cls):
        # Options this step understands, on top of the shared step options.
        return AbstractStep.options() + [
            Options.non_interactive,
            Options.check_style,
            Options.git_commit,
        ]

    def run(self, state):
        if not self._options.check_style:
            return
        # The style checker resolves paths relative to the checkout root.
        os.chdir(self._tool.scm().checkout_root)

        args = []
        if self._options.git_commit:
            args.append("--git-commit")
            args.append(self._options.git_commit)

        # Restrict the check to the files this patch actually touches.
        args.append("--diff-files")
        args.extend(self._changed_files(state))

        try:
            self._tool.executive.run_and_throw_if_fail(self._tool.port().check_webkit_style_command() + args)
        except ScriptError, e:
            if self._options.non_interactive:
                # We need to re-raise the exception here to have the
                # style-queue do the right thing.
                raise e
            # Interactive use: let the author override style failures.
            if not self._tool.user.confirm("Are you sure you want to continue?"):
                exit(1)
| mogoweb/webkit_for_android5.1 | webkit/Tools/Scripts/webkitpy/tool/steps/checkstyle.py | Python | apache-2.0 | 2,811 |
# IntelliJ/PyCharm inspection test fixture: the <error descr="..."> markup
# marks the exact spans the attrs-dataclass ordering inspection is expected
# to highlight, so this file is intentionally not valid, runnable Python.
from attr import dataclass


class A7:
    x: int = 1

    def __lt__(self, other):
        pass


@dataclass(cmp=True)
class B7(A7):
    y: str = "1"


print(A7() < B7())
print(B7() <error descr="'__lt__' not supported between instances of 'B7' and 'A7'"><</error> A7())

print(A7() < object())
print(B7() <error descr="'__lt__' not supported between instances of 'B7' and 'object'"><</error> object())


class A8:
    x: int = 1

    def __lt__(self, other):
        pass


@dataclass(cmp=False)
class B8(A8):
    y: str = "1"


print(A8() < B8())
print(B8() < A8())

print(A8() < object())
print(B8() < object())


@dataclass(cmp=True)
class A9:
    x: int = 1


class B9(A9):
    y: str = "1"

    def __lt__(self, other):
        pass


print(A9() < B9())
print(B9() < A9())

print(A9() <error descr="'__lt__' not supported between instances of 'A9' and 'object'"><</error> object())
print(B9() < object())


@dataclass(cmp=False)
class A10:
    x: int = 1


class B10(A10):
    y: str = "1"

    def __lt__(self, other):
        pass


print(A10() <error descr="'__lt__' not supported between instances of 'A10' and 'B10'"><</error> B10())
print(B10() < A10())

print(A10() <error descr="'__lt__' not supported between instances of 'A10' and 'object'"><</error> object())
print(B10() < object()) | siosio/intellij-community | python/testData/inspections/PyDataclassInspection/comparisonForManuallyOrderedInAttrsInheritance.py | Python | apache-2.0 | 1,285 |
"""
.. _tut_stats_cluster_source_2samp:
=========================================================================
2 samples permutation test on source data with spatio-temporal clustering
=========================================================================
Tests if the source space data are significantly different between
2 groups of subjects (simulated here using one subject's data).
The multiple comparisons problem is addressed with a cluster-level
permutation test across space and time.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import os.path as op
import numpy as np
from scipy import stats as stats
import mne
from mne import spatial_tris_connectivity, grade_to_tris
from mne.stats import spatio_temporal_cluster_test, summarize_clusters_stc
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
stc_fname = data_path + '/MEG/sample/sample_audvis-meg-lh.stc'
subjects_dir = data_path + '/subjects'
# Load stc to in common cortical space (fsaverage)
stc = mne.read_source_estimate(stc_fname)
stc.resample(50)
stc = mne.morph_data('sample', 'fsaverage', stc, grade=5, smooth=20,
subjects_dir=subjects_dir)
n_vertices_fsave, n_times = stc.data.shape
tstep = stc.tstep
n_subjects1, n_subjects2 = 7, 9
print('Simulating data for %d and %d subjects.' % (n_subjects1, n_subjects2))
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X1 = np.random.randn(n_vertices_fsave, n_times, n_subjects1) * 10
X2 = np.random.randn(n_vertices_fsave, n_times, n_subjects2) * 10
X1[:, :, :] += stc.data[:, :, np.newaxis]
# make the activity bigger for the second set of subjects
X2[:, :, :] += 3 * stc.data[:, :, np.newaxis]
# We want to compare the overall activity levels for each subject
X1 = np.abs(X1) # only magnitude
X2 = np.abs(X2) # only magnitude
###############################################################################
# Compute statistic
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal)
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(grade_to_tris(5))
# Note that X needs to be a list of multi-dimensional array of shape
# samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]
# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
n_subjects1 - 1, n_subjects2 - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu =\
spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=2,
threshold=f_threshold)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A != condition B
brain = stc_all_cluster_vis.plot('fsaverage', hemi='both', colormap='mne',
subjects_dir=subjects_dir,
time_label='Duration significant (ms)')
brain.set_data_time_index(0)
brain.show_view('lateral')
brain.save_image('clusters.png')
| trachelr/mne-python | tutorials/plot_cluster_stats_spatio_temporal_2samp.py | Python | bsd-3-clause | 4,321 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["deprecated"],
"supported_by": "network",
}
DOCUMENTATION = """module: net_logging
author: Ganesh Nalawade (@ganeshrn)
short_description: Manage logging on network devices
description:
- This module provides declarative management of logging on network devices.
deprecated:
removed_in: '2.13'
alternative: Use platform-specific "[netos]_logging" module
why: Updated modules released with more functionality
extends_documentation_fragment:
- ansible.netcommon.network_agnostic
options:
dest:
description:
- Destination of the logs.
choices:
- console
- host
name:
description:
- If value of C(dest) is I(host) it indicates file-name the host name to be notified.
facility:
description:
- Set logging facility.
level:
description:
- Set logging severity levels.
aggregate:
description: List of logging definitions.
purge:
description:
- Purge logging not defined in the I(aggregate) parameter.
default: false
state:
description:
- State of the logging configuration.
default: present
choices:
- present
- absent
"""
EXAMPLES = """
- name: configure console logging
net_logging:
dest: console
facility: any
level: critical
- name: remove console logging configuration
net_logging:
dest: console
state: absent
- name: configure host logging
net_logging:
dest: host
name: 192.0.2.1
facility: kernel
level: critical
- name: Configure file logging using aggregate
net_logging:
dest: file
aggregate:
- name: test-1
facility: pfe
level: critical
- name: test-2
facility: kernel
level: emergency
- name: Delete file logging using aggregate
net_logging:
dest: file
aggregate:
- name: test-1
facility: pfe
level: critical
- name: test-2
facility: kernel
level: emergency
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- logging console critical
"""
| Shaps/ansible | test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_logging.py | Python | gpl-3.0 | 2,503 |
"""
Constants specific to the SQL storage portion of the ORM.
"""
from collections import namedtuple
import re
# Valid query types (a set is used for speedy lookups). These are (currently)
# considered SQL-specific; other storage systems may choose to use different
# lookup types.
QUERY_TERMS = {
    'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
    'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
    'month', 'day', 'week_day', 'hour', 'minute', 'second', 'isnull', 'search',
    'regex', 'iregex',
}

# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100

# Namedtuples for sql.* internal use.

# Join lists (indexes into the tuples that are values in the alias_map
# dictionary in the Query class).
JoinInfo = namedtuple('JoinInfo',
                      'table_name rhs_alias join_type lhs_alias '
                      'join_cols nullable join_field')

# Pairs of column clauses to select, and (possibly None) field for the clause.
SelectInfo = namedtuple('SelectInfo', 'col field')

# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
CURSOR = 'cursor'
NO_RESULTS = 'no results'

# Matches a valid ORDER BY term: '?' (random ordering) or an optionally
# signed dotted-name path.
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')

# Maps a requested sort direction to (direction, inverse direction).
ORDER_DIR = {
    'ASC': ('ASC', 'DESC'),
    'DESC': ('DESC', 'ASC'),
}
| simbha/mAngE-Gin | lib/django/db/models/sql/constants.py | Python | mit | 1,399 |
#!/usr/bin/python2.7
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unicodedata
import urllib
import urllib2
# Entry point of NTT Docomo's emergency message board service.
DOCOMO_URL = 'http://dengon.docomo.ne.jp/inoticelist.cgi'
# Captures the hidden 'ep' token Docomo embeds in its gateway form.
DOCOMO_HIDDEN_RE = re.compile(
    r'\<INPUT TYPE\=\"HIDDEN\" NAME\=\"ep\" VALUE\=\"(\w+)\"\>', re.I)
# Phone number separator characters, including full-width unicode variants
# (dashes, wave dash, prolonged sound mark, etc.).
NUMBER_SEPARATOR_RE = re.compile(
    ur'[\(\)\.\-\s\u2010-\u2015\u2212\u301c\u30fc\ufe58\ufe63\uff0d]')
# Optional '+' and country prefix (01181 or 81) followed by 9-11 digits.
PHONE_NUMBER_RE = re.compile(r'^\+?(01181|81)?(\d{9,11})$')
# Japanese mobile numbers start with 070/080/090 and have 11 digits total.
MOBILE_NUMBER_RE = re.compile(r'^0(7|8|9)0\d{8}$')
# Redirect links to the message board pages of the other mobile carriers.
AU_URL_RE = re.compile(
    r'\<a href\=\"(http:\/\/dengon\.ezweb\.ne\.jp\/[^\"]+)"\>', re.I)
DOCOMO_URL_RE = re.compile(
    r'\<a href\=\"(http:\/\/dengon\.docomo\.ne\.jp\/[^\"]+)"\>', re.I)
SOFT_BANK_URL_RE = re.compile(
    r'\<a href\=\"(http:\/\/dengon\.softbank\.ne\.jp\/[^\"]+)"\>', re.I)
WILLCOM_URL_RE = re.compile(
    r'\<a href\=\"(http:\/\/dengon\.willcom\-inc\.com\/[^\"]+)"\>', re.I)
EMOBILE_URL_RE = re.compile(
    r'\<a href\=\"(http:\/\/dengon\.emnet\.ne\.jp\/[^\"]+)"\>', re.I)
WEB171_URL_RE = re.compile(
    r'<a href="(https://www\.web171\.jp/[^"]+)">', re.I)
# An re for an actual message stored at Docomo
DOCOMO_MESSAGE_RE = re.compile(
    r'\<a href\=\"(http:\/\/dengon\.docomo\.ne\.jp\/' +
    r'inoticelist\.cgi\?[^\"]+)".*\>', re.I)
def get_phone_number(string):
    """Normalize the given string and return it as a phone number, if it
    is one.

    Separator characters are stripped, unicode characters are folded to
    their ascii-compatible forms (NFKC), and when the Japanese country
    code (81) is present it is replaced by a leading '0'.

    Args:
        string: unicode string to normalize.
    Returns:
        A normalized phone number if the input string is a phone number,
        or None otherwise.
    """
    folded = unicodedata.normalize('NFKC', string)
    stripped = NUMBER_SEPARATOR_RE.sub('', folded)
    number_match = PHONE_NUMBER_RE.match(stripped)
    if not number_match:
        return None
    country_code, digits = number_match.groups()
    # A country prefix (81 / 01181) is swapped for the domestic leading '0'.
    return '0' + digits if country_code else digits
def is_mobile_number(string):
    """Tests whether the given string is a Japanese mobile phone number.

    Args:
        string: unicode string already stripped of separators such as
            '(', ')' and '-' and converted to ascii numeric characters.
    Returns:
        True if the string is a JP mobile phone number, False otherwise.
    """
    match = MOBILE_NUMBER_RE.match(string)
    return match is not None
def extract_redirect_url(scrape):
    """Tries to extract a redirect URL to another carrier's message board
    page from a page scraped from Docomo.

    Args:
        scrape: the scraped content from the url.
    Returns:
        The redirect url to the appropriate carrier's message board page
        if one is found, otherwise None.
    """
    # Probe the carriers in the same fixed order as before; first hit wins.
    carrier_patterns = (
        AU_URL_RE,
        SOFT_BANK_URL_RE,
        WILLCOM_URL_RE,
        EMOBILE_URL_RE,
        WEB171_URL_RE,
    )
    for pattern in carrier_patterns:
        found = pattern.findall(scrape)
        if found:
            return found[0]
    return None
def docomo_has_messages(scrape):
    """Checks whether Docomo holds messages for the inquired number, i.e.
    the scrape contains urls pointing at stored messages.

    Args:
        scrape: the scraped content from Docomo.
    Returns:
        True if Docomo has messages, False otherwise.
    """
    stored_message_links = DOCOMO_MESSAGE_RE.findall(scrape)
    return len(stored_message_links) > 0
def get_docomo_post_data(number, hidden_param):
    """Returns a mapping for POST data to Docomo's url to inquire for messages
    for the given number.
    Args:
        number: a normalized mobile number.
        hidden_param: the hidden 'ep' token scraped from Docomo's gateway
            page (see DOCOMO_HIDDEN_RE / look_up_number).
    Returns:
        a mapping for the POST data.
    """
    return {'es': 1,
            'si': 1,
            'bi1': 1,
            'ep': hidden_param,
            'sm': number}
def look_up_number(number):
    """Look up messages for the number, registered in the Japanese mobile
    carriers-provided emergency message board services.

    The five Japanese mobile carriers maintain separate message indices,
    but their systems can talk to one another when they don't find
    messages for the given number in their own indices. This function
    first talks to Docomo's system as the main entry point. Docomo
    returns urls of registered messages if it finds some in its own
    system; otherwise it asks the other four carriers and returns an url
    for the appropriate carrier if messages are found there. If no
    messages are registered anywhere, Docomo's system simply says so.

    Args:
        number: A mobile phone number.
    Returns:
        A url for messages found registered at some carrier (including
        Docomo), or None if none are found.
    Throws:
        Exception when failed to scrape.
    """
    # Fetch Docomo's gateway page to obtain the hidden timestamp token.
    gateway_page = urllib2.urlopen(DOCOMO_URL).read()
    hidden_param = DOCOMO_HIDDEN_RE.findall(gateway_page)[0]
    # POST the number together with that token.
    encoded_data = urllib.urlencode(get_docomo_post_data(number, hidden_param))
    answer_page = urllib2.urlopen(DOCOMO_URL, encoded_data).read()
    # Another carrier may hold the messages; follow its redirect if so.
    redirect = extract_redirect_url(answer_page)
    if redirect:
        return redirect
    if docomo_has_messages(answer_page):
        # Docomo itself has messages for the number.
        return DOCOMO_URL + '?' + encoded_data
    return None
def handle_phone_number(handler, query):
    """Handles a phone number query.

    If the query is a mobile phone number, looks it up in the mobile
    carriers-provided message board services and redirects to the results
    page. If the query is a non-mobile phone number, shows a 171
    suggestion instead.

    Args:
        handler: a request handler for this request.
        query: a query string to the Person Finder query page.
    Returns:
        True if the query string is a phone number and has been properly
        handled, and False otherwise.
    """
    phone_number = get_phone_number(unicode(query))
    if not phone_number:
        return False
    if not is_mobile_number(phone_number):
        # Landline number: suggest NTT's 171 voice message board.
        handler.render('query.html',
                       show_jp_171_suggestion=True)
        return True
    url = look_up_number(phone_number)
    if url:
        handler.redirect(url)
    else:
        handler.render('results.html',
                       results=[], jp_phone_number_query=True)
    return True
| hsarmiento/people_finder_chile | app/jp_mobile_carriers.py | Python | apache-2.0 | 7,659 |
#
# Copyright (C) 2014
# Sean Poyser (seanpoyser@gmail.com)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import xbmc
# Launch the Super Favourites add-on, then close any open dialog so the
# add-on's own window is brought to the front.
xbmc.executebuiltin('RunAddon(plugin.program.super.favourites)')
xbmc.executebuiltin('Dialog.Close(all, true)')
# -*- coding: utf-8 -*-
from reportlab.lib.colors import Color, CMYKColor, getAllNamedColors, toColor, \
HexColor
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY
from reportlab.lib.units import inch, cm
import base64
import httplib
import logging
import mimetypes
import os.path
import re
import reportlab
import shutil
import string
import sys
import tempfile
import types
import urllib
import urllib2
import urlparse
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Extracts the three integer components from a CSS-ish "rgb(r, g, b)" string.
rgb_re = re.compile("^.*?rgb[(]([0-9]+).*?([0-9]+).*?([0-9]+)[)].*?[ ]*$")
# Reportlab 2.1+ is a hard requirement; 2.2+ enables extra features.
if not(reportlab.Version[0] == "2" and reportlab.Version[2] >= "1"):
    raise ImportError("Reportlab Version 2.1+ is needed!")
REPORTLAB22 = (reportlab.Version[0] == "2" and reportlab.Version[2] >= "2")
# print "***", reportlab.Version, REPORTLAB22, reportlab.__file__
log = logging.getLogger("xhtml2pdf")
# Optional modules: prefer the fast C StringIO, degrade gracefully when
# pyPdf or the reportlab renderers are unavailable.
try:
    import cStringIO as StringIO
except:
    import StringIO
try:
    import pyPdf
except:
    pyPdf = None
try:
    from reportlab.graphics import renderPM
except:
    renderPM = None
try:
    from reportlab.graphics import renderSVG
except:
    renderSVG = None
#===============================================================================
# Memoize decorator
#===============================================================================
class memoized(object):
    """
    A kwargs-aware memoizing decorator.

    Caches return values keyed on ``(args, tuple(kwargs.items()))``, so
    don't pass in huge kwargs and avoid mutable/unhashable arguments (as
    usual for memoizers). It is a perfect match for frequently-called
    pure helpers such as getSize.
    """
    def __init__(self, func):
        self.func = func
        self.cache = {}
        # Mirror the wrapped function's metadata to avoid confusion.
        self.__doc__ = func.__doc__
        self.__name__ = func.__name__
    def __call__(self, *args, **kwargs):
        # kwargs are folded into a tuple of items so the key is hashable.
        key = (args, tuple(kwargs.items()))
        if key in self.cache:
            return self.cache[key]
        result = self.func(*args, **kwargs)
        self.cache[key] = result
        return result
def ErrorMsg():
    """
    Helper to get a nice traceback as string.
    Must be called from inside an ``except`` block (it reads
    ``sys.exc_info()``).
    """
    import traceback
    import sys
    limit = None
    # Avoid shadowing the builtins 'type', 'value' and 'list' like the old
    # version did, and use str.join instead of the py2-only string.join.
    exc_type, exc_value, exc_tb = sys.exc_info()
    entries = traceback.format_tb(exc_tb, limit) + \
        traceback.format_exception_only(exc_type, exc_value)
    return "Traceback (innermost last):\n" + "%-20s %s" % (
        "".join(entries[: - 1]),
        entries[ - 1])
def toList(value):
    """Return *value* as a list, wrapping scalars in a one-element list.

    Lists and tuples are converted with list(); anything else becomes
    [value].
    """
    # Exact type check (not isinstance) preserves the old behaviour of
    # types.ListType/types.TupleType for subclasses; the builtins work on
    # both Python 2 and 3.
    if type(value) not in (list, tuple):
        return [value]
    return list(value)
#def _toColor(arg, default=None):
# '''try to map an arbitrary arg to a color instance'''
# if isinstance(arg, Color):
# return arg
# tArg = type(arg)
# if tArg in (types.ListType, types.TupleType):
# assert 3 <= len(arg) <= 4, 'Can only convert 3 and 4 sequences to color'
# assert 0 <= min(arg) and max(arg) <= 1
# return len(arg) == 3 and Color(arg[0], arg[1], arg[2]) or CMYKColor(arg[0], arg[1], arg[2], arg[3])
# elif tArg == types.StringType:
# C = getAllNamedColors()
# s = arg.lower()
# if C.has_key(s): return C[s]
# try:
# return toColor(eval(arg))
# except:
# pass
# try:
# return HexColor(arg)
# except:
# if default is None:
# raise ValueError('Invalid color value %r' % arg)
# return default
@memoized
def getColor(value, default=None):
    """
    Convert a CSS color token into a reportlab Color instance.

    Accepts Color instances (returned unchanged), named colors, short
    (#rgb) and long (#rrggbb) hex forms and rgb(...) function strings.
    'transparent'/'none' yield *default*.
    """
    if isinstance(value, Color):
        return value
    token = str(value).strip().lower()
    if token in ("transparent", "none"):
        return default
    if token in COLOR_BY_NAME:
        return COLOR_BY_NAME[token]
    if token.startswith("#") and len(token) == 4:
        # Expand the short #rgb form to #rrggbb.
        token = "#" + "".join(2 * c for c in token[1:])
    else:
        rgb_match = rgb_re.search(token)
        if rgb_match:
            # e.g. token = "<css function: rgb(153, 51, 153)>", go figure:
            token = "#%02x%02x%02x" % tuple(
                int(component) for component in rgb_match.groups())
    return toColor(token, default)  # Calling the reportlab function
def getBorderStyle(value, default=None):
    """Return *value* as a border style, or *default* when it is falsy or
    one of the CSS keywords 'none'/'hidden'."""
    if not value:
        return default
    if str(value).lower() in ("none", "hidden"):
        return default
    return value
mm = cm / 10.0  # reportlab supplies cm/inch in points; derive millimetres
dpi96 = (1.0 / 96.0 * inch)  # one CSS pixel at 96dpi, expressed in points
# HTML absolute font-size keywords and <font size="n"> values, expressed
# as factors of the current font size.
_absoluteSizeTable = {
    "1": 50.0 / 100.0,
    "xx-small": 50.0 / 100.0,
    "x-small": 50.0 / 100.0,
    "2": 75.0 / 100.0,
    "small": 75.0 / 100.0,
    "3": 100.0 / 100.0,
    "medium": 100.0 / 100.0,
    "4": 125.0 / 100.0,
    "large": 125.0 / 100.0,
    "5": 150.0 / 100.0,
    "x-large": 150.0 / 100.0,
    "6": 175.0 / 100.0,
    "xx-large": 175.0 / 100.0,
    "7": 200.0 / 100.0,
    "xxx-large": 200.0 / 100.0,
    #"xx-small" : 3./5.,
    #"x-small": 3./4.,
    #"small": 8./9.,
    #"medium": 1./1.,
    #"large": 6./5.,
    #"x-large": 3./2.,
    #"xx-large": 2./1.,
    #"xxx-large": 3./1.,
    }
# Relative font-size keywords and <font size="+n"/"-n"> values, likewise
# factors of the current/base font size.
_relativeSizeTable = {
    "larger": 1.25,
    "smaller": 0.75,
    "+4": 200.0 / 100.0,
    "+3": 175.0 / 100.0,
    "+2": 150.0 / 100.0,
    "+1": 125.0 / 100.0,
    "-1": 75.0 / 100.0,
    "-2": 50.0 / 100.0,
    "-3": 25.0 / 100.0,
    }
# Computed font sizes are clamped to at least this many points.
MIN_FONT_SIZE = 1.0
@memoized
def getSize(value, relative=0, base=None, default=0.0):
    """
    Converts strings to standard sizes.
    That is the function taking a string of CSS size ('12pt', '1cm' and so on)
    and converts it into a float in a standard unit (in our case, points).
    >>> getSize('12pt')
    12.0
    >>> getSize('1cm')
    28.346456692913385
    """
    try:
        original = value
        if value is None:
            return relative
        elif isinstance(value, float):
            return value
        elif isinstance(value, int):
            return float(value)
        elif type(value) in (tuple, list):
            value = "".join(value)
        value = str(value).strip().lower().replace(",", ".")
        if value[-2:] == 'cm':
            return float(value[:-2].strip()) * cm
        elif value[-2:] == 'mm':
            return (float(value[:-2].strip()) * mm) # 1mm = 0.1cm
        elif value[-2:] == 'in':
            return float(value[:-2].strip()) * inch # 1pt == 1/72inch
        elif value[-4:] == 'inch':
            # Bug fix: this used to test value[-2:] == 'inch', which can
            # never be true (a 2-char slice vs. a 4-char literal), so
            # 'inch'-suffixed sizes silently fell through to the default.
            return float(value[:-4].strip()) * inch # 1pt == 1/72inch
        elif value[-2:] == 'pt':
            return float(value[:-2].strip())
        elif value[-2:] == 'pc':
            return float(value[:-2].strip()) * 12.0 # 1pc == 12pt
        elif value[-2:] == 'px':
            return float(value[:-2].strip()) * dpi96 # XXX W3C says, use 96pdi http://www.w3.org/TR/CSS21/syndata.html#length-units
        elif value[-1:] == 'i': # 1pt == 1/72inch
            return float(value[:-1].strip()) * inch
        elif value in ("none", "0", "auto"):
            return 0.0
        elif relative:
            # Relative units are resolved against the current font size
            # (the 'relative' argument), or 'base' when given.
            if value[-2:] == 'em': # XXX
                return (float(value[:-2].strip()) * relative) # 1em = 1 * fontSize
            elif value[-2:] == 'ex': # XXX
                return (float(value[:-2].strip()) * (relative / 2.0)) # 1ex = 1/2 fontSize
            elif value[-1:] == '%':
                return (relative * float(value[:-1].strip())) / 100.0 # 1% = (fontSize * 1) / 100
            elif value in ("normal", "inherit"):
                return relative
            elif value in _relativeSizeTable:
                if base:
                    return max(MIN_FONT_SIZE, base * _relativeSizeTable[value])
                return max(MIN_FONT_SIZE, relative * _relativeSizeTable[value])
            elif value in _absoluteSizeTable:
                if base:
                    return max(MIN_FONT_SIZE, base * _absoluteSizeTable[value])
                return max(MIN_FONT_SIZE, relative * _absoluteSizeTable[value])
        # Bare numbers are accepted as points; anything unparseable yields
        # the default.
        try:
            value = float(value)
        except:
            log.warn("getSize: Not a float %r", value)
            return default #value = 0
        return max(0, value)
    except Exception:
        log.warn("getSize %r %r", original, relative, exc_info=1)
        return default
@memoized
def getCoords(x, y, w, h, pagesize):
    """
    Convert coordinates given with an upper-left origin into reportlab's
    bottom-left origin coordinate system.

    Negative x/y are offsets from the right/bottom page edge; non-positive
    w/h extend to the opposite edge minus |value|. Returns (x, y, w, h)
    when both w and h are given, else just (x, y).
    """
    #~ print pagesize
    ax, ay = pagesize
    if x < 0:
        x = ax + x
    if y < 0:
        y = ay + y
    # Idiom fix: identity comparison with None instead of '!= None'.
    if w is not None and h is not None:
        if w <= 0:
            w = (ax - x + w)
        if h <= 0:
            h = (ay - y + h)
        # Flip y so the rectangle is addressed from the bottom-left corner.
        return x, (ay - y - h), w, h
    return x, (ay - y)
@memoized
def getBox(box, pagesize):
    """
    Parse sizes by corners in the form:
    <X-Left> <Y-Upper> <Width> <Height>
    The last two values, when negative, are interpreted as offsets from
    the right and lower border.
    """
    box = str(box).split()
    if len(box) != 4:
        # py3-compatible raise (was the py2-only 'raise Exception, msg' form)
        raise Exception("box not defined right way")
    x, y, w, h = [getSize(pos) for pos in box]
    return getCoords(x, y, w, h, pagesize)
def getFrameDimensions(data, page_width, page_height):
    """Calculate dimensions of a frame
    Returns left, top, width and height of the frame in points.
    """
    # An explicit "-pdf-frame-box: <left> <top> <width> <height>" wins
    # outright and skips all of the edge/margin arithmetic below.
    box = data.get("-pdf-frame-box", [])
    if len(box) == 4:
        return [getSize(x) for x in box]
    top = getSize(data.get("top", 0))
    left = getSize(data.get("left", 0))
    bottom = getSize(data.get("bottom", 0))
    right = getSize(data.get("right", 0))
    # When an explicit height is given, it overrides whichever opposite
    # edge was not specified (the remaining edge is derived from the page
    # height). Same scheme for width below.
    if "height" in data:
        height = getSize(data["height"])
        if "top" in data:
            top = getSize(data["top"])
            bottom = page_height - (top + height)
        elif "bottom" in data:
            bottom = getSize(data["bottom"])
            top = page_height - (bottom + height)
    if "width" in data:
        width = getSize(data["width"])
        if "left" in data:
            left = getSize(data["left"])
            right = page_width - (left + width)
        elif "right" in data:
            right = getSize(data["right"])
            left = page_width - (right + width)
    # Margins shrink the frame on every side.
    top += getSize(data.get("margin-top", 0))
    left += getSize(data.get("margin-left", 0))
    bottom += getSize(data.get("margin-bottom", 0))
    right += getSize(data.get("margin-right", 0))
    # Final width/height are recomputed from the adjusted edge offsets.
    width = page_width - (left + right)
    height = page_height - (top + bottom)
    return left, top, width, height
@memoized
def getPos(position, pagesize):
    """
    Pair of coordinates: parses "<x> <y>" and converts it into
    reportlab's coordinate system via getCoords.
    """
    position = str(position).split()
    if len(position) != 2:
        # py3-compatible raise (was the py2-only 'raise Exception, msg' form)
        raise Exception("position not defined right way")
    x, y = [getSize(pos) for pos in position]
    return getCoords(x, y, None, None, pagesize)
def getBool(s):
    """Loose boolean parsing: 'y', 'yes', '1' and 'true' count as True."""
    truthy = ("y", "yes", "1", "true")
    return str(s).lower() in truthy
# Module-wide monotonically increasing counter backing getUID().
_uid = 0
def getUID():
    """Return a process-wide unique id as a string (monotonic counter)."""
    global _uid
    _uid = _uid + 1
    return str(_uid)
# CSS text-align keyword -> reportlab TA_* alignment constant
# ('middle' is accepted as a synonym for 'center').
_alignments = {
    "left": TA_LEFT,
    "center": TA_CENTER,
    "middle": TA_CENTER,
    "right": TA_RIGHT,
    "justify": TA_JUSTIFY,
    }
def getAlign(value, default=TA_LEFT):
    """Map a CSS text-align keyword onto a reportlab TA_* constant,
    falling back to *default* for unknown keywords."""
    key = str(value).lower()
    if key in _alignments:
        return _alignments[key]
    return default
#def getVAlign(value):
# # Unused
# return str(value).upper()
# On Google App Engine both buffer strategies stay in memory (StringIO);
# otherwise the second strategy spills to a real named temporary file.
GAE = "google.appengine" in sys.modules
if GAE:
    STRATEGIES = (
        StringIO.StringIO,
        StringIO.StringIO)
else:
    STRATEGIES = (
        StringIO.StringIO,
        tempfile.NamedTemporaryFile)
class pisaTempFile(object):
    """A temporary file implementation that uses memory unless
    either capacity is breached or fileno is requested, at which
    point a real temporary file will be created and the relevant
    details returned.
    If capacity is -1 the second strategy will never be used.
    Inspired by:
    http://code.activestate.com/recipes/496744/
    """
    # Strategy 0 = in-memory StringIO, strategy 1 = real temp file
    # (see the module-level STRATEGIES tuple).
    STRATEGIES = STRATEGIES
    # Default spill threshold in bytes.
    CAPACITY = 10 * 1024
    def __init__(self, buffer="", capacity=CAPACITY):
        """Creates a TempFile object containing the specified buffer.
        If capacity is specified, we use a real temporary file once the
        file gets larger than that size. Otherwise, the data is stored
        in memory.
        """
        #if hasattr(buffer, "read"):
        #shutil.copyfileobj( fsrc, fdst[, length])
        self.capacity = capacity
        # Start directly on the temp-file strategy if the initial buffer
        # already exceeds the capacity.
        self.strategy = int(len(buffer) > self.capacity)
        try:
            self._delegate = self.STRATEGIES[self.strategy]()
        except:
            # Fallback for Google AppEnginge etc.
            self._delegate = self.STRATEGIES[0]()
        self.write(buffer)
        # we must set the file's position for preparing to read
        self.seek(0)
    def makeTempFile(self):
        " Switch to the next strategy. If an error occurred stay with the first strategy "
        if self.strategy == 0:
            try:
                # Copy the in-memory contents into a real temp file.
                new_delegate = self.STRATEGIES[1]()
                new_delegate.write(self.getvalue())
                self._delegate = new_delegate
                self.strategy = 1
                # self.name resolves through __getattr__ to the temp file.
                log.warn("Created temporary file %s", self.name)
            except:
                # Disable any further switch attempts.
                self.capacity = - 1
    def getFileName(self):
        " Get a named temporary file "
        self.makeTempFile()
        return self.name
    def fileno(self):
        """Forces this buffer to use a temporary file as the underlying.
        object and returns the fileno associated with it.
        """
        self.makeTempFile()
        return self._delegate.fileno()
    def getvalue(self):
        " Get value of file. Work around for second strategy "
        if self.strategy == 0:
            return self._delegate.getvalue()
        # Temp files have no getvalue(); flush and re-read from the start.
        self._delegate.flush()
        self._delegate.seek(0)
        return self._delegate.read()
    def write(self, value):
        " If capacity != -1 and length of file > capacity it is time to switch "
        if self.capacity > 0 and self.strategy == 0:
            len_value = len(value)
            if len_value >= self.capacity:
                needs_new_strategy = True
            else:
                self.seek(0, 2) # find end of file
                needs_new_strategy = \
                    (self.tell() + len_value) >= self.capacity
            if needs_new_strategy:
                self.makeTempFile()
        self._delegate.write(value)
    def __getattr__(self, name):
        # Everything not defined here is delegated to the underlying
        # StringIO/temp-file object (read, seek, tell, name, ...).
        try:
            return getattr(self._delegate, name)
        except AttributeError:
            # hide the delegation
            e = "object '%s' has no attribute '%s'" \
                % (self.__class__.__name__, name)
            raise AttributeError(e)
# data: URI with a base64 payload, e.g. "data:image/png;base64,...."
_rx_datauri = re.compile("^data:(?P<mime>[a-z]+/[a-z]+);base64,(?P<data>.*)$", re.M | re.DOTALL)
class pisaFileObject:
    """
    Resolves a URI (data: URI, file:, http(s): or a local path, optionally
    relative to *basepath*) and exposes its content as a file object
    (self.file) or raw bytes (self.data), together with the detected mime
    type.
    """
    def __init__(self, uri, basepath=None):
        self.basepath = basepath
        self.mimetype = None   # detected Content-Type (without parameters)
        self.file = None       # open file-like object, when available
        self.data = None       # raw bytes, when decoded from a data: URI
        self.uri = None        # resolved URI
        self.local = None      # local filesystem path, when applicable
        self.tmp_file = None   # lazily created named temp file (getNamedFile)
        uri = str(uri)
        log.debug("FileObject %r, Basepath: %r", uri, basepath)
        # Data URI
        if uri.startswith("data:"):
            m = _rx_datauri.match(uri)
            self.mimetype = m.group("mime")
            self.data = base64.decodestring(m.group("data"))
        else:
            # Check if we have an external scheme
            if basepath and not urlparse.urlparse(uri).scheme:
                urlParts = urlparse.urlparse(basepath)
            else:
                urlParts = urlparse.urlparse(uri)
            log.debug("URLParts: %r", urlParts)
            if urlParts.scheme == 'file':
                if basepath and uri.startswith('/'):
                    uri = urlparse.urljoin(basepath, uri[1:])
                urlResponse = urllib2.urlopen(uri)
                self.mimetype = urlResponse.info().get("Content-Type", '').split(";")[0]
                self.uri = urlResponse.geturl()
                self.file = urlResponse
            # Drive letters have len==1 but we are looking for things like http:
            elif urlParts.scheme in ('http', 'https'):
                # External data
                if basepath:
                    uri = urlparse.urljoin(basepath, uri)
                #path = urlparse.urlsplit(url)[2]
                #mimetype = getMimeType(path)
                # Using HTTPLIB
                server, path = urllib.splithost(uri[uri.find("//"):])
                if uri.startswith("https://"):
                    conn = httplib.HTTPSConnection(server)
                else:
                    conn = httplib.HTTPConnection(server)
                conn.request("GET", path)
                r1 = conn.getresponse()
                # log.debug("HTTP %r %r %r %r", server, path, uri, r1)
                if (r1.status, r1.reason) == (200, "OK"):
                    self.mimetype = r1.getheader("Content-Type", '').split(";")[0]
                    self.uri = uri
                    if r1.getheader("content-encoding") == "gzip":
                        # Transparently decompress gzip-encoded responses.
                        import gzip
                        self.file = gzip.GzipFile(mode="rb", fileobj=r1)
                    else:
                        self.file = r1
                else:
                    # Non-200 from httplib: retry once through urllib2
                    # (which follows redirects).
                    urlResponse = urllib2.urlopen(uri)
                    self.mimetype = urlResponse.info().get("Content-Type", '').split(";")[0]
                    self.uri = urlResponse.geturl()
                    self.file = urlResponse
            else:
                # Local data
                if basepath:
                    uri = os.path.normpath(os.path.join(basepath, uri))
                if os.path.isfile(uri):
                    self.uri = uri
                    self.local = uri
                    self.setMimeTypeByName(uri)
                    self.file = open(uri, "rb")
    def getFile(self):
        # Prefer the open file object; wrap raw data in a pisaTempFile.
        if self.file is not None:
            return self.file
        if self.data is not None:
            return pisaTempFile(self.data)
        return None
    def getNamedFile(self):
        # Returns a filesystem path for the content, materializing a named
        # temporary file when the source is not already local.
        if self.notFound():
            return None
        if self.local:
            return str(self.local)
        if not self.tmp_file:
            self.tmp_file = tempfile.NamedTemporaryFile()
            if self.file:
                shutil.copyfileobj(self.file, self.tmp_file)
            else:
                self.tmp_file.write(self.getData())
            self.tmp_file.flush()
        return self.tmp_file.name
    def getData(self):
        # Reads (and caches) the whole content as bytes.
        if self.data is not None:
            return self.data
        if self.file is not None:
            self.data = self.file.read()
            return self.data
        return None
    def notFound(self):
        # True when neither a file object nor raw data could be resolved.
        return (self.file is None) and (self.data is None)
    def setMimeTypeByName(self, name):
        " Guess the mime type "
        mimetype = mimetypes.guess_type(name)[0]
        if mimetype is not None:
            self.mimetype = mimetypes.guess_type(name)[0].split(";")[0]
def getFile(*a, **kw):
    """Build a pisaFileObject for the given arguments, returning None when
    the resource could not be resolved."""
    resolved = pisaFileObject(*a, **kw)
    return None if resolved.notFound() else resolved
# CSS/HTML named colors mapped to reportlab Color instances (components in
# the 0-1 range).
# NOTE(review): the system-palette entries ('activeborder', 'buttonface',
# 'menu', ...) pass 0-255 components to Color, which takes 0-1 floats --
# looks wrong, but callers may depend on it; confirm before changing.
COLOR_BY_NAME = {
    'activeborder': Color(212, 208, 200),
    'activecaption': Color(10, 36, 106),
    'aliceblue': Color(.941176, .972549, 1),
    'antiquewhite': Color(.980392, .921569, .843137),
    'appworkspace': Color(128, 128, 128),
    'aqua': Color(0, 1, 1),
    'aquamarine': Color(.498039, 1, .831373),
    'azure': Color(.941176, 1, 1),
    'background': Color(58, 110, 165),
    'beige': Color(.960784, .960784, .862745),
    'bisque': Color(1, .894118, .768627),
    'black': Color(0, 0, 0),
    'blanchedalmond': Color(1, .921569, .803922),
    'blue': Color(0, 0, 1),
    'blueviolet': Color(.541176, .168627, .886275),
    'brown': Color(.647059, .164706, .164706),
    'burlywood': Color(.870588, .721569, .529412),
    'buttonface': Color(212, 208, 200),
    'buttonhighlight': Color(255, 255, 255),
    'buttonshadow': Color(128, 128, 128),
    'buttontext': Color(0, 0, 0),
    'cadetblue': Color(.372549, .619608, .627451),
    'captiontext': Color(255, 255, 255),
    'chartreuse': Color(.498039, 1, 0),
    'chocolate': Color(.823529, .411765, .117647),
    'coral': Color(1, .498039, .313725),
    'cornflowerblue': Color(.392157, .584314, .929412),
    'cornsilk': Color(1, .972549, .862745),
    'crimson': Color(.862745, .078431, .235294),
    'cyan': Color(0, 1, 1),
    'darkblue': Color(0, 0, .545098),
    'darkcyan': Color(0, .545098, .545098),
    'darkgoldenrod': Color(.721569, .52549, .043137),
    'darkgray': Color(.662745, .662745, .662745),
    'darkgreen': Color(0, .392157, 0),
    'darkgrey': Color(.662745, .662745, .662745),
    'darkkhaki': Color(.741176, .717647, .419608),
    'darkmagenta': Color(.545098, 0, .545098),
    'darkolivegreen': Color(.333333, .419608, .184314),
    'darkorange': Color(1, .54902, 0),
    'darkorchid': Color(.6, .196078, .8),
    'darkred': Color(.545098, 0, 0),
    'darksalmon': Color(.913725, .588235, .478431),
    'darkseagreen': Color(.560784, .737255, .560784),
    'darkslateblue': Color(.282353, .239216, .545098),
    'darkslategray': Color(.184314, .309804, .309804),
    'darkslategrey': Color(.184314, .309804, .309804),
    'darkturquoise': Color(0, .807843, .819608),
    'darkviolet': Color(.580392, 0, .827451),
    'deeppink': Color(1, .078431, .576471),
    'deepskyblue': Color(0, .74902, 1),
    'dimgray': Color(.411765, .411765, .411765),
    'dimgrey': Color(.411765, .411765, .411765),
    'dodgerblue': Color(.117647, .564706, 1),
    'firebrick': Color(.698039, .133333, .133333),
    'floralwhite': Color(1, .980392, .941176),
    'forestgreen': Color(.133333, .545098, .133333),
    'fuchsia': Color(1, 0, 1),
    'gainsboro': Color(.862745, .862745, .862745),
    'ghostwhite': Color(.972549, .972549, 1),
    'gold': Color(1, .843137, 0),
    'goldenrod': Color(.854902, .647059, .12549),
    'gray': Color(.501961, .501961, .501961),
    'graytext': Color(128, 128, 128),
    'green': Color(0, .501961, 0),
    'greenyellow': Color(.678431, 1, .184314),
    'grey': Color(.501961, .501961, .501961),
    'highlight': Color(10, 36, 106),
    'highlighttext': Color(255, 255, 255),
    'honeydew': Color(.941176, 1, .941176),
    'hotpink': Color(1, .411765, .705882),
    'inactiveborder': Color(212, 208, 200),
    'inactivecaption': Color(128, 128, 128),
    'inactivecaptiontext': Color(212, 208, 200),
    'indianred': Color(.803922, .360784, .360784),
    'indigo': Color(.294118, 0, .509804),
    'infobackground': Color(255, 255, 225),
    'infotext': Color(0, 0, 0),
    'ivory': Color(1, 1, .941176),
    'khaki': Color(.941176, .901961, .54902),
    'lavender': Color(.901961, .901961, .980392),
    'lavenderblush': Color(1, .941176, .960784),
    'lawngreen': Color(.486275, .988235, 0),
    'lemonchiffon': Color(1, .980392, .803922),
    'lightblue': Color(.678431, .847059, .901961),
    'lightcoral': Color(.941176, .501961, .501961),
    'lightcyan': Color(.878431, 1, 1),
    'lightgoldenrodyellow': Color(.980392, .980392, .823529),
    'lightgray': Color(.827451, .827451, .827451),
    'lightgreen': Color(.564706, .933333, .564706),
    'lightgrey': Color(.827451, .827451, .827451),
    'lightpink': Color(1, .713725, .756863),
    'lightsalmon': Color(1, .627451, .478431),
    'lightseagreen': Color(.12549, .698039, .666667),
    'lightskyblue': Color(.529412, .807843, .980392),
    'lightslategray': Color(.466667, .533333, .6),
    'lightslategrey': Color(.466667, .533333, .6),
    'lightsteelblue': Color(.690196, .768627, .870588),
    'lightyellow': Color(1, 1, .878431),
    'lime': Color(0, 1, 0),
    'limegreen': Color(.196078, .803922, .196078),
    'linen': Color(.980392, .941176, .901961),
    'magenta': Color(1, 0, 1),
    'maroon': Color(.501961, 0, 0),
    'mediumaquamarine': Color(.4, .803922, .666667),
    'mediumblue': Color(0, 0, .803922),
    'mediumorchid': Color(.729412, .333333, .827451),
    'mediumpurple': Color(.576471, .439216, .858824),
    'mediumseagreen': Color(.235294, .701961, .443137),
    'mediumslateblue': Color(.482353, .407843, .933333),
    'mediumspringgreen': Color(0, .980392, .603922),
    'mediumturquoise': Color(.282353, .819608, .8),
    'mediumvioletred': Color(.780392, .082353, .521569),
    'menu': Color(212, 208, 200),
    'menutext': Color(0, 0, 0),
    'midnightblue': Color(.098039, .098039, .439216),
    'mintcream': Color(.960784, 1, .980392),
    'mistyrose': Color(1, .894118, .882353),
    'moccasin': Color(1, .894118, .709804),
    'navajowhite': Color(1, .870588, .678431),
    'navy': Color(0, 0, .501961),
    'oldlace': Color(.992157, .960784, .901961),
    'olive': Color(.501961, .501961, 0),
    'olivedrab': Color(.419608, .556863, .137255),
    'orange': Color(1, .647059, 0),
    'orangered': Color(1, .270588, 0),
    'orchid': Color(.854902, .439216, .839216),
    'palegoldenrod': Color(.933333, .909804, .666667),
    'palegreen': Color(.596078, .984314, .596078),
    'paleturquoise': Color(.686275, .933333, .933333),
    'palevioletred': Color(.858824, .439216, .576471),
    'papayawhip': Color(1, .937255, .835294),
    'peachpuff': Color(1, .854902, .72549),
    'peru': Color(.803922, .521569, .247059),
    'pink': Color(1, .752941, .796078),
    'plum': Color(.866667, .627451, .866667),
    'powderblue': Color(.690196, .878431, .901961),
    'purple': Color(.501961, 0, .501961),
    'red': Color(1, 0, 0),
    'rosybrown': Color(.737255, .560784, .560784),
    'royalblue': Color(.254902, .411765, .882353),
    'saddlebrown': Color(.545098, .270588, .07451),
    'salmon': Color(.980392, .501961, .447059),
    'sandybrown': Color(.956863, .643137, .376471),
    'scrollbar': Color(212, 208, 200),
    'seagreen': Color(.180392, .545098, .341176),
    'seashell': Color(1, .960784, .933333),
    'sienna': Color(.627451, .321569, .176471),
    'silver': Color(.752941, .752941, .752941),
    'skyblue': Color(.529412, .807843, .921569),
    'slateblue': Color(.415686, .352941, .803922),
    'slategray': Color(.439216, .501961, .564706),
    'slategrey': Color(.439216, .501961, .564706),
    'snow': Color(1, .980392, .980392),
    'springgreen': Color(0, 1, .498039),
    'steelblue': Color(.27451, .509804, .705882),
    'tan': Color(.823529, .705882, .54902),
    'teal': Color(0, .501961, .501961),
    'thistle': Color(.847059, .74902, .847059),
    'threeddarkshadow': Color(64, 64, 64),
    'threedface': Color(212, 208, 200),
    'threedhighlight': Color(255, 255, 255),
    'threedlightshadow': Color(212, 208, 200),
    'threedshadow': Color(128, 128, 128),
    'tomato': Color(1, .388235, .278431),
    'turquoise': Color(.25098, .878431, .815686),
    'violet': Color(.933333, .509804, .933333),
    'wheat': Color(.960784, .870588, .701961),
    'white': Color(1, 1, 1),
    'whitesmoke': Color(.960784, .960784, .960784),
    'window': Color(255, 255, 255),
    'windowframe': Color(0, 0, 0),
    'windowtext': Color(0, 0, 0),
    'yellow': Color(1, 1, 0),
    'yellowgreen': Color(.603922, .803922, .196078)}
| tecknicaltom/xhtml2pdf | xhtml2pdf/util.py | Python | apache-2.0 | 27,717 |
#!/usr/bin/python
"""Utility to generate files to benchmark"""
# Copyright Abel Sinkovics (abel@sinkovics.hu) 2016.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import argparse
import os
import string
import random
import re
import json
import Cheetah.Template
import chars
def regex_to_error_msg(regex):
    """Format a human-readable error message from a regex"""
    # Drop unescaped grouping parens first, then strip anchors, collapse
    # whitespace classes, replace digit runs with X and unescape literals.
    # The replacement order matters and is preserved from the original.
    msg = re.sub('([^\\\\])[()]', '\\1', regex)
    for old, new in (
            ('[ \t]*$', ''),
            ('^', ''),
            ('$', ''),
            ('[ \t]*', ' '),
            ('[ \t]+', ' '),
            ('[0-9]+', 'X'),
            ('\\[', '['),
            ('\\]', ']'),
            ('\\(', '('),
            ('\\)', ')'),
            ('\\.', '.')):
        msg = msg.replace(old, new)
    return msg
def mkdir_p(path):
    """mkdir -p path: create *path* and any missing parents.

    Silently succeeds when the path already exists; unlike the previous
    version, real failures (permissions, read-only fs, ...) propagate
    instead of being swallowed.
    """
    import errno
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only "already exists" is part of mkdir -p semantics.
        if exc.errno != errno.EEXIST:
            raise
def in_comment(regex):
    """Builds a regex matching "regex" inside a C++-style // comment line"""
    parts = ['^[ \t]*//[ \t]*', regex, '[ \t]*$']
    return ''.join(parts)
def random_chars(number):
    """Generate random characters

    Returns a generator yielding *number* randomly chosen characters in
    their C-source representation.  Characters whose representation is a
    hex escape (``\\xNN``) are excluded from the pool, and selection is
    weighted by the occurrence counts in chars.CHARS.
    """
    # Pool of candidate characters: every entry of chars.CHARS whose
    # C representation is not a \xNN escape.
    char_map = {
        k: v for k, v in chars.CHARS.iteritems()
        if not format_character(k).startswith('\\x')
    }
    # Total weight; randint picks a position inside the cumulative
    # distribution and nth_char maps it back to a character.
    char_num = sum(char_map.values())
    return (
        format_character(nth_char(char_map, random.randint(0, char_num - 1)))
        for _ in xrange(0, number)
    )
def random_string(length):
    """Generate a random string or character list depending on the mode"""
    # Wrap `length` random characters in a BOOST_METAPARSE_STRING call.
    body = ''.join(random_chars(length))
    return 'BOOST_METAPARSE_STRING("%s")' % (body,)
class Mode(object):
    """Represents a generation mode

    Two modes exist: 'BOOST_METAPARSE_STRING' (identifier 'bmp'), which
    leaves the generated source untouched, and 'manual' (identifier
    'man'), which expands every BOOST_METAPARSE_STRING("...") call into
    an explicit ::boost::metaparse::string<'a','b',...> instantiation.
    """

    def __init__(self, name):
        self.name = name
        if name == 'BOOST_METAPARSE_STRING':
            self.identifier = 'bmp'
        elif name == 'manual':
            self.identifier = 'man'
        else:
            raise Exception('Invalid mode: {0}'.format(name))

    def description(self):
        """The description of the mode"""
        if self.identifier == 'bmp':
            return 'Using BOOST_METAPARSE_STRING'
        elif self.identifier == 'man':
            return 'Generating strings manually'

    def convert_from(self, base):
        """Convert a BOOST_METAPARSE_STRING mode document into one with
        this mode"""
        if self.identifier == 'bmp':
            return base
        elif self.identifier == 'man':
            result = []
            prefix = 'BOOST_METAPARSE_STRING("'
            while True:
                bmp_at = base.find(prefix)
                if bmp_at == -1:
                    # No more macro calls: flush what is left verbatim.
                    return ''.join(result) + base
                else:
                    result.append(
                        base[0:bmp_at] + '::boost::metaparse::string<'
                    )
                    new_base = ''
                    was_backslash = False
                    comma = ''
                    # Walk the macro's string literal character by
                    # character, emitting a comma-separated char literal
                    # for each one (backslash escapes are kept intact).
                    for i in xrange(bmp_at + len(prefix), len(base)):
                        if was_backslash:
                            result.append(
                                '{0}\'\\{1}\''.format(comma, base[i])
                            )
                            was_backslash = False
                            comma = ','
                        elif base[i] == '"':
                            # Closing quote: skip the '")' that ends the
                            # macro call and continue after it.
                            new_base = base[i+2:]
                            break
                        elif base[i] == '\\':
                            was_backslash = True
                        else:
                            result.append('{0}\'{1}\''.format(comma, base[i]))
                            comma = ','
                    base = new_base
                    result.append('>')
class Template(object):
    """Represents a loaded template

    A template is a Cheetah source file whose header comments describe
    the N range ("// n in [a..b), step s") and key:value properties
    ("// name: value") used to drive the generation.
    """

    def __init__(self, name, content):
        # name: template file name without the .cpp extension
        # content: the raw template text
        self.name = name
        self.content = content

    def instantiate(self, value_of_n):
        """Instantiates the template"""
        template = Cheetah.Template.Template(
            self.content,
            searchList={'n': value_of_n}
        )
        # Expose random_string so templates can call it directly.
        template.random_string = random_string
        return str(template)

    def range(self):
        """Returns the range for N"""
        # Parsed from a header comment of the form:
        #   // n in [<first>..<last>), step <step>
        match = self._match(in_comment(
            'n[ \t]+in[ \t]*\\[([0-9]+)\\.\\.([0-9]+)\\),[ \t]+'
            'step[ \t]+([0-9]+)'
        ))
        return range(
            int(match.group(1)),
            int(match.group(2)),
            int(match.group(3))
        )

    def property(self, name):
        """Parses and returns a property"""
        # Properties are header comments of the form "// name: value".
        return self._get_line(in_comment(name + ':[ \t]*(.*)'))

    def modes(self):
        """Returns the list of generation modes"""
        return [Mode(s.strip()) for s in self.property('modes').split(',')]

    def _match(self, regex):
        """Find the first line matching regex and return the match object"""
        cregex = re.compile(regex)
        for line in self.content.splitlines():
            match = cregex.match(line)
            if match:
                return match
        raise Exception('No "{0}" line in {1}.cpp'.format(
            regex_to_error_msg(regex),
            self.name
        ))

    def _get_line(self, regex):
        """Get a line based on a regex"""
        return self._match(regex).group(1)
def load_file(path):
    """Returns the content of the file"""
    # Binary mode: the content is written back verbatim elsewhere.
    in_file = open(path, 'rb')
    try:
        return in_file.read()
    finally:
        in_file.close()
def templates_in(path):
    """Enumerate the templates found in path"""
    suffix = '.cpp'
    # List the directory eagerly; loading each template stays lazy.
    names = [entry for entry in os.listdir(path) if entry.endswith(suffix)]
    return (
        Template(name[:-len(suffix)], load_file(os.path.join(path, name)))
        for name in names
    )
def nth_char(char_map, index):
    """Returns the nth character of a character->occurrence map"""
    # Walk the cumulative occurrence counts until `index` falls inside
    # one character's bucket; None when index exceeds the total weight.
    remaining = index
    for char, count in char_map.items():
        if remaining < count:
            return char
        remaining -= count
    return None
def format_character(char):
    """Returns the C-formatting of the character"""
    # Characters that appear verbatim in C source.
    passthrough = (
        string.ascii_letters
        + string.digits
        + '_.:; !?+-/=<>$()@~`|#[]{}&*^%'
    )
    # Characters with a dedicated backslash escape.
    simple_escapes = {
        '"': '\\"',
        '\'': '\\\'',
        '\\': '\\\\',
        '\n': '\\n',
        '\r': '\\r',
        '\t': '\\t',
    }
    if char in passthrough:
        return char
    elif char in simple_escapes:
        return simple_escapes[char]
    else:
        # Everything else becomes a hex escape.
        return '\\x{:02x}'.format(ord(char))
def write_file(filename, content):
    """Create the file with the given content

    Logs the file name and (over)writes the file in binary mode.
    """
    # print as a function call keeps this line valid under both Python 2
    # and Python 3: a single parenthesised argument is printed
    # identically by the Python 2 print statement.
    print('Generating {0}'.format(filename))
    with open(filename, 'wb') as out_f:
        out_f.write(content)
def out_filename(template, n_val, mode):
    """Determine the output filename"""
    # <template name>_<N>_<mode identifier>.cpp
    return '%s_%s_%s.cpp' % (template.name, n_val, mode.identifier)
def main():
    """The main function of the script

    Parses the command line, then for every template in --src generates
    one .cpp file per (N, mode) pair plus a JSON manifest describing the
    generated files, all placed in --out.
    """
    desc = 'Generate files to benchmark'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument(
        '--src',
        dest='src_dir',
        default='src',
        help='The directory containing the templates'
    )
    parser.add_argument(
        '--out',
        dest='out_dir',
        default='generated',
        help='The output directory'
    )
    parser.add_argument(
        '--seed',
        dest='seed',
        default='13',
        help='The random seed (to ensure consistent regeneration)'
    )
    args = parser.parse_args()

    # Seed the RNG so repeated runs regenerate identical sources.
    random.seed(int(args.seed))

    mkdir_p(args.out_dir)
    for template in templates_in(args.src_dir):
        modes = template.modes()
        n_range = template.range()
        # Instantiate each template once per N, then translate that one
        # instantiation into every requested mode.
        for n_value in n_range:
            base = template.instantiate(n_value)
            for mode in modes:
                write_file(
                    os.path.join(
                        args.out_dir,
                        out_filename(template, n_value, mode)
                    ),
                    mode.convert_from(base)
                )
        # Emit a JSON manifest mapping every N and mode to its file.
        write_file(
            os.path.join(args.out_dir, '{0}.json'.format(template.name)),
            json.dumps({
                'files': {
                    n: {
                        m.identifier: out_filename(template, n, m)
                        for m in modes
                    } for n in n_range
                },
                'name': template.name,
                'x_axis_label': template.property('x_axis_label'),
                'desc': template.property('desc'),
                'modes': {m.identifier: m.description() for m in modes}
            })
        )
if __name__ == '__main__':
main()
| stan-dev/math | lib/boost_1.75.0/libs/metaparse/tools/benchmark/generate.py | Python | bsd-3-clause | 8,790 |
# flake8: noqa
from __future__ import absolute_import
from .filterset import FilterSet
from .filters import *
__version__ = '0.9.2'
def parse_version(version):
    '''
    '0.1.2-dev' -> (0, 1, 2, 'dev')
    '0.1.2' -> (0, 1, 2)
    '''
    # Split on dots, then split a possible "-suffix" off the last piece.
    parts = version.split('.')
    parts = parts[:-1] + parts[-1].split('-')
    # Numeric pieces become ints; anything else (e.g. 'dev') stays a string.
    return tuple(int(piece) if piece.isdigit() else piece
                 for piece in parts)
VERSION = parse_version(__version__)
| andela-bojengwa/talk | venv/lib/python2.7/site-packages/django_filters/__init__.py | Python | mit | 485 |
#!./uwsgi --http-socket :9090 --async 100 ...
# same chat example but using uwsgi async api
# for pypy + continulets just run:
# uwsgi --http-socket :9090 --pypy-home /opt/pypy --pypy-wsgi-file tests/websockets_chat_async.py --pypy-eval "uwsgi_pypy_setup_continulets()" --async 100
import uwsgi
import time
import redis
import sys
def application(env, sr):
    """WSGI entry point.

    '/'         -- serves the HTML/JS chat page
    '/foobar/'  -- websocket endpoint bridged to the Redis 'foobar'
                   pub/sub channel via the uWSGI async API
                   (wait_fd_read / suspend / ready_fd)
    """
    # Pick ws:// or wss:// for the page's WebSocket URL.
    ws_scheme = 'ws'
    if 'HTTPS' in env or env['wsgi.url_scheme'] == 'https':
        ws_scheme = 'wss'
    if env['PATH_INFO'] == '/':
        sr('200 OK', [('Content-Type', 'text/html')])
        output = """
<html>
<head>
<script language="Javascript">
var s = new WebSocket("%s://%s/foobar/");
s.onopen = function() {
alert("connected !!!");
s.send("ciao");
};
s.onmessage = function(e) {
var bb = document.getElementById('blackboard')
var html = bb.innerHTML;
bb.innerHTML = html + '<br/>' + e.data;
};
s.onerror = function(e) {
alert(e);
}
s.onclose = function(e) {
alert("connection closed");
}
function invia() {
var value = document.getElementById('testo').value;
s.send(value);
}
</script>
</head>
<body>
<h1>WebSocket</h1>
<input type="text" id="testo"/>
<input type="button" value="invia" onClick="invia();"/>
<div id="blackboard" style="width:640px;height:480px;background-color:black;color:white;border: solid 2px red;overflow:auto">
</div>
</body>
</html>
        """ % (ws_scheme, env['HTTP_HOST'])
        if sys.version_info[0] > 2:
            # Python 3: the WSGI body must be bytes.
            return output.encode('latin1')
        return output
    elif env['PATH_INFO'] == '/favicon.ico':
        return ""
    elif env['PATH_INFO'] == '/foobar/':
        uwsgi.websocket_handshake(env['HTTP_SEC_WEBSOCKET_KEY'], env.get('HTTP_ORIGIN', ''))
        print("websockets...")
        r = redis.StrictRedis(host='localhost', port=6379, db=0)
        channel = r.pubsub()
        channel.subscribe('foobar')
        websocket_fd = uwsgi.connection_fd()
        redis_fd = channel.connection._sock.fileno()
        while True:
            # Wait on both fds; the 3 second timeout on the websocket fd
            # lets the timeout branch below service ping/pong frames.
            uwsgi.wait_fd_read(websocket_fd, 3)
            uwsgi.wait_fd_read(redis_fd)
            # Yield to the uWSGI async loop until one fd is readable.
            uwsgi.suspend()
            fd = uwsgi.ready_fd()
            if fd > -1:
                if fd == websocket_fd:
                    # Browser -> Redis: publish whatever the client sent.
                    msg = uwsgi.websocket_recv_nb()
                    if msg:
                        r.publish('foobar', msg)
                elif fd == redis_fd:
                    # Redis -> browser: forward published messages.
                    msg = channel.parse_response()
                    print(msg)
                    # only interested in user messages
                    t = 'message'
                    if sys.version_info[0] > 2:
                        t = b'message'
                    if msg[0] == t:
                        uwsgi.websocket_send("[%s] %s" % (time.time(), msg))
            else:
                # on timeout call websocket_recv_nb again to manage ping/pong
                msg = uwsgi.websocket_recv_nb()
                if msg:
                    r.publish('foobar', msg)
| goal/uwsgi | tests/websockets_chat_async.py | Python | gpl-2.0 | 3,284 |
"""Sanity test using rstcheck."""
from __future__ import absolute_import, print_function
import os
from lib.sanity import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanitySkipped,
)
from lib.util import (
SubprocessError,
run_command,
parse_to_dict,
find_executable,
)
from lib.config import (
SanityConfig,
)
class RstcheckTest(SanitySingleVersion):
    """Sanity test using rstcheck."""
    def test(self, args, targets):
        """
        Run rstcheck over every .rst file in the targets and convert its
        findings into sanity messages.

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: SanityResult
        """
        # Substitutions that rstcheck should not complain about.
        with open('test/sanity/rstcheck/ignore-substitutions.txt', 'r') as ignore_fd:
            ignore_substitutions = sorted(set(ignore_fd.read().splitlines()))
        paths = sorted(i.path for i in targets.include if os.path.splitext(i.path)[1] in ('.rst',))
        if not paths:
            return SanitySkipped(self.name)
        cmd = [
            'python%s' % args.python_version,
            find_executable('rstcheck'),
            '--report', 'warning',
            '--ignore-substitutions', ','.join(ignore_substitutions),
        ] + paths
        try:
            stdout, stderr = run_command(args, cmd, capture=True)
            status = 0
        except SubprocessError as ex:
            stdout = ex.stdout
            stderr = ex.stderr
            status = ex.status
        # rstcheck reports findings on stderr; anything on stdout is
        # unexpected, so surface it as a hard failure.
        if stdout:
            raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
        if args.explain:
            return SanitySuccess(self.name)
        # Parse lines of the form: path:line: (LEVEL/N) message
        pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+): \((?P<level>INFO|WARNING|ERROR|SEVERE)/[0-4]\) (?P<message>.*)$'
        results = [parse_to_dict(pattern, line) for line in stderr.splitlines()]
        results = [SanityMessage(
            message=r['message'],
            path=r['path'],
            line=int(r['line']),
            column=0,
            level=r['level'],
        ) for r in results]
        if results:
            return SanityFailure(self.name, messages=results)
        return SanitySuccess(self.name)
| tsdmgz/ansible | test/runner/lib/sanity/rstcheck.py | Python | gpl-3.0 | 2,126 |
def handle(foo, *args, <error descr="multiple * parameters are not allowed">*moreargs</error>):
print(foo, args, moreargs)
def handle(foo, *args: int, <error descr="multiple * parameters are not allowed">*moreargs: int</error>):
print(foo, args, moreargs) | dahlstrom-g/intellij-community | python/testData/highlighting/multiplePositionalContainers.py | Python | apache-2.0 | 264 |
class OAuthToolkitError(Exception):
    """
    Base class for exceptions
    """
    def __init__(self, error=None, redirect_uri=None, *args, **kwargs):
        # Keep a handle on the wrapped oauthlib error (may be None).
        self.oauthlib_error = error
        if redirect_uri:
            # Propagate the redirect target onto the wrapped error.
            self.oauthlib_error.redirect_uri = redirect_uri
        super(OAuthToolkitError, self).__init__(*args, **kwargs)
class FatalClientError(OAuthToolkitError):
    """
    Class for critical errors
    """
| ramcn/demo3 | venv/lib/python3.4/site-packages/oauth2_provider/exceptions.py | Python | mit | 441 |
# Prefer the MicroPython ubinascii module; fall back to CPython's
# binascii, and skip the whole test when neither is available.
try:
    try:
        import ubinascii as binascii
    except ImportError:
        import binascii
except ImportError:
    print("SKIP")
    raise SystemExit

# Decode some representative hex strings back to bytes; the printed
# output is compared against the expected test output.
print(binascii.unhexlify(b'0001020304050607'))
print(binascii.unhexlify(b'08090a0b0c0d0e0f'))
print(binascii.unhexlify(b'7f80ff'))
print(binascii.unhexlify(b'313233344142434461626364'))

# Invalid inputs must raise ValueError.
try:
    a = binascii.unhexlify(b'0')  # odd buffer length
except ValueError:
    print('ValueError')
try:
    a = binascii.unhexlify(b'gg')  # digit not hex
except ValueError:
    print('ValueError')
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This module contains functions and methods to authenticate with OAuth1
providers.
"""
__revision__ = \
"$Id$"
from invenio.containerutils import get_substructure
from invenio.dbquery import run_sql
from invenio.external_authentication import ExternalAuth
class ExternalOAuth1(ExternalAuth):
    """
    Contains methods for authenticating with an OAuth1 provider.
    """
    @staticmethod
    def __init_req(req):
        # Reset all per-request OAuth1 state stored on the request.
        req.g['oauth1_provider_name'] = ''
        req.g['oauth1_debug'] = 0
        req.g['oauth1_msg'] = ''
        req.g['oauth1_debug_msg'] = ''
        req.g['oauth1_response'] = None

    def auth_user(self, username, password, req=None):
        """
        Tries to find email and identity of the user from OAuth1 provider. If it
        doesn't find any of them, returns (None, None)

        @param username: Isn't used in this function
        @type username: str

        @param password: Isn't used in this function
        @type password: str

        @param req: request
        @type req: invenio.webinterface_handler_wsgi.SimulatedModPythonRequest

        @rtype: str|NoneType, str|NoneType
        """
        from invenio.access_control_config import CFG_OAUTH1_CONFIGURATIONS
        from invenio.access_control_config import CFG_OAUTH1_PROVIDERS
        from invenio.webinterface_handler import wash_urlargd
        from rauth.service import OAuth1Service
        self.__init_req(req)
        args = wash_urlargd(req.form, {'provider': (str, ''),
                                       'login_method': (str, ''),
                                       'oauth_token': (str, ''),
                                       'oauth_verifier': (str, ''),
                                       'denied': (str, '')
                                       })
        provider_name = req.g['oauth1_provider_name'] = args['provider']
        if not provider_name in CFG_OAUTH1_PROVIDERS:
            # Unknown provider: message code 22.
            req.g['oauth1_msg'] = 22
            return None, None
        # Load the configurations to construct OAuth1 service
        config = CFG_OAUTH1_CONFIGURATIONS[args['provider']]
        req.g['oauth1_debug'] = config.get('debug', 0)
        if not args['oauth_token']:
            # In case of an error, display corresponding message
            if args['denied']:
                # User denied the authorization request: code 21.
                req.g['oauth1_msg'] = 21
                return None, None
            else:
                req.g['oauth1_msg'] = 22
                return None, None
        provider = OAuth1Service(
            name = req.g['oauth1_provider_name'],
            consumer_key = config['consumer_key'],
            consumer_secret = config['consumer_secret'],
            request_token_url = config['request_token_url'],
            access_token_url = config['access_token_url'],
            authorize_url = config['authorize_url'],
            header_auth = True)
        # Get the request token secret from database and exchange it with the
        # access token.
        query = """SELECT secret FROM oauth1_storage WHERE token = %s"""
        params = (args['oauth_token'],)
        try:
            # If the request token is already used, return
            request_token_secret = run_sql(query, params)[0][0]
        except IndexError:
            req.g['oauth1_msg'] = 22
            return None, None
        response = provider.get_access_token(
            'GET',
            request_token = args['oauth_token'],
            request_token_secret = request_token_secret,
            params = {
                'oauth_verifier': args['oauth_verifier']
            }
        )
        if req.g['oauth1_debug']:
            req.g['oauth1_debug_msg'] = str(response.content) + "<br/>"
        # Some providers send the identity and access token together.
        email, identity = self._get_user_email_and_id(response.content, req)
        if not identity and config.has_key('request_url'):
            # For some providers, to reach user profile we need to make request
            # to a specific url.
            params = config.get('request_parameters', {})
            response = provider.get(config['request_url'],
                params = params,
                access_token = response.content['oauth_token'],
                access_token_secret = response.content['oauth_token_secret']
                )
            # NOTE(review): this reads req.oauth1_debug while every other
            # branch reads req.g['oauth1_debug'] -- looks like a bug;
            # confirm SimulatedModPythonRequest exposes this attribute.
            if req.oauth1_debug:
                req.g['oauth1_debug_msg'] += str(response.content) + "<br/>"
            email, identity = self._get_user_email_and_id(response.content, req)
        if identity:
            # If identity is found, add the name of the provider at the
            # beginning of the identity because different providers may have
            # different users with same id.
            identity = "%s:%s" % (req.g['oauth1_provider_name'], identity)
        else:
            # No identity could be extracted: code 23.
            req.g['oauth1_msg'] = 23
        # Delete the token saved in the database since it is useless now.
        # Stale tokens older than one hour are also purged.
        query = """
            DELETE FROM oauth1_storage
            WHERE token=%s
                OR date_creation < DATE_SUB(NOW(), INTERVAL 1 HOUR)
            """
        params = (args['oauth_token'],)
        run_sql(query, params)
        if req.g['oauth1_debug']:
            # Debug mode: surface the collected debug trace instead of
            # completing the login.
            req.g['oauth1_msg'] = "<code>%s</code>" % req.g['oauth1_debug_msg'].replace("\n", "<br/>")
            return None, None
        return email, identity

    def fetch_user_nickname(self, username, password=None, req=None):
        """
        Fetches the OAuth1 provider for nickname of the user. If it doesn't
        find any, returns None.

        This function doesn't need username, password or req. They are exist
        just because this class is derived from ExternalAuth

        @param username: Isn't used in this function
        @type username: str

        @param password: Isn't used in this function
        @type password: str

        @param req: Isn't used in this function
        @type req: invenio.webinterface_handler_wsgi.SimulatedModPythonRequest

        @rtype: str or NoneType
        """
        from invenio.access_control_config import CFG_OAUTH1_CONFIGURATIONS
        if req.g['oauth1_provider_name']:
            path = None
            if CFG_OAUTH1_CONFIGURATIONS[req.g['oauth1_provider_name']].has_key(
                'nickname'
                ):
                path = CFG_OAUTH1_CONFIGURATIONS[req.g['oauth1_provider_name']]['nickname']
            if path:
                # NOTE(review): reads req.oauth1_response, while
                # _get_user_email_and_id stores the response under
                # req.g['oauth1_response'] -- verify which one is correct.
                return get_substructure(req.oauth1_response, path)
        else:
            return None

    def _get_user_email_and_id(self, container, req):
        """
        Returns external identity and email address together. Since identity is
        essential for OAuth1 authentication, if it doesn't find external
        identity returns None, None.

        @param container: container which contains email and id
        @type container: list|dict

        @rtype str|NoneType, str|NoneType
        """
        from invenio.access_control_config import CFG_OAUTH1_CONFIGURATIONS
        identity = None
        email = None
        if CFG_OAUTH1_CONFIGURATIONS[req.g['oauth1_provider_name']].has_key('id'):
            path = CFG_OAUTH1_CONFIGURATIONS[req.g['oauth1_provider_name']]['id']
            identity = get_substructure(container, path)
        if identity:
            if CFG_OAUTH1_CONFIGURATIONS[req.g['oauth1_provider_name']].has_key('email'):
                path = CFG_OAUTH1_CONFIGURATIONS[req.g['oauth1_provider_name']]['email']
                email = get_substructure(container, path)
            # Remember the raw provider response for later nickname lookup.
            req.g['oauth1_response'] = container
        return email, identity

    @staticmethod
    def get_msg(req):
        """Return the message code stored on the request by auth_user."""
        return req.g['oauth1_msg']
| Panos512/invenio | modules/webaccess/lib/external_authentication_oauth1.py | Python | gpl-2.0 | 8,849 |
# -*- coding: utf-8 -*-
"""Converts an IRI to a URI."""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
import urllib.parse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to enocde and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# Code-point ranges that must be %-escaped when converting an IRI to a
# URI: the 'ucschar' and 'iprivate' productions of RFC 3987.
escape_range = [
    (0xA0, 0xD7FF),
    (0xE000, 0xF8FF),
    (0xF900, 0xFDCF),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD),
    (0x20000, 0x2FFFD),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD),
    (0x50000, 0x5FFFD),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD),
    (0x80000, 0x8FFFD),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD),
    (0xB0000, 0xBFFFD),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD),
    (0x100000, 0x10FFFD),
]


def encode(c):
    """Return *c* unchanged, or its %-escaped UTF-8 form when its code
    point falls inside one of the RFC 3987 escape ranges."""
    codepoint = ord(c)
    for low, high in escape_range:
        if codepoint < low:
            # Ranges are sorted, so no later range can match either.
            break
        if codepoint <= high:
            return "".join(["%%%2X" % o for o in c.encode("utf-8")])
    return c
def iri2uri(uri):
    """Convert an IRI to a URI. Note that IRIs must be
    passed in a unicode strings. That is, do not utf-8 encode
    the IRI before passing it into the function."""
    if not isinstance(uri, str):
        # Already bytes (or not a string at all): pass through untouched.
        return uri
    scheme, authority, path, query, fragment = urllib.parse.urlsplit(uri)
    # Non-ASCII host names use IDNA encoding rather than %-escaping.
    authority = authority.encode("idna").decode("utf-8")
    # For each character in 'ucschar' or 'iprivate':
    # 1. encode as utf-8
    # 2. then %-encode each octet of that utf-8
    recombined = urllib.parse.urlunsplit(
        (scheme, authority, path, query, fragment))
    return "".join(encode(c) for c in recombined)
if __name__ == "__main__":
    # Self-test: run the embedded unit tests when executed directly.
    import unittest

    class Test(unittest.TestCase):
        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            invariant = [
                "ftp://ftp.is.co.za/rfc/rfc1808.txt",
                "http://www.ietf.org/rfc/rfc2396.txt",
                "ldap://[2001:db8::7]/c=GB?objectClass?one",
                "mailto:John.Doe@example.com",
                "news:comp.infosystems.www.servers.unix",
                "tel:+1-816-555-1212",
                "telnet://192.0.2.16:80/",
                "urn:oasis:names:specification:docbook:dtd:xml:4.1.2",
            ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))

        def test_iri(self):
            """Test that the right type of escaping is done for each part of the URI."""
            # Host is IDNA-encoded; path/query/fragment are %-escaped.
            self.assertEqual(
                "http://xn--o3h.com/%E2%98%84",
                iri2uri("http://\N{COMET}.com/\N{COMET}"),
            )
            self.assertEqual(
                "http://bitworking.org/?fred=%E2%98%84",
                iri2uri("http://bitworking.org/?fred=\N{COMET}"),
            )
            self.assertEqual(
                "http://bitworking.org/#%E2%98%84",
                iri2uri("http://bitworking.org/#\N{COMET}"),
            )
            self.assertEqual("#%E2%98%84", iri2uri("#\N{COMET}"))
            self.assertEqual(
                "/fred?bar=%E2%98%9A#%E2%98%84",
                iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"),
            )
            # The transformation is idempotent.
            self.assertEqual(
                "/fred?bar=%E2%98%9A#%E2%98%84",
                iri2uri(iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")),
            )
            # bytes input is passed through untouched, so no escaping.
            self.assertNotEqual(
                "/fred?bar=%E2%98%9A#%E2%98%84",
                iri2uri(
                    "/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode("utf-8")
                ),
            )

    unittest.main()
| endlessm/chromium-browser | tools/swarming_client/third_party/httplib2/python3/httplib2/iri2uri.py | Python | bsd-3-clause | 4,153 |
"""
Demo platform that has two fake alarm control panels.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
import homeassistant.components.alarm_control_panel.manual as manual
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Demo alarm control panel platform."""
    # One manual panel: code '1234', 5s pending time, 10s trigger time,
    # no code required to disarm.
    demo_panel = manual.ManualAlarm(hass, 'Alarm', '1234', 5, 10, False)
    add_devices([demo_panel])
| Smart-Torvy/torvy-home-assistant | homeassistant/components/alarm_control_panel/demo.py | Python | mit | 462 |
#!/usr/bin/env python
# Author: Shao Zhang and Phil Saltzman
# Last Updated: 2015-03-13
#
# This tutorial is intended as a initial panda scripting lesson going over
# display initialization, loading models, placing objects, and the scene graph.
#
# Step 3: In this step, we create a function called loadPlanets, which will
# eventually be used to load all of the planets in our simulation. For now
# we will load just the sun and and the sky-sphere we use to create the
# star-field.
from direct.showbase.ShowBase import ShowBase
base = ShowBase()
from panda3d.core import NodePath, TextNode
from direct.gui.DirectGui import *
import sys
class World(object):
    """Builds the tutorial scene: title text, camera setup, the
    star-field sky sphere and the sun."""

    def __init__(self):
        # This is the initialization we had before
        self.title = OnscreenText(  # Create the title
            text="Panda3D: Tutorial 1 - Solar System",
            parent=base.a2dBottomRight, align=TextNode.A_right,
            style=1, fg=(1, 1, 1, 1), pos=(-0.1, 0.1), scale=.07)

        base.setBackgroundColor(0, 0, 0)  # Set the background to black
        base.disableMouse()  # disable mouse control of the camera
        camera.setPos(0, 0, 45)  # Set the camera position (X, Y, Z)
        camera.setHpr(0, -90, 0)  # Set the camera orientation
        # (heading, pitch, roll) in degrees

        # We will now define a variable to help keep a consistent scale in
        # our model. As we progress, we will continue to add variables here as we
        # need them

        # The value of this variable scales the size of the planets. True scale size
        # would be 1
        self.sizescale = 0.6

        # Now that we have finished basic initialization, we call loadPlanets which
        # will handle actually getting our objects in the world
        self.loadPlanets()

    def loadPlanets(self):
        """Load the sky sphere and the sun and attach them to render."""
        # Here, inside our class, is where we are creating the loadPlanets function
        # For now we are just loading the star-field and sun. In the next step we
        # will load all of the planets

        # Loading objects in Panda is done via the command loader.loadModel, which
        # takes one argument, the path to the model file. Models in Panda come in
        # two types, .egg (which is readable in a text editor), and .bam (which is
        # not readable but makes smaller files). When you load a file you leave the
        # extension off so that it can choose the right version

        # Load model returns a NodePath, which you can think of as an object
        # containing your model

        # Here we load the sky model. For all the planets we will use the same
        # sphere model and simply change textures. However, even though the sky is
        # a sphere, it is different from the planet model because its polygons
        # (which are always one-sided in Panda) face inside the sphere instead of
        # outside (this is known as a model with reversed normals). Because of
        # that it has to be a separate model.
        self.sky = loader.loadModel("models/solar_sky_sphere")

        # After the object is loaded, it must be placed in the scene. We do this by
        # changing the parent of self.sky to render, which is a special NodePath.
        # Each frame, Panda starts with render and renders everything attached to
        # it.
        self.sky.reparentTo(render)

        # You can set the position, orientation, and scale on a NodePath the same
        # way that you set those properties on the camera. In fact, the camera is
        # just another special NodePath
        self.sky.setScale(40)

        # Very often, the egg file will know what textures are needed and load them
        # automatically. But sometimes we want to set our textures manually, (for
        # instance we want to put different textures on the same planet model)
        # Loading textures works the same way as loading models, but instead of
        # calling loader.loadModel, we call loader.loadTexture
        self.sky_tex = loader.loadTexture("models/stars_1k_tex.jpg")

        # Finally, the following line sets our new sky texture on our sky model.
        # The second argument must be one or the command will be ignored.
        self.sky.setTexture(self.sky_tex, 1)

        # Now we load the sun.
        self.sun = loader.loadModel("models/planet_sphere")
        # Now we repeat our other steps
        self.sun.reparentTo(render)
        self.sun_tex = loader.loadTexture("models/sun_1k_tex.jpg")
        self.sun.setTexture(self.sun_tex, 1)
        # The sun is really much bigger than
        self.sun.setScale(2 * self.sizescale)
        # this, but to be able to see the
        # planets we're making it smaller

    # end loadPlanets()

# end class world
# end class world
# instantiate the class
w = World()
base.run()
| brakhane/panda3d | samples/solar-system/step3_load_model.py | Python | bsd-3-clause | 4,775 |
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is the Python XPCOM language bindings.
#
# The Initial Developer of the Original Code is
# ActiveState Tool Corp.
# Portions created by the Initial Developer are Copyright (C) 2000, 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Hammond <MarkH@ActiveState.com> (original author)
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""Implementation of Python file objects for Mozilla/xpcom.
Introduction:
This module defines various class that are implemented using
Mozilla streams. This allows you to open Mozilla URI's, and
treat them as Python file object.
Example:
>>> file = URIFile("chrome://whatever")
>>> data = file.read(5) # Pass no arg to read everything.
Known Limitations:
* Not all URL schemes will work from "python.exe" - most notably
"chrome://" and "http://" URLs - this is because a simple initialization of
xpcom by Python does not load up the full set of Mozilla URL handlers.
  These should work if you can work out how to correctly initialize the
  chrome registry and set up a message queue.
Known Bugs:
 * Only read ("r") mode is supported. Although write ("w") mode doesn't make
   sense for HTTP type URLs, it potentially does for file:// etc type ones.
* No concept of text mode vs binary mode. It appears Mozilla takes care of
this internally (ie, all "text/???" mime types are text, rest are binary)
"""
from xpcom import components, Exception, _xpcom
import os
import threading # for locks.
NS_RDONLY = 0x01
NS_WRONLY = 0x02
NS_RDWR = 0x04
NS_CREATE_FILE = 0x08
NS_APPEND = 0x10
NS_TRUNCATE = 0x20
NS_SYNC = 0x40
NS_EXCL = 0x80
# A helper function that may come in useful
def LocalFileToURL(localFileName):
    "Convert a filename to an XPCOM nsIFileURL object."
    # localFileName is a native filesystem path string (it is handed to
    # nsILocalFile.initWithPath below).
    # Create an nsILocalFile
    localFile = components.classes["@mozilla.org/file/local;1"] \
        .createInstance(components.interfaces.nsILocalFile)
    localFile.initWithPath(localFileName)

    # Use the IO Service to create the interface, then QI for a FileURL
    io_service = components.classes["@mozilla.org/network/io-service;1"] \
        .getService(components.interfaces.nsIIOService)
    url = io_service.newFileURI(localFile).queryInterface(components.interfaces.nsIFileURL)
    # Setting the "file" attribute causes initialization...
    url.file = localFile
    return url
# A base class for file objects.
class _File:
    """Base class for Mozilla-stream backed file objects.

    Subclasses create self.inputStream and/or self.outputStream in their
    init() method; this class layers the Python file-object protocol
    (read/readlines/write/close/flush) on top of those streams, guarded
    by a lock because the Mozilla streams are not thread safe.
    """
    def __init__(self, name_thingy = None, mode="r"):
        self.lockob = threading.Lock()
        self.inputStream = self.outputStream = None
        if name_thingy is not None:
            self.init(name_thingy, mode)
    def __del__(self):
        self.close()
    # The Moz file streams are not thread safe.
    def _lock(self):
        self.lockob.acquire()
    def _release(self):
        self.lockob.release()
    def read(self, n = -1):
        # n == -1 reads the entire stream.
        assert self.inputStream is not None, "Not setup for read!"
        self._lock()
        try:
            return str(self.inputStream.read(n))
        finally:
            self._release()
    def readlines(self):
        # Not part of the xpcom interface, but handy for direct Python users.
        # Not 100% faithful, but near enough for now!
        lines = self.read().split("\n")
        if len(lines) and len(lines[-1]) == 0:
            lines = lines[:-1]
        return [s+"\n" for s in lines ]
    def write(self, data):
        assert self.outputStream is not None, "Not setup for write!"
        self._lock()
        try:
            self.outputStream.write(data, len(data))
        finally:
            self._release()
    def close(self):
        # Safe to call more than once; streams are released on first call.
        self._lock()
        try:
            if self.inputStream is not None:
                self.inputStream.close()
                self.inputStream = None
            if self.outputStream is not None:
                self.outputStream.close()
                self.outputStream = None
            # NOTE(review): self.channel is only ever created by
            # URIFile.init -- here it is unconditionally (re)assigned,
            # which also creates the attribute for non-URI subclasses.
            self.channel = None
        finally:
            self._release()
    def flush(self):
        self._lock()
        try:
            if self.outputStream is not None: self.outputStream.flush()
        finally:
            self._release()
# A synchronous "file object" used to open a URI.
class URIFile(_File):
    """A synchronous read-only "file object" used to open a URI."""
    def init(self, url, mode="r"):
        """Open *url* (a string or an nsIURI-like object) for reading."""
        self.close()
        if mode != "r":
            raise ValueError, "only 'r' mode supported'"
        io_service = components.classes["@mozilla.org/network/io-service;1"] \
            .getService(components.interfaces.nsIIOService)
        # Accept an already-constructed nsIURI as well as a URL string.
        if hasattr(url, "queryInterface"):
            url_ob = url
        else:
            url_ob = io_service.newURI(url, None, None)
        # Mozilla asserts and starts saying "NULL POINTER" if this is wrong!
        if not url_ob.scheme:
            raise ValueError, ("The URI '%s' is invalid (no scheme)"
                               % (url_ob.spec,))
        self.channel = io_service.newChannelFromURI(url_ob)
        # Synchronous open: blocks until the channel's stream is ready.
        self.inputStream = self.channel.open()
# A "file object" implemented using Netscape's native file support.
# Based on io.js - http://lxr.mozilla.org/seamonkey/source/xpcom/tests/utils/io.js
# You open this file using a local file name (as a string) so it really is pointless -
# you may as well be using a standard Python file object!
class LocalFile(_File):
    """A "file object" implemented using Mozilla's native file support.

    Based on io.js - http://lxr.mozilla.org/seamonkey/source/xpcom/tests/utils/io.js
    You open this using a local file name (a string), so a standard Python
    file object would do just as well - this exists as a demo.
    """
    def __init__(self, *args):
        self.fileIO = None
        _File.__init__(self, *args)
    def init(self, name, mode = "r"):
        # Moz libraries under Linux fail with relative paths.
        name = os.path.abspath(name)
        self.close()
        # renamed from 'file' - don't shadow the builtin.
        moz_file = components.classes['@mozilla.org/file/local;1'].createInstance("nsILocalFile")
        moz_file.initWithPath(name)
        if mode in ["w", "a"]:
            self.fileIO = components.classes["@mozilla.org/network/file-output-stream;1"].createInstance("nsIFileOutputStream")
            if mode == "w":
                if moz_file.exists():
                    moz_file.remove(0)
                moz_mode = NS_CREATE_FILE | NS_WRONLY
            elif mode == "a":
                moz_mode = NS_APPEND
            else:
                assert 0, "Can't happen!"
            self.fileIO.init(moz_file, moz_mode, -1, 0)
            self.outputStream = self.fileIO
        elif mode == "r":
            self.fileIO = components.classes["@mozilla.org/network/file-input-stream;1"].createInstance("nsIFileInputStream")
            self.fileIO.init(moz_file, NS_RDONLY, -1, 0)
            # Wrap in a scriptable stream so read() returns data to Python.
            self.inputStream = components.classes["@mozilla.org/scriptableinputstream;1"].createInstance("nsIScriptableInputStream")
            self.inputStream.init(self.fileIO)
        else:
            # py3-compatible raise (was the Python 2 "raise E, msg" form).
            raise ValueError("Unknown mode")
    def close(self):
        if self.fileIO is not None:
            self.fileIO.close()
            self.fileIO = None
        _File.close(self)
    def read(self, n = -1):
        # Redundant override kept for interface compatibility.
        return _File.read(self, n)
##########################################################
##
## Test Code
##
##########################################################
def _DoTestRead(file, expected):
# read in a couple of chunks, just to test that our various arg combinations work.
got = file.read(3)
got = got + file.read(300)
got = got + file.read(0)
got = got + file.read()
if got != expected:
raise RuntimeError, "Reading '%s' failed - got %d bytes, but expected %d bytes" % (file, len(got), len(expected))
def _DoTestBufferRead(file, expected):
    """Read `file` through the native buffer-based stream API and verify it.

    Reaches into file.inputStream so we can use the buffer-supporting read()
    on the raw stream; raises RuntimeError on a data mismatch.
    """
    buffer = _xpcom.AllocateBuffer(50)
    got = ''
    while 1:
        # Note - we need to reach into the file object so we
        # can get at the native buffer supported function.
        num = file.inputStream.read(buffer)
        if num == 0:
            break
        got = got + str(buffer[:num])
    if got != expected:
        # py3-compatible raise (was the Python 2 only "raise E, msg" form).
        raise RuntimeError("Reading '%s' failed - got %d bytes, but expected %d bytes"
                           % (file, len(got), len(expected)))
def _TestLocalFile():
    """End-to-end test of LocalFile: write, read back, re-init, recreate."""
    import tempfile, os
    # NOTE: mktemp is race-prone, but acceptable for this self-test.
    fname = tempfile.mktemp()
    data = "Hello from Python"
    test_file = LocalFile(fname, "w")
    try:
        test_file.write(data)
        test_file.close()
        # Make sure Python can read it OK.
        f = open(fname, "r")
        assert f.read() == data, "Eeek - Python could not read the data back correctly!"
        f.close()
        # For the sake of the test, try a re-init.
        test_file.init(fname, "r")
        got = str(test_file.read())
        assert got == data, got
        test_file.close()
        # Try reading in chunks.
        test_file = LocalFile(fname, "r")
        got = test_file.read(10) + test_file.read()
        assert got == data, got
        test_file.close()
        # Open the same file again for writing - this should delete the old one.
        if not os.path.isfile(fname):
            # py3-compatible raise (was the Python 2 "raise E, msg" form).
            raise RuntimeError("The file '%s' does not exist, but we are explicitly testing create semantics when it does" % (fname,))
        test_file = LocalFile(fname, "w")
        test_file.write(data)
        test_file.close()
        # Make sure Python can read it OK.
        f = open(fname, "r")
        assert f.read() == data, "Eeek - Python could not read the data back correctly after recreating an existing file!"
        f.close()
        # XXX - todo - test "a" mode!
    finally:
        os.unlink(fname)
def _TestAll():
    """Mini test-suite: read a known file back through each file object."""
    # Use the 'components' module's own source as reference data, mapping a
    # .pyc/.pyo filename back to the .py source it came from.
    source_name = components.__file__
    if source_name[-1] in "cCoO":
        source_name = source_name[:-1]
    expected = open(source_name, "rb").read()
    # Build a file:// URL object for the same file.
    url = LocalFileToURL(source_name)
    # URIFile must accept both a URL string and a URL object.
    _DoTestRead(URIFile(url.spec), expected)
    _DoTestRead(URIFile(url), expected)
    _DoTestBufferRead(URIFile(url), expected)
    # Our pointless demo LocalFile object should agree.
    _DoTestRead(LocalFile(source_name), expected)
    # Finally, the full LocalFile round-trip test.
    _TestLocalFile()
def _TestURI(url):
    # Open the given URI synchronously and report how many bytes it yields.
    # (Python 2 print statements - this module predates py3.)
    test_file = URIFile(url)
    print "Opened file is", test_file
    got = test_file.read()
    print "Read %d bytes of data from %r" % (len(got), url)
    test_file.close()
if __name__=='__main__':
    import sys
    # With no URL argument, run the built-in self-test suite; otherwise fetch
    # and report on the URI named on the command line.
    if len(sys.argv) < 2:
        print "No URL specified on command line - performing self-test"
        _TestAll()
    else:
        _TestURI(sys.argv[1])
| ruibarreira/linuxtrail | usr/lib/virtualbox/sdk/bindings/xpcom/python/xpcom/file.py | Python | gpl-3.0 | 11,962 |
#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import runner
import pytest
def test_log_parser():
    """A single [PERF] block parses into one dict; 'undefined' becomes None."""
    mock_log = b'''
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/index.html
[PERF],navigationStart,1460358376
[PERF],unloadEventStart,undefined
[PERF],unloadEventEnd,undefined
[PERF],redirectStart,undefined
[PERF],redirectEnd,undefined
[PERF],fetchStart,undefined
[PERF],domainLookupStart,undefined
[PERF],domainLookupEnd,undefined
[PERF],connectStart,undefined
[PERF],connectEnd,undefined
[PERF],secureConnectionStart,undefined
[PERF],requestStart,undefined
[PERF],responseStart,undefined
[PERF],responseEnd,undefined
[PERF],domLoading,1460358376000
[PERF],domInteractive,1460358388000
[PERF],domContentLoadedEventStart,1460358388000
[PERF],domContentLoadedEventEnd,1460358388000
[PERF],domComplete,1460358389000
[PERF],loadEventStart,undefined
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
    expected = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "navigationStart": 1460358376,
        "unloadEventStart": None,
        "unloadEventEnd": None,
        "redirectStart": None,
        "redirectEnd": None,
        "fetchStart": None,
        "domainLookupStart": None,
        "domainLookupEnd": None,
        "connectStart": None,
        "connectEnd": None,
        "secureConnectionStart": None,
        "requestStart": None,
        "responseStart": None,
        "responseEnd": None,
        "domLoading": 1460358376000,
        "domInteractive": 1460358388000,
        "domContentLoadedEventStart": 1460358388000,
        "domContentLoadedEventEnd": 1460358388000,
        "domComplete": 1460358389000,
        "loadEventStart": None,
        "loadEventEnd": None
    }]
    result = runner.parse_log(mock_log)
    assert(expected == list(result))
def test_log_parser_complex():
    """Two [PERF] blocks separated by unrelated log noise parse into two dicts, in order."""
    mock_log = b'''
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/content.html
[PERF],navigationStart,1460358300
[PERF],unloadEventStart,undefined
[PERF],unloadEventEnd,undefined
[PERF],redirectStart,undefined
[PERF],redirectEnd,undefined
[PERF],fetchStart,undefined
[PERF],domainLookupStart,undefined
[PERF],domainLookupEnd,undefined
[PERF],connectStart,undefined
[PERF],connectEnd,undefined
[PERF],secureConnectionStart,undefined
[PERF],requestStart,undefined
[PERF],responseStart,undefined
[PERF],responseEnd,undefined
[PERF],domLoading,1460358376000
[PERF],domInteractive,1460358388000
[PERF],domContentLoadedEventStart,1460358388000
[PERF],domContentLoadedEventEnd,1460358388000
[PERF],domComplete,1460358389000
[PERF],loadEventStart,undefined
[PERF],loadEventEnd,undefined
[PERF] perf block end
Some other js error logs here
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/index.html
[PERF],navigationStart,1460358376
[PERF],unloadEventStart,undefined
[PERF],unloadEventEnd,undefined
[PERF],redirectStart,undefined
[PERF],redirectEnd,undefined
[PERF],fetchStart,undefined
[PERF],domainLookupStart,undefined
[PERF],domainLookupEnd,undefined
[PERF],connectStart,undefined
[PERF],connectEnd,undefined
[PERF],secureConnectionStart,undefined
[PERF],requestStart,undefined
[PERF],responseStart,undefined
[PERF],responseEnd,undefined
[PERF],domLoading,1460358376000
[PERF],domInteractive,1460358388000
[PERF],domContentLoadedEventStart,1460358388000
[PERF],domContentLoadedEventEnd,1460358388000
[PERF],domComplete,1460358389000
[PERF],loadEventStart,undefined
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
    expected = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
        "navigationStart": 1460358300,
        "unloadEventStart": None,
        "unloadEventEnd": None,
        "redirectStart": None,
        "redirectEnd": None,
        "fetchStart": None,
        "domainLookupStart": None,
        "domainLookupEnd": None,
        "connectStart": None,
        "connectEnd": None,
        "secureConnectionStart": None,
        "requestStart": None,
        "responseStart": None,
        "responseEnd": None,
        "domLoading": 1460358376000,
        "domInteractive": 1460358388000,
        "domContentLoadedEventStart": 1460358388000,
        "domContentLoadedEventEnd": 1460358388000,
        "domComplete": 1460358389000,
        "loadEventStart": None,
        "loadEventEnd": None
    }, {
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "navigationStart": 1460358376,
        "unloadEventStart": None,
        "unloadEventEnd": None,
        "redirectStart": None,
        "redirectEnd": None,
        "fetchStart": None,
        "domainLookupStart": None,
        "domainLookupEnd": None,
        "connectStart": None,
        "connectEnd": None,
        "secureConnectionStart": None,
        "requestStart": None,
        "responseStart": None,
        "responseEnd": None,
        "domLoading": 1460358376000,
        "domInteractive": 1460358388000,
        "domContentLoadedEventStart": 1460358388000,
        "domContentLoadedEventEnd": 1460358388000,
        "domComplete": 1460358389000,
        "loadEventStart": None,
        "loadEventEnd": None
    }]
    result = runner.parse_log(mock_log)
    assert(expected == list(result))
def test_log_parser_empty():
    """A [PERF] block with no parsable lines yields the placeholder record
    (navigationStart 0, all timings -1) keyed by the supplied testcase."""
    mock_log = b'''
[PERF] perf block start
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF] perf block end
'''
    mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
    expected = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "navigationStart": 0,
        "unloadEventStart": -1,
        "unloadEventEnd": -1,
        "redirectStart": -1,
        "redirectEnd": -1,
        "fetchStart": -1,
        "domainLookupStart": -1,
        "domainLookupEnd": -1,
        "connectStart": -1,
        "connectEnd": -1,
        "secureConnectionStart": -1,
        "requestStart": -1,
        "responseStart": -1,
        "responseEnd": -1,
        "domLoading": -1,
        "domInteractive": -1,
        "domContentLoadedEventStart": -1,
        "domContentLoadedEventEnd": -1,
        "domComplete": -1,
        "loadEventStart": -1,
        "loadEventEnd": -1
    }]
    result = runner.parse_log(mock_log, mock_testcase)
    assert(expected == list(result))
def test_log_parser_error():
    """Output containing no [PERF] block at all also yields the placeholder record."""
    mock_log = b'Nothing here! Test failed!'
    mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
    expected = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "navigationStart": 0,
        "unloadEventStart": -1,
        "unloadEventEnd": -1,
        "redirectStart": -1,
        "redirectEnd": -1,
        "fetchStart": -1,
        "domainLookupStart": -1,
        "domainLookupEnd": -1,
        "connectStart": -1,
        "connectEnd": -1,
        "secureConnectionStart": -1,
        "requestStart": -1,
        "responseStart": -1,
        "responseEnd": -1,
        "domLoading": -1,
        "domInteractive": -1,
        "domContentLoadedEventStart": -1,
        "domContentLoadedEventEnd": -1,
        "domComplete": -1,
        "loadEventStart": -1,
        "loadEventEnd": -1
    }]
    result = runner.parse_log(mock_log, mock_testcase)
    assert(expected == list(result))
def test_log_parser_bad_testcase_name():
    """If the block's testcase doesn't match the expected one (e.g. servo
    crashed and reported about:blank), the block is discarded and the
    placeholder record is returned instead."""
    mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
    # Notice the testcase is about:blank, servo crashed
    mock_log = b'''
[PERF] perf block start
[PERF],testcase,about:blank
[PERF],navigationStart,1460358376
[PERF],unloadEventStart,undefined
[PERF],unloadEventEnd,undefined
[PERF],redirectStart,undefined
[PERF],redirectEnd,undefined
[PERF],fetchStart,undefined
[PERF],domainLookupStart,undefined
[PERF],domainLookupEnd,undefined
[PERF],connectStart,undefined
[PERF],connectEnd,undefined
[PERF],secureConnectionStart,undefined
[PERF],requestStart,undefined
[PERF],responseStart,undefined
[PERF],responseEnd,undefined
[PERF],domLoading,1460358376000
[PERF],domInteractive,1460358388000
[PERF],domContentLoadedEventStart,1460358388000
[PERF],domContentLoadedEventEnd,1460358388000
[PERF],domComplete,1460358389000
[PERF],loadEventStart,undefined
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
    expected = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "navigationStart": 0,
        "unloadEventStart": -1,
        "unloadEventEnd": -1,
        "redirectStart": -1,
        "redirectEnd": -1,
        "fetchStart": -1,
        "domainLookupStart": -1,
        "domainLookupEnd": -1,
        "connectStart": -1,
        "connectEnd": -1,
        "secureConnectionStart": -1,
        "requestStart": -1,
        "responseStart": -1,
        "responseEnd": -1,
        "domLoading": -1,
        "domInteractive": -1,
        "domContentLoadedEventStart": -1,
        "domContentLoadedEventEnd": -1,
        "domComplete": -1,
        "loadEventStart": -1,
        "loadEventEnd": -1
    }]
    result = runner.parse_log(mock_log, mock_testcase)
    assert(expected == list(result))
def test_manifest_loader():
    """parse_manifest keeps each listed URL and drops '#'-commented lines."""
    manifest_text = '''
http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html
http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html
http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html
# Disabled! http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html
'''
    expected_urls = [
        "http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html",
        "http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html",
        "http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html"
    ]
    assert expected_urls == list(runner.parse_manifest(manifest_text))
def test_filter_result_by_manifest():
    """Only results whose testcase appears in the manifest are kept."""
    input_json = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
        "domComplete": 1460358389000,
    }, {
        "testcase": "non-existing-html",
        "domComplete": 1460358389000,
    }, {
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": 1460358389000,
    }]
    expected = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": 1460358389000,
    }]
    manifest = [
        "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
    ]
    assert(expected == runner.filter_result_by_manifest(input_json, manifest))
def test_filter_result_by_manifest_error():
    """filter_result_by_manifest raises when a manifest entry has no result."""
    results = [{
        "testcase": "1.html",
        "domComplete": 1460358389000,
    }]
    wanted = [
        "1.html",
        "2.html"
    ]
    with pytest.raises(Exception) as excinfo:
        runner.filter_result_by_manifest(results, wanted)
    assert "Missing test result" in str(excinfo.value)
def test_take_result_median_odd():
    """With an odd number of runs the per-field median value is taken
    (fields are medianed independently, not per-record)."""
    input_json = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": 1460358389001,
        "domLoading": 1460358380002
    }, {
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": 1460358389002,
        "domLoading": 1460358380001
    }, {
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": 1460358389003,
        "domLoading": 1460358380003
    }]
    expected = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": 1460358389002,
        "domLoading": 1460358380002
    }]
    assert(expected == runner.take_result_median(input_json, len(input_json)))
def test_take_result_median_even():
    """With an even number of runs the median is the mean of the two middle values."""
    input_json = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": 1460358389001,
        "domLoading": 1460358380002
    }, {
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": 1460358389002,
        "domLoading": 1460358380001
    }]
    expected = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": 1460358389001.5,
        "domLoading": 1460358380001.5
    }]
    assert(expected == runner.take_result_median(input_json, len(input_json)))
def test_take_result_median_error():
    """None values are excluded from the median, per field."""
    input_json = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": None,
        "domLoading": 1460358380002
    }, {
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": 1460358389002,
        "domLoading": 1460358380001
    }]
    expected = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": 1460358389002,
        "domLoading": 1460358380001.5
    }]
    assert(expected == runner.take_result_median(input_json, len(input_json)))
def test_log_result():
    """format_result_summary counts failures (domComplete == -1) and lists
    each failing testcase only once."""
    results = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": -1
    }, {
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": -1
    }, {
        "testcase": "http://localhost:8000/page_load_test/104.com/www.104.com/index.html",
        "domComplete": 123456789
    }]
    expected = """
========================================
Total 3 tests; 1 succeeded, 2 failed.
Failure summary:
- http://localhost:8000/page_load_test/56.com/www.56.com/index.html
========================================
"""
    assert(expected == runner.format_result_summary(results))
| wldcordeiro/servo | etc/ci/performance/test_runner.py | Python | mpl-2.0 | 14,471 |
#!/usr/bin/python
# This creates a CSV file from the output of the debug output of subtarget:
# llvm-tblgen --gen-subtarget --debug-only=subtarget-emitter
# With thanks to Dave Estes for mentioning the idea at 2014 LLVM Developers' Meeting
import os;
import sys;
import re;
import operator;
# instruction -> {model -> resource-or-None}, accumulated while scanning.
table = {}
# Every model name ever seen.
models = set()
# Optional regex restricting which models are displayed (set from argv below).
filt = None
def add(instr, model, resource=None):
    """Record that instruction `instr` has scheduling info `resource` under `model`."""
    global table, models
    row = table.setdefault(instr, {})
    row[model] = resource
    models.add(model)
def filter_model(m):
    """Return True if model name `m` passes the global `filt` regex (if any)."""
    if not m or not filt:
        # No name to test, or no filter configured - everything passes.
        return True
    return filt.search(m) is not None
def display():
    """Dump the accumulated table to stdout as CSV.

    Columns are ordered: itinerary, default, then remaining models sorted by
    name and filtered through filter_model(); rows are sorted by instruction.
    """
    global table, models
    # remove default and itinerary so we can control their sort order to make
    # them first
    models.discard("default")
    models.discard("itinerary")
    ordered_table = sorted(table.items(), key=operator.itemgetter(0))
    ordered_models = ["itinerary", "default"]
    ordered_models.extend(sorted(models))
    ordered_models = [m for m in ordered_models if filter_model(m)]
    # print header
    sys.stdout.write("instruction")
    for model in ordered_models:
        sys.stdout.write(", {}".format(model))
    sys.stdout.write(os.linesep)
    for (instr, mapping) in ordered_table:
        sys.stdout.write(instr)
        for model in ordered_models:
            # A stored value of None means "seen, but no resource string".
            if model in mapping and mapping[model] is not None:
                sys.stdout.write(", {}".format(mapping[model]))
            else:
                sys.stdout.write(", ")
        sys.stdout.write(os.linesep)
def machineModelCover(path):
    """Parse llvm-tblgen --debug-only=subtarget-emitter output from `path`.

    Recognises the five kinds of scheduling debug lines, records each via
    add(), then prints the accumulated CSV with display().
    """
    # The interesting bits
    re_sched_default = re.compile("SchedRW machine model for ([^ ]*) (.*)\n");
    re_sched_no_default = re.compile("No machine model for ([^ ]*)\n");
    re_sched_spec = re.compile("InstRW on ([^ ]*) for ([^ ]*) (.*)\n");
    re_sched_no_spec = re.compile("No machine model for ([^ ]*) on processor (.*)\n");
    re_sched_itin = re.compile("Itinerary for ([^ ]*): ([^ ]*)\n")
    # scan the file
    with open(path, 'r') as f:
        for line in f.readlines():
            match = re_sched_default.match(line)
            if match: add(match.group(1), "default", match.group(2))
            match = re_sched_no_default.match(line)
            if match: add(match.group(1), "default")
            match = re_sched_spec.match(line)
            if match: add(match.group(2), match.group(1), match.group(3))
            match = re_sched_no_spec.match(line)
            if match: add(match.group(1), match.group(2))
            match = re_sched_itin.match(line)
            if match: add(match.group(1), "itinerary", match.group(2))
    display()
# Usage: schedcover.py <debug-output-file> [model-name-regex]
if len(sys.argv) > 2:
    filt = re.compile(sys.argv[2], re.IGNORECASE)
machineModelCover(sys.argv[1])
| apple/swift-llvm | utils/schedcover.py | Python | apache-2.0 | 2,759 |
def main(request, response):
    """wptserve handler: serve a page whose CSP font-src allows one cross-origin host.

    Reads the test server's config.json for the host/port, sets the CSP
    headers (plus legacy X- variants) to allow fonts only from that origin,
    and returns a page that loads a font from it.
    """
    import simplejson as json
    # Use open() in a with-block: the old file() builtin no longer exists on
    # Python 3, and the handle was previously never closed.
    with open('config.json') as f:
        source = f.read()
    s = json.JSONDecoder().decode(source)
    url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
    _CSP = "font-src " + url1
    response.headers.set("Content-Security-Policy", _CSP)
    response.headers.set("X-Content-Security-Policy", _CSP)
    response.headers.set("X-WebKit-CSP", _CSP)
    return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_font-src_cross-origin_allowed</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#font-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
<style>
@font-face {
font-family: Canvas;
src: url('""" + url1 + """/tests/csp/support/w3c/CanvasTest.ttf');
}
#test {
font-family: Canvas;
}
</style>
</head>
<body>
<p>Test passes if the two lines are different in font</p>
<div id="test">1234 ABCD</div>
<div>1234 ABCD</div>
</body>
</html> """
| kaixinjxq/web-testing-service | wts/tests/csp/csp_font-src_cross-origin_allowed-manual.py | Python | bsd-3-clause | 2,615 |
#!/usr/bin/env python
# This demonstrates a node group configurations.
#
# Node groups can be defined with the syntax "-g N@IP0,IP1-IP2,IP3".
# This says to create a group of N nodes with IPs IP0, IP1, ..., IP2,
# IP3. Run it with deterministic IPs causes lots of gratuitous IP
# reassignments. Running with --nd fixes this.
import ctdb_takeover
import sys
from optparse import make_option
import string
# Register our extra --group option with the shared takeover-simulation
# argument parser (it also consumes sys.argv at this point).
ctdb_takeover.process_args([
    make_option("-g", "--group",
                action="append", type="string", dest="groups",
                help="define a node group using N@IPs syntax"),
    ])
def expand_range(r):
    """Expand a letter range like "A-D" into ['A', 'B', 'C', 'D'].

    The alphabet is A-Z followed by a-z. Anything without a '-' is returned
    unexpanded, as a single-element list.
    """
    parts = r.split("-", 1)
    if len(parts) != 2:
        return parts
    letters = string.ascii_uppercase + string.ascii_lowercase
    lo = letters.index(parts[0])
    hi = letters.index(parts[1])
    return list(letters[lo:hi + 1])
def add_node_group(s):
    """Parse a "N@IP,IP1-IP2,..." spec and add N nodes hosting those IPs."""
    count, ip_spec = s.split("@", 1)
    ips = []
    for chunk in ip_spec.split(","):
        # Skip empty chunks (e.g. trailing commas).
        if chunk != "":
            ips.extend(expand_range(chunk))
    for _ in range(int(count)):
        c.add_node(ctdb_takeover.Node(ips))
# Build the simulated cluster from the --group options, then run a recovery
# followed by random failover iterations.
c = ctdb_takeover.Cluster()
if ctdb_takeover.options.groups is None:
    print "Error: no node groups defined."
    sys.exit(1)
for g in ctdb_takeover.options.groups:
    add_node_group(g)
c.recover()
c.random_iterations()
| rootfs/ctdb | tests/takeover/simulation/node_group.py | Python | gpl-3.0 | 1,299 |
"""Intesishome platform."""
| jawilson/home-assistant | homeassistant/components/intesishome/__init__.py | Python | apache-2.0 | 28 |
# Generated list of ns3::Callback template instantiations for which Python
# bindings are produced. Each inner list is [return-type, arg1 .. arg9],
# padded with 'ns3::empty' for unused argument slots.
callback_classes = [
    ['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'double', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| letiangit/802.11ah-ns3 | src/energy/bindings/callbacks_list.py | Python | gpl-2.0 | 641 |
# testing/schema.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import exclusions
from .. import schema, event
from . import config
# Public API of this helper module (note: deliberately a tuple).
__all__ = 'Table', 'Column',
# Extra keyword args applied to every Table() created through this harness;
# test suites may populate this dict to inject dialect options globally.
table_options = {}
def Table(*args, **kw):
    """A schema.Table wrapper/hook for dialect-specific tweaks.

    Accepts the same arguments as schema.Table, plus harness-only 'test_*'
    keyword hints (e.g. test_needs_fk, test_needs_acid) which are stripped
    before construction and used to pick per-dialect options.
    """
    # Pull out harness hints - they must not reach schema.Table itself.
    test_opts = dict([(k, kw.pop(k)) for k in list(kw)
                      if k.startswith('test_')])
    kw.update(table_options)
    if exclusions.against(config._current, 'mysql'):
        # Choose a MySQL storage engine: InnoDB when the test needs FKs or
        # transactions, otherwise the (faster) MyISAM.
        if 'mysql_engine' not in kw and 'mysql_type' not in kw:
            if 'test_needs_fk' in test_opts or 'test_needs_acid' in test_opts:
                kw['mysql_engine'] = 'InnoDB'
            else:
                kw['mysql_engine'] = 'MyISAM'
    # Apply some default cascading rules for self-referential foreign keys.
    # MySQL InnoDB has some issues around seleting self-refs too.
    if exclusions.against(config._current, 'firebird'):
        table_name = args[0]
        unpack = (config.db.dialect.
                  identifier_preparer.unformat_identifiers)
        # Only going after ForeignKeys in Columns.  May need to
        # expand to ForeignKeyConstraint too.
        fks = [fk
               for col in args if isinstance(col, schema.Column)
               for fk in col.foreign_keys]
        for fk in fks:
            # root around in raw spec
            ref = fk._colspec
            if isinstance(ref, schema.Column):
                name = ref.table.name
            else:
                # take just the table name: on FB there cannot be
                # a schema, so the first element is always the
                # table name, possibly followed by the field name
                name = unpack(ref)[0]
            if name == table_name:
                # Self-referential FK: default both cascade rules.
                if fk.ondelete is None:
                    fk.ondelete = 'CASCADE'
                if fk.onupdate is None:
                    fk.onupdate = 'CASCADE'
    return schema.Table(*args, **kw)
def Column(*args, **kw):
    """A schema.Column wrapper/hook for dialect-specific tweaks.

    Strips harness-only 'test_*' keyword hints; honors
    test_needs_autoincrement on primary key columns by flagging the column
    (and, for firebird/oracle, attaching an optional Sequence).
    """
    test_opts = dict([(k, kw.pop(k)) for k in list(kw)
                      if k.startswith('test_')])
    if not config.requirements.foreign_key_ddl.enabled_for_config(config):
        # Backend can't emit FK DDL - silently drop ForeignKey elements.
        args = [arg for arg in args if not isinstance(arg, schema.ForeignKey)]
    col = schema.Column(*args, **kw)
    if test_opts.get('test_needs_autoincrement', False) and \
            kw.get('primary_key', False):
        if col.default is None and col.server_default is None:
            col.autoincrement = True
        # allow any test suite to pick up on this
        col.info['test_needs_autoincrement'] = True
        # hardcoded rule for firebird, oracle; this should
        # be moved out
        if exclusions.against(config._current, 'firebird', 'oracle'):
            def add_seq(c, tbl):
                # Attach an optional sequence named <table>_<col>_seq,
                # truncated to the dialect's identifier limit.
                c._init_items(
                    schema.Sequence(_truncate_name(
                        config.db.dialect, tbl.name + '_' + c.name + '_seq'),
                        optional=True)
                )
            event.listen(col, 'after_parent_attach', add_seq, propagate=True)
    return col
return col
def _truncate_name(dialect, name):
if len(name) > dialect.max_identifier_length:
return name[0:max(dialect.max_identifier_length - 6, 0)] + \
"_" + hex(hash(name) % 64)[2:]
else:
return name
| wildchildyn/autism-website | yanni_env/lib/python3.6/site-packages/sqlalchemy/testing/schema.py | Python | gpl-3.0 | 3,556 |
from .responses import CloudWatchResponse
# Hostname patterns (regexes) this mock backend answers for - any AWS region.
url_bases = [
    "https?://monitoring.(.+).amazonaws.com",
]
# Path regexes mapped to their handlers; '{0}' is substituted with each
# url_base by moto's URL wiring.
url_paths = {
    '{0}/$': CloudWatchResponse.dispatch,
}
| botify-labs/moto | moto/cloudwatch/urls.py | Python | apache-2.0 | 164 |
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from desktop.lib.python_util import force_dict_to_strings
from config import Config
class Connector(object):
    """Client-side model of a Sqoop connector definition.

    Wraps the JSON structure returned by the Sqoop server, exposing the link
    configuration and the job (FROM/TO) configurations as Config objects.
    """
    def __init__(self, id, name, version, link_config, job_config, config_resources={}, **kwargs):
        self.id = id
        self.name = name
        self.version = version
        self.job_config = job_config
        self.link_config = link_config
        self.config_resources = config_resources
        # 'class' is a Python keyword, so the attribute has to go via setattr.
        setattr(self, 'class', kwargs['class'])
    @staticmethod
    def from_dict(connector_dict):
        # NOTE(review): mutates its argument in place, and the setdefault
        # calls look redundant - the dashed keys ('link-config',
        # 'job-config') are what is actually read. Confirm against the
        # server payload before changing.
        connector_dict.setdefault('link_config', [])
        connector_dict['link_config'] = [ Config.from_dict(link_config_dict) for link_config_dict in connector_dict['link-config'] ]
        connector_dict.setdefault('job_config', {})
        connector_dict['job_config'] = {}
        if 'FROM' in connector_dict['job-config']:
            connector_dict['job_config']['FROM'] = [ Config.from_dict(from_config_dict) for from_config_dict in connector_dict['job-config']['FROM'] ]
        if 'TO' in connector_dict['job-config']:
            connector_dict['job_config']['TO'] = [ Config.from_dict(to_config_dict) for to_config_dict in connector_dict['job-config']['TO'] ]
        connector_dict['config_resources'] = connector_dict['all-config-resources']
        return Connector(**force_dict_to_strings(connector_dict))
    def to_dict(self):
        # Serialize back to the wire format (dashed keys).
        d = {
            'id': self.id,
            'name': self.name,
            'version': self.version,
            'class': getattr(self, 'class'),
            'link-config': [ link_config.to_dict() for link_config in self.link_config ],
            'job-config': {},
            'all-config-resources': self.config_resources
        }
        if 'FROM' in self.job_config:
            d['job-config']['FROM'] = [ job_config.to_dict() for job_config in self.job_config['FROM'] ]
        if 'TO' in self.job_config:
            d['job-config']['TO'] = [ job_config.to_dict() for job_config in self.job_config['TO'] ]
        return d
| vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/apps/sqoop/src/sqoop/client/connector.py | Python | gpl-2.0 | 2,640 |
"""
Helper functions for managing interactions with the LTI outcomes service defined
in LTI v1.1.
"""
from hashlib import sha1
from base64 import b64encode
import logging
import uuid
from lxml import etree
from lxml.builder import ElementMaker
from oauthlib.oauth1 import Client
from oauthlib.common import to_unicode
import requests
from requests.exceptions import RequestException
import requests_oauthlib
from lti_provider.models import GradedAssignment, OutcomeService
log = logging.getLogger("edx.lti_provider")
class BodyHashClient(Client):
    """
    OAuth1 Client that adds body hash support (required by LTI).
    The default Client doesn't support body hashes, so we have to add it ourselves.
    The spec:
    https://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
    """
    def get_oauth_params(self, request):
        """Override get_oauth_params to add the body hash."""
        params = super(BodyHashClient, self).get_oauth_params(request)
        # Per the body-hash spec: base64(sha1(request body)). to_unicode()
        # converts the b64encode result, which is bytes on Python 3.
        digest = b64encode(sha1(request.body.encode('UTF-8')).digest())
        params.append((u'oauth_body_hash', to_unicode(digest)))
        return params
def store_outcome_parameters(request_params, user, lti_consumer):
    """
    Determine whether a set of LTI launch parameters contains information about
    an expected score, and if so create a GradedAssignment record. Create a new
    OutcomeService record if none exists for the tool consumer, and update any
    incomplete record with additional data if it is available.

    Returns None in all cases; has database side effects when the launch
    includes a lis_result_sourcedid and a lis_outcome_service_url.
    """
    result_id = request_params.get('lis_result_sourcedid', None)
    # We're only interested in requests that include a lis_result_sourcedid
    # parameter. An LTI consumer that does not send that parameter does not
    # expect scoring updates for that particular request.
    if result_id:
        result_service = request_params.get('lis_outcome_service_url', None)
        if not result_service:
            # TODO: There may be a way to recover from this error; if we know
            # the LTI consumer that the request comes from then we may be able
            # to figure out the result service URL. As it stands, though, this
            # is a badly-formed LTI request
            # Fixed: log.warn is a deprecated alias for log.warning.
            log.warning(
                "Outcome Service: lis_outcome_service_url parameter missing "
                "from scored assignment; we will be unable to return a score. "
                "Request parameters: %s",
                request_params
            )
            return
        # Both usage and course ID parameters are supplied in the LTI launch URL
        usage_key = request_params['usage_key']
        course_key = request_params['course_key']
        # Create a record of the outcome service if necessary
        outcomes, __ = OutcomeService.objects.get_or_create(
            lis_outcome_service_url=result_service,
            lti_consumer=lti_consumer
        )
        # get_or_create keeps this idempotent across repeated launches.
        GradedAssignment.objects.get_or_create(
            lis_result_sourcedid=result_id,
            course_key=course_key,
            usage_key=usage_key,
            user=user,
            outcome_service=outcomes
        )
def generate_replace_result_xml(result_sourcedid, score):
    """
    Create the XML document that contains the new score to be sent to the LTI
    consumer, following the replaceResult message format in the LTI 1.1 spec.
    """
    # Pylint doesn't recognize members in the LXML module
    # pylint: disable=no-member
    lti_namespace = 'http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0'
    elem = ElementMaker(nsmap={None: lti_namespace})
    # Header: protocol version plus a fresh message identifier.
    header = elem.imsx_POXHeader(
        elem.imsx_POXRequestHeaderInfo(
            elem.imsx_version('V1.0'),
            elem.imsx_messageIdentifier(str(uuid.uuid4()))
        )
    )
    # Result payload: the sourcedid identifies the grade cell on the consumer.
    result = elem.result(
        elem.resultScore(
            elem.language('en'),
            elem.textString(str(score))
        )
    )
    body = elem.imsx_POXBody(
        elem.replaceResultRequest(
            elem.resultRecord(
                elem.sourcedGUID(
                    elem.sourcedId(result_sourcedid)
                ),
                result
            )
        )
    )
    envelope = elem.imsx_POXEnvelopeRequest(header, body)
    return etree.tostring(envelope, xml_declaration=True, encoding='UTF-8')
def get_assignments_for_problem(problem_descriptor, user_id, course_key):
    """
    Trace the parent hierarchy from a given problem and return all
    GradedAssignment records for this user that point at the problem itself
    or at any of its ancestor blocks. A problem may show up multiple times
    for a given user (embedded in several courses, or at several
    granularities within one course), so every ancestor location counts.
    """
    # Walk up the block tree collecting every usage location on the way.
    locations = []
    descriptor = problem_descriptor
    while descriptor:
        locations.append(descriptor.location)
        descriptor = descriptor.get_parent()
    return GradedAssignment.objects.filter(
        user=user_id, course_key=course_key, usage_key__in=locations
    )
def send_score_update(assignment, score):
    """
    Build and send the replaceResult XML message to the campus LMS to update
    the grade for a single graded assignment, logging a complete record if
    the update could not be delivered or was rejected.
    """
    payload = generate_replace_result_xml(
        assignment.lis_result_sourcedid, score
    )
    response = None
    try:
        response = sign_and_send_replace_result(assignment, payload)
    except RequestException:
        # 'response' stays None; full details are logged below.
        log.exception("Outcome Service: Error when sending result.")
    # Success: consumer reachable and it acknowledged the new score.
    if response and check_replace_result_response(response):
        return
    # If something went wrong, make sure that we have a complete log record.
    # That way we can manually fix things up on the campus system later if
    # necessary.
    log.error(
        "Outcome Service: Failed to update score on LTI consumer. "
        "User: %s, course: %s, usage: %s, score: %s, status: %s, body: %s",
        assignment.user,
        assignment.course_key,
        assignment.usage_key,
        score,
        response,
        response.text if response else 'Unknown'
    )
def sign_and_send_replace_result(assignment, xml):
    """
    Sign the replaceResult document with the OAuth key/secret assigned to the
    tool consumer and POST it to the consumer's outcome service URL. Returns
    the requests response object.
    """
    consumer = assignment.outcome_service.lti_consumer
    # Calculate the OAuth signature for the replace_result message.
    # TODO: According to the LTI spec, there should be an additional
    # oauth_body_hash field that contains a digest of the replace_result
    # message. Testing with Canvas throws an error when this field is included.
    # This code may need to be revisited once we test with other LMS platforms,
    # and confirm whether there's a bug in Canvas.
    oauth = requests_oauthlib.OAuth1(
        consumer.consumer_key,
        consumer.consumer_secret,
        signature_method='HMAC-SHA1',
        client_class=BodyHashClient,
        force_include_body=True
    )
    return requests.post(
        assignment.outcome_service.lis_outcome_service_url,
        data=xml,
        auth=oauth,
        headers={'content-type': 'application/xml'}
    )
def check_replace_result_response(response):
    """
    Validate the reply sent by the LTI consumer after a score update message.
    Return True if the message was properly received, False otherwise. The
    format of this message is defined in the LTI 1.1 spec.
    """
    # Pylint doesn't recognize members in the LXML module
    # pylint: disable=no-member
    if response.status_code != 200:
        log.error(
            "Outcome service response: Unexpected status code %s",
            response.status_code
        )
        return False
    try:
        xml = response.content
        root = etree.fromstring(xml)
    except etree.ParseError as ex:
        log.error("Outcome service response: Failed to parse XML: %s\n %s", ex, xml)
        return False
    # The spec mandates exactly one imsx_codeMajor element in the reply.
    codes = root.xpath(
        '//ns:imsx_codeMajor',
        namespaces={'ns': 'http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0'})
    if len(codes) != 1:
        log.error(
            "Outcome service response: Expected exactly one imsx_codeMajor field in response. Received %s",
            codes
        )
        return False
    major_code = codes[0].text
    if major_code != 'success':
        log.error(
            "Outcome service response: Unexpected major code: %s.",
            major_code
        )
        return False
    return True
| zofuthan/edx-platform | lms/djangoapps/lti_provider/outcomes.py | Python | agpl-3.0 | 9,222 |
import re
from os.path import splitext
from livestreamer.compat import urlparse, unquote
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http, validate
from livestreamer.stream import HTTPStream, RTMPStream
_url_re = re.compile("""
http(s)?://(\w+\.)?aliez.tv
(?:
/live/[^/]+
)?
(?:
/video/\d+/[^/]+
)?
""", re.VERBOSE)
_file_re = re.compile("\"?file\"?:\s+['\"]([^'\"]+)['\"]")
_swf_url_re = re.compile("swfobject.embedSWF\(\"([^\"]+)\",")
_schema = validate.Schema(
validate.union({
"urls": validate.all(
validate.transform(_file_re.findall),
validate.map(unquote),
[validate.url()]
),
"swf": validate.all(
validate.transform(_swf_url_re.search),
validate.any(
None,
validate.all(
validate.get(1),
validate.url(
scheme="http",
path=validate.endswith("swf")
)
)
)
)
})
)
class Aliez(Plugin):
    """Livestreamer plugin for live (RTMP) and recorded (HTTP) aliez.tv streams."""
    @classmethod
    def can_handle_url(cls, url):
        # Fixed: the first parameter of a classmethod is conventionally 'cls',
        # not 'self'. Returns a truthy match object for aliez.tv URLs.
        return _url_re.match(url)

    def _get_streams(self):
        """Scrape the page for playable URLs and map them to stream objects."""
        res = http.get(self.url, schema=_schema)
        streams = {}
        for url in res["urls"]:
            parsed = urlparse(url)
            if parsed.scheme.startswith("rtmp"):
                params = {
                    "rtmp": url,
                    "pageUrl": self.url,
                    "live": True
                }
                # swfVfy is only added when the page exposed a player SWF URL.
                if res["swf"]:
                    params["swfVfy"] = res["swf"]
                stream = RTMPStream(self.session, params)
                streams["live"] = stream
            elif parsed.scheme.startswith("http"):
                # Recorded videos are keyed by file extension (e.g. "mp4").
                name = splitext(parsed.path)[1][1:]
                stream = HTTPStream(self.session, url)
                streams[name] = stream
        return streams
# Module-level hook looked up by livestreamer's plugin loader.
__plugin__ = Aliez
| charmander/livestreamer | src/livestreamer/plugins/alieztv.py | Python | bsd-2-clause | 2,005 |
import os
from numpy.testing import assert_array_equal, raises, run_module_suite
import numpy as np
import skimage.io as io
from skimage.io.manage_plugins import plugin_store
from skimage import data_dir
def test_stack_basic():
    # Pushing an image and popping it back must round-trip unchanged.
    image = np.arange(12).reshape(3, 4)
    io.push(image)
    assert_array_equal(io.pop(), image)
@raises(ValueError)
def test_stack_non_array():
    # Only ndarrays may be pushed onto the image stack; a plain list must raise.
    io.push([[1, 2, 3]])
def test_imread_url():
    # tweak data path so that file URI works on both unix and windows.
    data_path = data_dir.lstrip(os.path.sep).replace(os.path.sep, '/')
    # imread should transparently handle file:// URLs.
    image = io.imread('file:///{0}/camera.png'.format(data_path))
    assert image.shape == (512, 512)
if __name__ == "__main__":
    # Allow running this test module directly through numpy's test runner.
    run_module_suite()
| Britefury/scikit-image | skimage/io/tests/test_io.py | Python | bsd-3-clause | 769 |
# Kills a process by process name
#
# Uses the Performance Data Helper to locate the PID, then kills it.
# Will only kill the process if there is only one process of that name
# (eg, attempting to kill "Python.exe" will only work if there is only
# one Python.exe running. (Note that the current process does not
# count - ie, if Python.exe is hosting this script, you can still kill
# another Python.exe (as long as there is only one other Python.exe)
# Really just a demo for the win32pdh(util) module, which allows you
# to get all sorts of information about a running process and many
# other aspects of your system.
import win32api, win32pdhutil, win32con, sys
def killProcName(procname):
    """Kill the single running process named procname.

    Returns an empty string on success, or an error message when the process
    could not be found or the name was ambiguous. The current process is
    never counted as a candidate.
    """
    # Change suggested by Dan Knierim, who found that this performed a
    # "refresh", allowing us to kill processes created since this was run
    # for the first time.
    try:
        win32pdhutil.GetPerformanceAttributes('Process', 'ID Process', procname)
    except Exception:
        # Best-effort refresh only; failures here are expected and harmless.
        # (Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.)
        pass
    pids = win32pdhutil.FindPerformanceAttributesByName(procname)
    # If _my_ pid in there, remove it!
    try:
        pids.remove(win32api.GetCurrentProcessId())
    except ValueError:
        pass
    if len(pids) == 0:
        result = "Can't find %s" % procname
    elif len(pids) > 1:
        # Refuse to guess when the name matches more than one process.
        result = "Found too many %s's - pids=`%s`" % (procname, pids)
    else:
        handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, pids[0])
        win32api.TerminateProcess(handle, 0)
        win32api.CloseHandle(handle)
        result = ""
    return result
if __name__ == '__main__':
    # Command-line usage: attempt to kill each process name given as argument.
    # (Python 2 script: print statements are intentional.)
    if len(sys.argv)>1:
        for procname in sys.argv[1:]:
            result = killProcName(procname)
            if result:
                # Kill failed: show why, then dump the process table to help
                # the user find the right name.
                print result
                print "Dumping all processes..."
                win32pdhutil.ShowAllProcesses()
            else:
                print "Killed %s" % procname
    else:
        print "Usage: killProcName.py procname ..."
| JulienMcJay/eclock | windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/scripts/killProcName.py | Python | gpl-2.0 | 1,766 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Luis Alberto Perez Lazaro <luisperlazaro@gmail.com>
# Copyright: (c) 2015, Jakub Jirutka <jakub@jirutka.cz>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: patch
author:
- Jakub Jirutka (@jirutka)
- Luis Alberto Perez Lazaro (@luisperlaz)
version_added: '1.9'
description:
- Apply patch files using the GNU patch tool.
short_description: Apply patch files using the GNU patch tool
options:
basedir:
description:
- Path of a base directory in which the patch file will be applied.
- May be omitted when C(dest) option is specified, otherwise required.
type: path
dest:
description:
- Path of the file on the remote machine to be patched.
- The names of the files to be patched are usually taken from the patch
file, but if there's just one file to be patched it can specified with
this option.
type: path
aliases: [ originalfile ]
src:
description:
- Path of the patch file as accepted by the GNU patch tool. If
C(remote_src) is 'no', the patch source file is looked up from the
module's I(files) directory.
type: path
required: true
aliases: [ patchfile ]
state:
description:
- Whether the patch should be applied or reverted.
type: str
choices: [ absent, present ]
default: present
version_added: "2.6"
remote_src:
description:
- If C(no), it will search for src at originating/master machine, if C(yes) it will
go to the remote/target machine for the C(src).
type: bool
default: no
strip:
description:
- Number that indicates the smallest prefix containing leading slashes
that will be stripped from each file name found in the patch file.
- For more information see the strip parameter of the GNU patch tool.
type: int
default: 0
backup:
version_added: "2.0"
description:
- Passes C(--backup --version-control=numbered) to patch, producing numbered backup copies.
type: bool
default: no
binary:
version_added: "2.0"
description:
- Setting to C(yes) will disable patch's heuristic for transforming CRLF
line endings into LF.
- Line endings of src and dest must match.
- If set to C(no), C(patch) will replace CRLF in C(src) files on POSIX.
type: bool
default: no
notes:
- This module requires GNU I(patch) utility to be installed on the remote host.
'''
EXAMPLES = r'''
- name: Apply patch to one file
patch:
src: /tmp/index.html.patch
dest: /var/www/index.html
- name: Apply patch to multiple files under basedir
patch:
src: /tmp/customize.patch
basedir: /var/www
strip: 1
- name: Revert patch to one file
patch:
src: /tmp/index.html.patch
dest: /var/www/index.html
state: absent
'''
import os
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule, get_platform
from ansible.module_utils._text import to_native
class PatchError(Exception):
    """Raised when GNU patch exits non-zero; the message holds patch's output."""
    pass
def add_dry_run_option(opts):
    """Append the platform-appropriate dry-run flag to the patch option list."""
    # Older versions of FreeBSD, OpenBSD and NetBSD support the --check option only.
    bsd_platforms = ('openbsd', 'netbsd', 'freebsd')
    opts.append('--check' if get_platform().lower() in bsd_platforms else '--dry-run')
def is_already_applied(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, state='present'):
    """Return True when a dry run shows the patch is already applied
    (state='present' checks via --reverse) or already reverted."""
    opts = [
        '--quiet', '--forward',
        "--strip=%s" % strip,
        "--directory='%s'" % basedir,
        "--input='%s'" % patch_file,
    ]
    add_dry_run_option(opts)
    if binary:
        opts.append('--binary')
    if dest_file:
        opts.append("'%s'" % dest_file)
    if state == 'present':
        opts.append('--reverse')
    # Exit code 0 from the dry run means the (possibly reversed) patch applies.
    rc, dummy_out, dummy_err = patch_func(opts)
    return rc == 0
def apply_patch(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, dry_run=False, backup=False, state='present'):
    """Run GNU patch through patch_func with the requested options; raise
    PatchError carrying patch's output when it exits non-zero."""
    opts = [
        '--quiet', '--forward', '--batch', '--reject-file=-',
        "--strip=%s" % strip,
        "--directory='%s'" % basedir,
        "--input='%s'" % patch_file,
    ]
    if dry_run:
        add_dry_run_option(opts)
    if binary:
        opts.append('--binary')
    if dest_file:
        opts.append("'%s'" % dest_file)
    if backup:
        # patch creates numbered backup copies (file.~1~, file.~2~, ...).
        opts.append('--backup --version-control=numbered')
    if state == 'absent':
        opts.append('--reverse')
    rc, out, err = patch_func(opts)
    if rc != 0:
        # Prefer stderr; fall back to stdout when patch wrote its error there.
        raise PatchError(err or out)
def main():
    """Entry point: validate parameters, then apply (or revert) the patch,
    reporting 'changed' only when patch actually had to run."""
    module = AnsibleModule(
        argument_spec=dict(
            src=dict(type='path', required=True, aliases=['patchfile']),
            dest=dict(type='path', aliases=['originalfile']),
            basedir=dict(type='path'),
            strip=dict(type='int', default=0),
            remote_src=dict(type='bool', default=False),
            # NB: for 'backup' parameter, semantics is slightly different from standard
            # since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~")
            backup=dict(type='bool', default=False),
            binary=dict(type='bool', default=False),
            state=dict(type='str', default='present', choices=['absent', 'present']),
        ),
        required_one_of=[['dest', 'basedir']],
        supports_check_mode=True,
    )
    # Create type object as namespace for module params
    p = type('Params', (), module.params)
    # Fail early on unreadable/unwritable paths rather than from inside patch.
    if not os.access(p.src, os.R_OK):
        module.fail_json(msg="src %s doesn't exist or not readable" % (p.src))
    if p.dest and not os.access(p.dest, os.W_OK):
        module.fail_json(msg="dest %s doesn't exist or not writable" % (p.dest))
    if p.basedir and not os.path.exists(p.basedir):
        module.fail_json(msg="basedir %s doesn't exist" % (p.basedir))
    # When only dest was given, derive the working directory from it.
    if not p.basedir:
        p.basedir = os.path.dirname(p.dest)
    patch_bin = module.get_bin_path('patch')
    if patch_bin is None:
        module.fail_json(msg="patch command not found")
    # Closure handed to the helpers so they stay testable/independent of Ansible.
    def patch_func(opts):
        return module.run_command('%s %s' % (patch_bin, ' '.join(opts)))
    # patch need an absolute file name
    p.src = os.path.abspath(p.src)
    changed = False
    # Idempotency: only run patch when it is not already applied/reverted.
    if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip, state=p.state):
        try:
            # In check mode the patch is dry-run only, but still reported as a change.
            apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip,
                        dry_run=module.check_mode, backup=p.backup, state=p.state)
            changed = True
        except PatchError as e:
            module.fail_json(msg=to_native(e), exception=format_exc())
    module.exit_json(changed=changed)
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| rosmo/ansible | lib/ansible/modules/files/patch.py | Python | gpl-3.0 | 7,109 |
from setuptools import setup
# Replace the place holders with values for your project
setup(
    # Do not use underscores in the plugin name.
    name='custom-wf-plugin',
    version='0.1',
    author='alien',
    author_email='alien@fastconnect.fr',
    description='custom generated workflows',
    # This must correspond to the actual packages in the plugin.
    packages=['plugin'],
    license='Apache',
    zip_safe=True,
    install_requires=[
        # Necessary dependency for developing plugins, do not remove!
        "cloudify-plugins-common>=3.2"
    ],
    # Fixed: the setuptools keyword is 'tests_require' (not 'test_requires'),
    # and the original list was missing a comma, silently concatenating the
    # two strings into "cloudify-dsl-parser>=3.2nose".
    tests_require=[
        "cloudify-dsl-parser>=3.2",
        "nose"
    ]
)
# -*- coding: utf-8 -*-
from __future__ import with_statement
from os.path import exists
from os.path import join
CONF_VERSION = 1
########################################################################
class ConfigParser:
    """Reads and parses pyload.conf from a config directory, exposing values
    via get() and dictionary-style access (c['section']['option'])."""
    #----------------------------------------------------------------------
    def __init__(self, configdir):
        """Remember the config directory and load the config if the on-disk
        file version is current."""
        self.configdir = configdir
        # Parsed config: {section: {"desc": ..., option: {"desc","type","value"}}}
        self.config = {}
        if self.checkVersion():
            self.readConfig()
    #----------------------------------------------------------------------
    def checkVersion(self):
        """Return True if pyload.conf exists and its first-line version tag
        is at least CONF_VERSION (older files are ignored)."""
        if not exists(join(self.configdir, "pyload.conf")):
            return False
        f = open(join(self.configdir, "pyload.conf"), "rb")
        v = f.readline()
        f.close()
        # First line looks like "version: N" - take everything after the colon.
        v = v[v.find(":")+1:].strip()
        if int(v) < CONF_VERSION:
            return False
        return True
    #----------------------------------------------------------------------
    def readConfig(self):
        """reads the config file"""
        self.config = self.parseConfig(join(self.configdir, "pyload.conf"))
    #----------------------------------------------------------------------
    def parseConfig(self, config):
        """Parse the given config file into a nested dict.

        Format per line: sections are 'name - "desc":', options are
        'type option : "desc" = value'; list values may span several lines
        until a closing ']' (tracked via listmode). Malformed lines are
        skipped silently.
        """
        f = open(config)
        config = f.read()
        # Drop the version header line before parsing.
        config = config.split("\n")[1:]
        conf = {}
        section, option, value, typ, desc = "","","","",""
        # listmode is True while a multi-line [...] list value is being collected.
        listmode = False
        for line in config:
            line = line.rpartition("#") # removes comments
            if line[1]:
                line = line[0]
            else:
                line = line[2]
            line = line.strip()
            try:
                if line == "":
                    continue
                elif line.endswith(":"):
                    # Section header: 'name - "description":'
                    section, none, desc = line[:-1].partition('-')
                    section = section.strip()
                    desc = desc.replace('"', "").strip()
                    conf[section] = { "desc" : desc }
                else:
                    if listmode:
                        # Continuation of a multi-line list value.
                        if line.endswith("]"):
                            listmode = False
                            line = line.replace("]","")
                        value += [self.cast(typ, x.strip()) for x in line.split(",") if x]
                        if not listmode:
                            conf[section][option] = { "desc" : desc,
                                                      "type" : typ,
                                                      "value" : value}
                    else:
                        # Option line: '<type> <option> : "<desc>" = <value>'
                        content, none, value = line.partition("=")
                        content, none, desc = content.partition(":")
                        desc = desc.replace('"', "").strip()
                        typ, option = content.split()
                        value = value.strip()
                        if value.startswith("["):
                            # List value; may close on the same line or later.
                            if value.endswith("]"):
                                listmode = False
                                value = value[:-1]
                            else:
                                listmode = True
                            value = [self.cast(typ, x.strip()) for x in value[1:].split(",") if x]
                        else:
                            value = self.cast(typ, value)
                        if not listmode:
                            conf[section][option] = { "desc" : desc,
                                                      "type" : typ,
                                                      "value" : value}
            except:
                # Deliberate best-effort: any malformed line is skipped.
                pass
        f.close()
        return conf
    #----------------------------------------------------------------------
    def cast(self, typ, value):
        """Cast a raw string value to the declared option type; non-string
        values (already-cast list elements) pass through unchanged."""
        # NOTE: 'unicode' makes this Python-2 only.
        if type(value) not in (str, unicode):
            return value
        if typ == "int":
            return int(value)
        elif typ == "bool":
            # "an" is accepted as German "on".
            return True if value.lower() in ("1","true", "on", "an","yes") else False
        else:
            return value
    #----------------------------------------------------------------------
    def get(self, section, option):
        """Return the stored value for section/option (raises KeyError if absent)."""
        return self.config[section][option]["value"]
    #----------------------------------------------------------------------
    def __getitem__(self, section):
        """provides dictonary like access: c['section']['option']"""
        return Section(self, section)
########################################################################
class Section:
    """Dictionary-style proxy exposing a single section of a ConfigParser."""
    def __init__(self, parser, section):
        """Remember the owning parser and the section name to proxy."""
        self.parser = parser
        self.section = section
    def __getitem__(self, item):
        """Delegate c['section']['option'] lookups to the owning parser."""
        return self.parser.get(self.section, item)
| EvolutionClip/pyload | module/gui/CoreConfigParser.py | Python | gpl-3.0 | 5,711 |
import re
import sys
import pip
from pip.req import InstallRequirement
from pip.log import logger
from pip.basecommand import Command
from pip.util import get_installed_distributions
import pkg_resources
class FreezeCommand(Command):
    """Output installed packages in requirements format."""
    name = 'freeze'
    usage = """
      %prog [options]"""
    summary = 'Output installed packages in requirements format.'
    def __init__(self, *args, **kw):
        """Register the freeze-specific command-line options."""
        super(FreezeCommand, self).__init__(*args, **kw)
        self.cmd_opts.add_option(
            '-r', '--requirement',
            dest='requirement',
            action='store',
            default=None,
            metavar='file',
            help="Use the order in the given requirements file and it's comments when generating output.")
        self.cmd_opts.add_option(
            '-f', '--find-links',
            dest='find_links',
            action='append',
            default=[],
            metavar='URL',
            help='URL for finding packages, which will be added to the output.')
        self.cmd_opts.add_option(
            '-l', '--local',
            dest='local',
            action='store_true',
            default=False,
            help='If in a virtualenv that has global access, do not output globally-installed packages.')
        self.parser.insert_option_group(0, self.cmd_opts)
    def setup_logging(self):
        # Freeze output goes to stdout, so divert log chatter to stderr.
        logger.move_stdout_to_stderr()
    def run(self, options, args):
        """Write the frozen requirement lines to stdout.

        When -r/--requirement is given, preserve that file's ordering and
        comments, then append any remaining installed packages under a
        trailing marker comment.
        """
        requirement = options.requirement
        find_links = options.find_links or []
        local_only = options.local
        ## FIXME: Obviously this should be settable:
        find_tags = False
        skip_match = None
        # --skip-requirements-regex lines are copied through untouched.
        skip_regex = options.skip_requirements_regex
        if skip_regex:
            skip_match = re.compile(skip_regex)
        dependency_links = []
        f = sys.stdout
        # Collect dependency links declared by installed distributions.
        for dist in pkg_resources.working_set:
            if dist.has_metadata('dependency_links.txt'):
                dependency_links.extend(dist.get_metadata_lines('dependency_links.txt'))
        for link in find_links:
            if '#egg=' in link:
                dependency_links.append(link)
        for link in find_links:
            f.write('-f %s\n' % link)
        # Map project name -> FrozenRequirement for everything installed.
        installations = {}
        for dist in get_installed_distributions(local_only=local_only):
            req = pip.FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags)
            installations[req.name] = req
        if requirement:
            req_f = open(requirement)
            for line in req_f:
                # Pass comments/blank lines and skip-regex matches through as-is.
                if not line.strip() or line.strip().startswith('#'):
                    f.write(line)
                    continue
                if skip_match and skip_match.search(line):
                    f.write(line)
                    continue
                elif line.startswith('-e') or line.startswith('--editable'):
                    if line.startswith('-e'):
                        line = line[2:].strip()
                    else:
                        line = line[len('--editable'):].strip().lstrip('=')
                    line_req = InstallRequirement.from_editable(line, default_vcs=options.default_vcs)
                elif (line.startswith('-r') or line.startswith('--requirement')
                      or line.startswith('-Z') or line.startswith('--always-unzip')
                      or line.startswith('-f') or line.startswith('-i')
                      or line.startswith('--extra-index-url')
                      or line.startswith('--find-links')
                      or line.startswith('--index-url')):
                    # Other pip option lines are echoed verbatim.
                    f.write(line)
                    continue
                else:
                    line_req = InstallRequirement.from_line(line)
                if not line_req.name:
                    logger.notify("Skipping line because it's not clear what it would install: %s"
                                  % line.strip())
                    logger.notify("  (add #egg=PackageName to the URL to avoid this warning)")
                    continue
                if line_req.name not in installations:
                    logger.warn("Requirement file contains %s, but that package is not installed"
                                % line.strip())
                    continue
                # Emit the installed version for this requirement, and mark it
                # as handled so it isn't repeated below.
                f.write(str(installations[line_req.name]))
                del installations[line_req.name]
            f.write('## The following requirements were added by pip --freeze:\n')
        for installation in sorted(installations.values(), key=lambda x: x.name):
            f.write(str(installation))
| OpenWinCon/OpenWinNet | web-gui/myvenv/lib/python3.4/site-packages/pip/commands/freeze.py | Python | apache-2.0 | 4,647 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
WSGI stack, common code.
"""
import httplib
import urllib
import xmlrpclib
import StringIO
import errno
import logging
import platform
import socket
import sys
import threading
import traceback
import werkzeug.serving
import werkzeug.contrib.fixers
import openerp
import openerp.tools.config as config
_logger = logging.getLogger(__name__)
# XML-RPC fault codes. Some care must be taken when changing these: the
# constants are also defined client-side and must remain in sync.
# User code must use the exceptions defined in ``openerp.exceptions`` (not
# create directly ``xmlrpclib.Fault`` objects).
RPC_FAULT_CODE_CLIENT_ERROR = 1 # indistinguishable from app. error.
RPC_FAULT_CODE_APPLICATION_ERROR = 1
RPC_FAULT_CODE_WARNING = 2
RPC_FAULT_CODE_ACCESS_DENIED = 3
RPC_FAULT_CODE_ACCESS_ERROR = 4
def xmlrpc_return(start_response, service, method, params, string_faultcode=False):
    """
    Helper to call a service's method with some params, using a wsgi-supplied
    ``start_response`` callback.
    This is the place to look at to see the mapping between core exceptions
    and XML-RPC fault codes.
    """
    # Map OpenERP core exceptions to XML-RPC fault codes. Specific exceptions
    # defined in ``openerp.exceptions`` are mapped to specific fault codes;
    # all the other exceptions are mapped to the generic
    # RPC_FAULT_CODE_APPLICATION_ERROR value.
    # This also mimics SimpleXMLRPCDispatcher._marshaled_dispatch() for
    # exception handling.
    try:
        result = openerp.http.dispatch_rpc(service, method, params)
        response = xmlrpclib.dumps((result,), methodresponse=1, allow_none=False, encoding=None)
    # NOTE: Python 2 except syntax; string_faultcode selects the legacy
    # string-faultCode encoding kept for backward compatibility.
    except Exception, e:
        if string_faultcode:
            response = xmlrpc_handle_exception_string(e)
        else:
            response = xmlrpc_handle_exception_int(e)
    # Errors are still delivered as HTTP 200 with an XML-RPC fault body.
    start_response("200 OK", [('Content-Type','text/xml'), ('Content-Length', str(len(response)))])
    return [response]
def xmlrpc_handle_exception_int(e):
    """Serialize exception *e* into an XML-RPC fault response whose faultCode
    is one of the integer RPC_FAULT_CODE_* constants (the spec-compliant
    /xmlrpc/2/ encoding)."""
    if isinstance(e, openerp.exceptions.UserError):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, openerp.tools.ustr(e.value))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.RedirectWarning):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.MissingError):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.AccessError):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_ERROR, str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.AccessDenied):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.DeferredException):
        info = e.traceback
        # Which one is the best ?
        formatted_info = "".join(traceback.format_exception(*info))
        #formatted_info = openerp.tools.exception_to_unicode(e) + '\n' + info
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    else:
        if hasattr(e, 'message') and e.message == 'AccessDenied': # legacy
            fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e))
            response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
        #InternalError
        else:
            info = sys.exc_info()
            # Which one is the best ?
            formatted_info = "".join(traceback.format_exception(*info))
            #formatted_info = openerp.tools.exception_to_unicode(e) + '\n' + info
            fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
            # Consistency fix: was allow_none=None here (falsy, so identical
            # behavior), now matches the allow_none=False used everywhere else.
            response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    return response
def xmlrpc_handle_exception_string(e):
    """Serialize exception *e* into an XML-RPC fault response using the
    historic string faultCodes (legacy /xmlrpc/ route)."""
    if isinstance(e, openerp.exceptions.UserError):
        fault = xmlrpclib.Fault('warning -- %s\n\n%s' % (e.name, e.value), '')
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.RedirectWarning):
        fault = xmlrpclib.Fault('warning -- Warning\n\n' + str(e), '')
        # Bug fix: this branch never assigned `response`, which caused an
        # UnboundLocalError at the return statement below.
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.MissingError):
        fault = xmlrpclib.Fault('warning -- MissingError\n\n' + str(e), '')
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.AccessError):
        fault = xmlrpclib.Fault('warning -- AccessError\n\n' + str(e), '')
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.AccessDenied):
        fault = xmlrpclib.Fault('AccessDenied', str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.DeferredException):
        info = e.traceback
        formatted_info = "".join(traceback.format_exception(*info))
        fault = xmlrpclib.Fault(openerp.tools.ustr(e.message), formatted_info)
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    #InternalError
    else:
        info = sys.exc_info()
        formatted_info = "".join(traceback.format_exception(*info))
        fault = xmlrpclib.Fault(openerp.tools.exception_to_unicode(e), formatted_info)
        # Consistency: allow_none normalized from None to False (same behavior).
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    return response
def wsgi_xmlrpc(environ, start_response):
    """ Two routes are available for XML-RPC
    /xmlrpc/<service> route returns faultCode as strings. This is a historic
    violation of the protocol kept for compatibility.
    /xmlrpc/2/<service> is a new route that returns faultCode as int and is
    therefore fully compliant.
    """
    # Only POSTs under /xmlrpc/ are ours; anything else falls through to the
    # next WSGI handler (signalled by returning None).
    if environ['REQUEST_METHOD'] != 'POST' or not environ['PATH_INFO'].startswith('/xmlrpc/'):
        return None
    length = int(environ['CONTENT_LENGTH'])
    data = environ['wsgi.input'].read(length)
    path = environ['PATH_INFO']
    # Distinguish the two faultCode modes by route prefix.
    if path.startswith('/xmlrpc/2/'):
        service = path[len('/xmlrpc/2/'):]
        string_faultcode = False
    else:
        service = path[len('/xmlrpc/'):]
        string_faultcode = True
    params, method = xmlrpclib.loads(data)
    return xmlrpc_return(start_response, service, method, params, string_faultcode)
def application_unproxied(environ, start_response):
    """ WSGI entry point."""
    # cleanup db/uid trackers - they're set at HTTP dispatch in
    # web.session.OpenERPSession.send() and at RPC dispatch in
    # openerp.service.web_services.objects_proxy.dispatch().
    # /!\ The cleanup cannot be done at the end of this `application`
    # method because werkzeug still produces relevant logging afterwards
    if hasattr(threading.current_thread(), 'uid'):
        del threading.current_thread().uid
    if hasattr(threading.current_thread(), 'dbname'):
        del threading.current_thread().dbname
    # Run the whole request inside an ORM environment scope.
    with openerp.api.Environment.manage():
        # Try all handlers until one returns some result (i.e. not None).
        for handler in [wsgi_xmlrpc, openerp.http.root]:
            result = handler(environ, start_response)
            if result is None:
                continue
            return result
    # We never returned from the loop.
    response = 'No handler found.\n'
    start_response('404 Not Found', [('Content-Type', 'text/plain'), ('Content-Length', str(len(response)))])
    return [response]
def application(environ, start_response):
    """Top-level WSGI callable.

    When proxy mode is enabled and the request carries forwarded-host
    headers, wrap the app in werkzeug's ProxyFix so the original client
    information is restored; otherwise dispatch directly.
    """
    behind_proxy = config['proxy_mode'] and 'HTTP_X_FORWARDED_HOST' in environ
    if behind_proxy:
        wrapped = werkzeug.contrib.fixers.ProxyFix(application_unproxied)
        return wrapped(environ, start_response)
    return application_unproxied(environ, start_response)
| vileopratama/vitech | src/openerp/service/wsgi_server.py | Python | mit | 8,202 |
# -*- coding: utf-8 -*-
"""
pygments.lexers._openedge_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the OpenEdgeLexer.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
OPENEDGEKEYWORDS = (
'ABSOLUTE',
'ABS',
'ABSO',
'ABSOL',
'ABSOLU',
'ABSOLUT',
'ACCELERATOR',
'ACCUMULATE',
'ACCUM',
'ACCUMU',
'ACCUMUL',
'ACCUMULA',
'ACCUMULAT',
'ACTIVE-FORM',
'ACTIVE-WINDOW',
'ADD',
'ADD-BUFFER',
'ADD-CALC-COLUMN',
'ADD-COLUMNS-FROM',
'ADD-EVENTS-PROCEDURE',
'ADD-FIELDS-FROM',
'ADD-FIRST',
'ADD-INDEX-FIELD',
'ADD-LAST',
'ADD-LIKE-COLUMN',
'ADD-LIKE-FIELD',
'ADD-LIKE-INDEX',
'ADD-NEW-FIELD',
'ADD-NEW-INDEX',
'ADD-SCHEMA-LOCATION',
'ADD-SUPER-PROCEDURE',
'ADM-DATA',
'ADVISE',
'ALERT-BOX',
'ALIAS',
'ALL',
'ALLOW-COLUMN-SEARCHING',
'ALLOW-REPLICATION',
'ALTER',
'ALWAYS-ON-TOP',
'AMBIGUOUS',
'AMBIG',
'AMBIGU',
'AMBIGUO',
'AMBIGUOU',
'ANALYZE',
'ANALYZ',
'AND',
'ANSI-ONLY',
'ANY',
'ANYWHERE',
'APPEND',
'APPL-ALERT-BOXES',
'APPL-ALERT',
'APPL-ALERT-',
'APPL-ALERT-B',
'APPL-ALERT-BO',
'APPL-ALERT-BOX',
'APPL-ALERT-BOXE',
'APPL-CONTEXT-ID',
'APPLICATION',
'APPLY',
'APPSERVER-INFO',
'APPSERVER-PASSWORD',
'APPSERVER-USERID',
'ARRAY-MESSAGE',
'AS',
'ASC',
'ASCENDING',
'ASCE',
'ASCEN',
'ASCEND',
'ASCENDI',
'ASCENDIN',
'ASK-OVERWRITE',
'ASSEMBLY',
'ASSIGN',
'ASYNCHRONOUS',
'ASYNC-REQUEST-COUNT',
'ASYNC-REQUEST-HANDLE',
'AT',
'ATTACHED-PAIRLIST',
'ATTR-SPACE',
'ATTR',
'ATTRI',
'ATTRIB',
'ATTRIBU',
'ATTRIBUT',
'AUDIT-CONTROL',
'AUDIT-ENABLED',
'AUDIT-EVENT-CONTEXT',
'AUDIT-POLICY',
'AUTHENTICATION-FAILED',
'AUTHORIZATION',
'AUTO-COMPLETION',
'AUTO-COMP',
'AUTO-COMPL',
'AUTO-COMPLE',
'AUTO-COMPLET',
'AUTO-COMPLETI',
'AUTO-COMPLETIO',
'AUTO-ENDKEY',
'AUTO-END-KEY',
'AUTO-GO',
'AUTO-INDENT',
'AUTO-IND',
'AUTO-INDE',
'AUTO-INDEN',
'AUTOMATIC',
'AUTO-RESIZE',
'AUTO-RETURN',
'AUTO-RET',
'AUTO-RETU',
'AUTO-RETUR',
'AUTO-SYNCHRONIZE',
'AUTO-ZAP',
'AUTO-Z',
'AUTO-ZA',
'AVAILABLE',
'AVAIL',
'AVAILA',
'AVAILAB',
'AVAILABL',
'AVAILABLE-FORMATS',
'AVERAGE',
'AVE',
'AVER',
'AVERA',
'AVERAG',
'AVG',
'BACKGROUND',
'BACK',
'BACKG',
'BACKGR',
'BACKGRO',
'BACKGROU',
'BACKGROUN',
'BACKWARDS',
'BACKWARD',
'BASE64-DECODE',
'BASE64-ENCODE',
'BASE-ADE',
'BASE-KEY',
'BATCH-MODE',
'BATCH',
'BATCH-',
'BATCH-M',
'BATCH-MO',
'BATCH-MOD',
'BATCH-SIZE',
'BEFORE-HIDE',
'BEFORE-H',
'BEFORE-HI',
'BEFORE-HID',
'BEGIN-EVENT-GROUP',
'BEGINS',
'BELL',
'BETWEEN',
'BGCOLOR',
'BGC',
'BGCO',
'BGCOL',
'BGCOLO',
'BIG-ENDIAN',
'BINARY',
'BIND',
'BIND-WHERE',
'BLANK',
'BLOCK-ITERATION-DISPLAY',
'BORDER-BOTTOM-CHARS',
'BORDER-B',
'BORDER-BO',
'BORDER-BOT',
'BORDER-BOTT',
'BORDER-BOTTO',
'BORDER-BOTTOM-PIXELS',
'BORDER-BOTTOM-P',
'BORDER-BOTTOM-PI',
'BORDER-BOTTOM-PIX',
'BORDER-BOTTOM-PIXE',
'BORDER-BOTTOM-PIXEL',
'BORDER-LEFT-CHARS',
'BORDER-L',
'BORDER-LE',
'BORDER-LEF',
'BORDER-LEFT',
'BORDER-LEFT-',
'BORDER-LEFT-C',
'BORDER-LEFT-CH',
'BORDER-LEFT-CHA',
'BORDER-LEFT-CHAR',
'BORDER-LEFT-PIXELS',
'BORDER-LEFT-P',
'BORDER-LEFT-PI',
'BORDER-LEFT-PIX',
'BORDER-LEFT-PIXE',
'BORDER-LEFT-PIXEL',
'BORDER-RIGHT-CHARS',
'BORDER-R',
'BORDER-RI',
'BORDER-RIG',
'BORDER-RIGH',
'BORDER-RIGHT',
'BORDER-RIGHT-',
'BORDER-RIGHT-C',
'BORDER-RIGHT-CH',
'BORDER-RIGHT-CHA',
'BORDER-RIGHT-CHAR',
'BORDER-RIGHT-PIXELS',
'BORDER-RIGHT-P',
'BORDER-RIGHT-PI',
'BORDER-RIGHT-PIX',
'BORDER-RIGHT-PIXE',
'BORDER-RIGHT-PIXEL',
'BORDER-TOP-CHARS',
'BORDER-T',
'BORDER-TO',
'BORDER-TOP',
'BORDER-TOP-',
'BORDER-TOP-C',
'BORDER-TOP-CH',
'BORDER-TOP-CHA',
'BORDER-TOP-CHAR',
'BORDER-TOP-PIXELS',
'BORDER-TOP-P',
'BORDER-TOP-PI',
'BORDER-TOP-PIX',
'BORDER-TOP-PIXE',
'BORDER-TOP-PIXEL',
'BOX',
'BOX-SELECTABLE',
'BOX-SELECT',
'BOX-SELECTA',
'BOX-SELECTAB',
'BOX-SELECTABL',
'BREAK',
'BROWSE',
'BUFFER',
'BUFFER-CHARS',
'BUFFER-COMPARE',
'BUFFER-COPY',
'BUFFER-CREATE',
'BUFFER-DELETE',
'BUFFER-FIELD',
'BUFFER-HANDLE',
'BUFFER-LINES',
'BUFFER-NAME',
'BUFFER-RELEASE',
'BUFFER-VALUE',
'BUTTON',
'BUTTONS',
'BY',
'BY-POINTER',
'BY-VARIANT-POINTER',
'CACHE',
'CACHE-SIZE',
'CALL',
'CALL-NAME',
'CALL-TYPE',
'CANCEL-BREAK',
'CANCEL-BUTTON',
'CAN-CREATE',
'CAN-DELETE',
'CAN-DO',
'CAN-FIND',
'CAN-QUERY',
'CAN-READ',
'CAN-SET',
'CAN-WRITE',
'CAPS',
'CAREFUL-PAINT',
'CASE',
'CASE-SENSITIVE',
'CASE-SEN',
'CASE-SENS',
'CASE-SENSI',
'CASE-SENSIT',
'CASE-SENSITI',
'CASE-SENSITIV',
'CAST',
'CATCH',
'CDECL',
'CENTERED',
'CENTER',
'CENTERE',
'CHAINED',
'CHARACTER_LENGTH',
'CHARSET',
'CHECK',
'CHECKED',
'CHOOSE',
'CHR',
'CLASS',
'CLASS-TYPE',
'CLEAR',
'CLEAR-APPL-CONTEXT',
'CLEAR-LOG',
'CLEAR-SELECTION',
'CLEAR-SELECT',
'CLEAR-SELECTI',
'CLEAR-SELECTIO',
'CLEAR-SORT-ARROWS',
'CLEAR-SORT-ARROW',
'CLIENT-CONNECTION-ID',
'CLIENT-PRINCIPAL',
'CLIENT-TTY',
'CLIENT-TYPE',
'CLIENT-WORKSTATION',
'CLIPBOARD',
'CLOSE',
'CLOSE-LOG',
'CODE',
'CODEBASE-LOCATOR',
'CODEPAGE',
'CODEPAGE-CONVERT',
'COLLATE',
'COL-OF',
'COLON',
'COLON-ALIGNED',
'COLON-ALIGN',
'COLON-ALIGNE',
'COLOR',
'COLOR-TABLE',
'COLUMN',
'COL',
'COLU',
'COLUM',
'COLUMN-BGCOLOR',
'COLUMN-DCOLOR',
'COLUMN-FGCOLOR',
'COLUMN-FONT',
'COLUMN-LABEL',
'COLUMN-LAB',
'COLUMN-LABE',
'COLUMN-MOVABLE',
'COLUMN-OF',
'COLUMN-PFCOLOR',
'COLUMN-READ-ONLY',
'COLUMN-RESIZABLE',
'COLUMNS',
'COLUMN-SCROLLING',
'COMBO-BOX',
'COMMAND',
'COMPARES',
'COMPILE',
'COMPILER',
'COMPLETE',
'COM-SELF',
'CONFIG-NAME',
'CONNECT',
'CONNECTED',
'CONSTRUCTOR',
'CONTAINS',
'CONTENTS',
'CONTEXT',
'CONTEXT-HELP',
'CONTEXT-HELP-FILE',
'CONTEXT-HELP-ID',
'CONTEXT-POPUP',
'CONTROL',
'CONTROL-BOX',
'CONTROL-FRAME',
'CONVERT',
'CONVERT-3D-COLORS',
'CONVERT-TO-OFFSET',
'CONVERT-TO-OFFS',
'CONVERT-TO-OFFSE',
'COPY-DATASET',
'COPY-LOB',
'COPY-SAX-ATTRIBUTES',
'COPY-TEMP-TABLE',
'COUNT',
'COUNT-OF',
'CPCASE',
'CPCOLL',
'CPINTERNAL',
'CPLOG',
'CPPRINT',
'CPRCODEIN',
'CPRCODEOUT',
'CPSTREAM',
'CPTERM',
'CRC-VALUE',
'CREATE',
'CREATE-LIKE',
'CREATE-LIKE-SEQUENTIAL',
'CREATE-NODE-NAMESPACE',
'CREATE-RESULT-LIST-ENTRY',
'CREATE-TEST-FILE',
'CURRENT',
'CURRENT_DATE',
'CURRENT-CHANGED',
'CURRENT-COLUMN',
'CURRENT-ENVIRONMENT',
'CURRENT-ENV',
'CURRENT-ENVI',
'CURRENT-ENVIR',
'CURRENT-ENVIRO',
'CURRENT-ENVIRON',
'CURRENT-ENVIRONM',
'CURRENT-ENVIRONME',
'CURRENT-ENVIRONMEN',
'CURRENT-ITERATION',
'CURRENT-LANGUAGE',
'CURRENT-LANG',
'CURRENT-LANGU',
'CURRENT-LANGUA',
'CURRENT-LANGUAG',
'CURRENT-QUERY',
'CURRENT-RESULT-ROW',
'CURRENT-ROW-MODIFIED',
'CURRENT-VALUE',
'CURRENT-WINDOW',
'CURSOR',
'CURS',
'CURSO',
'CURSOR-CHAR',
'CURSOR-LINE',
'CURSOR-OFFSET',
'DATABASE',
'DATA-BIND',
'DATA-ENTRY-RETURN',
'DATA-ENTRY-RET',
'DATA-ENTRY-RETU',
'DATA-ENTRY-RETUR',
'DATA-RELATION',
'DATA-REL',
'DATA-RELA',
'DATA-RELAT',
'DATA-RELATI',
'DATA-RELATIO',
'DATASERVERS',
'DATASET',
'DATASET-HANDLE',
'DATA-SOURCE',
'DATA-SOURCE-COMPLETE-MAP',
'DATA-SOURCE-MODIFIED',
'DATA-SOURCE-ROWID',
'DATA-TYPE',
'DATA-T',
'DATA-TY',
'DATA-TYP',
'DATE-FORMAT',
'DATE-F',
'DATE-FO',
'DATE-FOR',
'DATE-FORM',
'DATE-FORMA',
'DAY',
'DBCODEPAGE',
'DBCOLLATION',
'DBNAME',
'DBPARAM',
'DB-REFERENCES',
'DBRESTRICTIONS',
'DBREST',
'DBRESTR',
'DBRESTRI',
'DBRESTRIC',
'DBRESTRICT',
'DBRESTRICTI',
'DBRESTRICTIO',
'DBRESTRICTION',
'DBTASKID',
'DBTYPE',
'DBVERSION',
'DBVERS',
'DBVERSI',
'DBVERSIO',
'DCOLOR',
'DDE',
'DDE-ERROR',
'DDE-ID',
'DDE-I',
'DDE-ITEM',
'DDE-NAME',
'DDE-TOPIC',
'DEBLANK',
'DEBUG',
'DEBU',
'DEBUG-ALERT',
'DEBUGGER',
'DEBUG-LIST',
'DECIMALS',
'DECLARE',
'DECLARE-NAMESPACE',
'DECRYPT',
'DEFAULT',
'DEFAULT-BUFFER-HANDLE',
'DEFAULT-BUTTON',
'DEFAUT-B',
'DEFAUT-BU',
'DEFAUT-BUT',
'DEFAUT-BUTT',
'DEFAUT-BUTTO',
'DEFAULT-COMMIT',
'DEFAULT-EXTENSION',
'DEFAULT-EX',
'DEFAULT-EXT',
'DEFAULT-EXTE',
'DEFAULT-EXTEN',
'DEFAULT-EXTENS',
'DEFAULT-EXTENSI',
'DEFAULT-EXTENSIO',
'DEFAULT-NOXLATE',
'DEFAULT-NOXL',
'DEFAULT-NOXLA',
'DEFAULT-NOXLAT',
'DEFAULT-VALUE',
'DEFAULT-WINDOW',
'DEFINED',
'DEFINE-USER-EVENT-MANAGER',
'DELETE',
'DEL',
'DELE',
'DELET',
'DELETE-CHARACTER',
'DELETE-CHAR',
'DELETE-CHARA',
'DELETE-CHARAC',
'DELETE-CHARACT',
'DELETE-CHARACTE',
'DELETE-CURRENT-ROW',
'DELETE-LINE',
'DELETE-RESULT-LIST-ENTRY',
'DELETE-SELECTED-ROW',
'DELETE-SELECTED-ROWS',
'DELIMITER',
'DESC',
'DESCENDING',
'DESCE',
'DESCEN',
'DESCEND',
'DESCENDI',
'DESCENDIN',
'DESELECT-FOCUSED-ROW',
'DESELECTION',
'DESELECT-ROWS',
'DESELECT-SELECTED-ROW',
'DESTRUCTOR',
'DIALOG-BOX',
'DICTIONARY',
'DICT',
'DICTI',
'DICTIO',
'DICTION',
'DICTIONA',
'DICTIONAR',
'DIR',
'DISABLE',
'DISABLE-AUTO-ZAP',
'DISABLED',
'DISABLE-DUMP-TRIGGERS',
'DISABLE-LOAD-TRIGGERS',
'DISCONNECT',
'DISCON',
'DISCONN',
'DISCONNE',
'DISCONNEC',
'DISP',
'DISPLAY',
'DISPL',
'DISPLA',
'DISPLAY-MESSAGE',
'DISPLAY-TYPE',
'DISPLAY-T',
'DISPLAY-TY',
'DISPLAY-TYP',
'DISTINCT',
'DO',
'DOMAIN-DESCRIPTION',
'DOMAIN-NAME',
'DOMAIN-TYPE',
'DOS',
'DOUBLE',
'DOWN',
'DRAG-ENABLED',
'DROP',
'DROP-DOWN',
'DROP-DOWN-LIST',
'DROP-FILE-NOTIFY',
'DROP-TARGET',
'DUMP',
'DYNAMIC',
'DYNAMIC-FUNCTION',
'EACH',
'ECHO',
'EDGE-CHARS',
'EDGE',
'EDGE-',
'EDGE-C',
'EDGE-CH',
'EDGE-CHA',
'EDGE-CHAR',
'EDGE-PIXELS',
'EDGE-P',
'EDGE-PI',
'EDGE-PIX',
'EDGE-PIXE',
'EDGE-PIXEL',
'EDIT-CAN-PASTE',
'EDIT-CAN-UNDO',
'EDIT-CLEAR',
'EDIT-COPY',
'EDIT-CUT',
'EDITING',
'EDITOR',
'EDIT-PASTE',
'EDIT-UNDO',
'ELSE',
'EMPTY',
'EMPTY-TEMP-TABLE',
'ENABLE',
'ENABLED-FIELDS',
'ENCODE',
'ENCRYPT',
'ENCRYPT-AUDIT-MAC-KEY',
'ENCRYPTION-SALT',
'END',
'END-DOCUMENT',
'END-ELEMENT',
'END-EVENT-GROUP',
'END-FILE-DROP',
'ENDKEY',
'END-KEY',
'END-MOVE',
'END-RESIZE',
'END-ROW-RESIZE',
'END-USER-PROMPT',
'ENTERED',
'ENTRY',
'EQ',
'ERROR',
'ERROR-COLUMN',
'ERROR-COL',
'ERROR-COLU',
'ERROR-COLUM',
'ERROR-ROW',
'ERROR-STACK-TRACE',
'ERROR-STATUS',
'ERROR-STAT',
'ERROR-STATU',
'ESCAPE',
'ETIME',
'EVENT-GROUP-ID',
'EVENT-PROCEDURE',
'EVENT-PROCEDURE-CONTEXT',
'EVENTS',
'EVENT',
'EVENT-TYPE',
'EVENT-T',
'EVENT-TY',
'EVENT-TYP',
'EXCEPT',
'EXCLUSIVE-ID',
'EXCLUSIVE-LOCK',
'EXCLUSIVE',
'EXCLUSIVE-',
'EXCLUSIVE-L',
'EXCLUSIVE-LO',
'EXCLUSIVE-LOC',
'EXCLUSIVE-WEB-USER',
'EXECUTE',
'EXISTS',
'EXP',
'EXPAND',
'EXPANDABLE',
'EXPLICIT',
'EXPORT',
'EXPORT-PRINCIPAL',
'EXTENDED',
'EXTENT',
'EXTERNAL',
'FALSE',
'FETCH',
'FETCH-SELECTED-ROW',
'FGCOLOR',
'FGC',
'FGCO',
'FGCOL',
'FGCOLO',
'FIELD',
'FIELDS',
'FILE',
'FILE-CREATE-DATE',
'FILE-CREATE-TIME',
'FILE-INFORMATION',
'FILE-INFO',
'FILE-INFOR',
'FILE-INFORM',
'FILE-INFORMA',
'FILE-INFORMAT',
'FILE-INFORMATI',
'FILE-INFORMATIO',
'FILE-MOD-DATE',
'FILE-MOD-TIME',
'FILENAME',
'FILE-NAME',
'FILE-OFFSET',
'FILE-OFF',
'FILE-OFFS',
'FILE-OFFSE',
'FILE-SIZE',
'FILE-TYPE',
'FILL',
'FILLED',
'FILL-IN',
'FILTERS',
'FINAL',
'FINALLY',
'FIND',
'FIND-BY-ROWID',
'FIND-CASE-SENSITIVE',
'FIND-CURRENT',
'FINDER',
'FIND-FIRST',
'FIND-GLOBAL',
'FIND-LAST',
'FIND-NEXT-OCCURRENCE',
'FIND-PREV-OCCURRENCE',
'FIND-SELECT',
'FIND-UNIQUE',
'FIND-WRAP-AROUND',
'FIRST',
'FIRST-ASYNCH-REQUEST',
'FIRST-CHILD',
'FIRST-COLUMN',
'FIRST-FORM',
'FIRST-OBJECT',
'FIRST-OF',
'FIRST-PROCEDURE',
'FIRST-PROC',
'FIRST-PROCE',
'FIRST-PROCED',
'FIRST-PROCEDU',
'FIRST-PROCEDUR',
'FIRST-SERVER',
'FIRST-TAB-ITEM',
'FIRST-TAB-I',
'FIRST-TAB-IT',
'FIRST-TAB-ITE',
'FIT-LAST-COLUMN',
'FIXED-ONLY',
'FLAT-BUTTON',
'FLOAT',
'FOCUS',
'FOCUSED-ROW',
'FOCUSED-ROW-SELECTED',
'FONT',
'FONT-TABLE',
'FOR',
'FORCE-FILE',
'FOREGROUND',
'FORE',
'FOREG',
'FOREGR',
'FOREGRO',
'FOREGROU',
'FOREGROUN',
'FORM',
'FORMAT',
'FORMA',
'FORMATTED',
'FORMATTE',
'FORM-LONG-INPUT',
'FORWARD',
'FORWARDS',
'FRAGMENT',
'FRAGMEN',
'FRAME',
'FRAM',
'FRAME-COL',
'FRAME-DB',
'FRAME-DOWN',
'FRAME-FIELD',
'FRAME-FILE',
'FRAME-INDEX',
'FRAME-INDE',
'FRAME-LINE',
'FRAME-NAME',
'FRAME-ROW',
'FRAME-SPACING',
'FRAME-SPA',
'FRAME-SPAC',
'FRAME-SPACI',
'FRAME-SPACIN',
'FRAME-VALUE',
'FRAME-VAL',
'FRAME-VALU',
'FRAME-X',
'FRAME-Y',
'FREQUENCY',
'FROM',
'FROM-CHARS',
'FROM-C',
'FROM-CH',
'FROM-CHA',
'FROM-CHAR',
'FROM-CURRENT',
'FROM-CUR',
'FROM-CURR',
'FROM-CURRE',
'FROM-CURREN',
'FROM-PIXELS',
'FROM-P',
'FROM-PI',
'FROM-PIX',
'FROM-PIXE',
'FROM-PIXEL',
'FULL-HEIGHT-CHARS',
'FULL-HEIGHT',
'FULL-HEIGHT-',
'FULL-HEIGHT-C',
'FULL-HEIGHT-CH',
'FULL-HEIGHT-CHA',
'FULL-HEIGHT-CHAR',
'FULL-HEIGHT-PIXELS',
'FULL-HEIGHT-P',
'FULL-HEIGHT-PI',
'FULL-HEIGHT-PIX',
'FULL-HEIGHT-PIXE',
'FULL-HEIGHT-PIXEL',
'FULL-PATHNAME',
'FULL-PATHN',
'FULL-PATHNA',
'FULL-PATHNAM',
'FULL-WIDTH-CHARS',
'FULL-WIDTH',
'FULL-WIDTH-',
'FULL-WIDTH-C',
'FULL-WIDTH-CH',
'FULL-WIDTH-CHA',
'FULL-WIDTH-CHAR',
'FULL-WIDTH-PIXELS',
'FULL-WIDTH-P',
'FULL-WIDTH-PI',
'FULL-WIDTH-PIX',
'FULL-WIDTH-PIXE',
'FULL-WIDTH-PIXEL',
'FUNCTION',
'FUNCTION-CALL-TYPE',
'GATEWAYS',
'GATEWAY',
'GE',
'GENERATE-MD5',
'GENERATE-PBE-KEY',
'GENERATE-PBE-SALT',
'GENERATE-RANDOM-KEY',
'GENERATE-UUID',
'GET',
'GET-ATTR-CALL-TYPE',
'GET-ATTRIBUTE-NODE',
'GET-BINARY-DATA',
'GET-BLUE-VALUE',
'GET-BLUE',
'GET-BLUE-',
'GET-BLUE-V',
'GET-BLUE-VA',
'GET-BLUE-VAL',
'GET-BLUE-VALU',
'GET-BROWSE-COLUMN',
'GET-BUFFER-HANDLEGETBYTE',
'GET-BYTE',
'GET-CALLBACK-PROC-CONTEXT',
'GET-CALLBACK-PROC-NAME',
'GET-CGI-LIST',
'GET-CGI-LONG-VALUE',
'GET-CGI-VALUE',
'GET-CODEPAGES',
'GET-COLLATIONS',
'GET-CONFIG-VALUE',
'GET-CURRENT',
'GET-DOUBLE',
'GET-DROPPED-FILE',
'GET-DYNAMIC',
'GET-ERROR-COLUMN',
'GET-ERROR-ROW',
'GET-FILE',
'GET-FILE-NAME',
'GET-FILE-OFFSET',
'GET-FILE-OFFSE',
'GET-FIRST',
'GET-FLOAT',
'GET-GREEN-VALUE',
'GET-GREEN',
'GET-GREEN-',
'GET-GREEN-V',
'GET-GREEN-VA',
'GET-GREEN-VAL',
'GET-GREEN-VALU',
'GET-INDEX-BY-NAMESPACE-NAME',
'GET-INDEX-BY-QNAME',
'GET-INT64',
'GET-ITERATION',
'GET-KEY-VALUE',
'GET-KEY-VAL',
'GET-KEY-VALU',
'GET-LAST',
'GET-LOCALNAME-BY-INDEX',
'GET-LONG',
'GET-MESSAGE',
'GET-NEXT',
'GET-NUMBER',
'GET-POINTER-VALUE',
'GET-PREV',
'GET-PRINTERS',
'GET-PROPERTY',
'GET-QNAME-BY-INDEX',
'GET-RED-VALUE',
'GET-RED',
'GET-RED-',
'GET-RED-V',
'GET-RED-VA',
'GET-RED-VAL',
'GET-RED-VALU',
'GET-REPOSITIONED-ROW',
'GET-RGB-VALUE',
'GET-SELECTED-WIDGET',
'GET-SELECTED',
'GET-SELECTED-',
'GET-SELECTED-W',
'GET-SELECTED-WI',
'GET-SELECTED-WID',
'GET-SELECTED-WIDG',
'GET-SELECTED-WIDGE',
'GET-SHORT',
'GET-SIGNATURE',
'GET-SIZE',
'GET-STRING',
'GET-TAB-ITEM',
'GET-TEXT-HEIGHT-CHARS',
'GET-TEXT-HEIGHT',
'GET-TEXT-HEIGHT-',
'GET-TEXT-HEIGHT-C',
'GET-TEXT-HEIGHT-CH',
'GET-TEXT-HEIGHT-CHA',
'GET-TEXT-HEIGHT-CHAR',
'GET-TEXT-HEIGHT-PIXELS',
'GET-TEXT-HEIGHT-P',
'GET-TEXT-HEIGHT-PI',
'GET-TEXT-HEIGHT-PIX',
'GET-TEXT-HEIGHT-PIXE',
'GET-TEXT-HEIGHT-PIXEL',
'GET-TEXT-WIDTH-CHARS',
'GET-TEXT-WIDTH',
'GET-TEXT-WIDTH-',
'GET-TEXT-WIDTH-C',
'GET-TEXT-WIDTH-CH',
'GET-TEXT-WIDTH-CHA',
'GET-TEXT-WIDTH-CHAR',
'GET-TEXT-WIDTH-PIXELS',
'GET-TEXT-WIDTH-P',
'GET-TEXT-WIDTH-PI',
'GET-TEXT-WIDTH-PIX',
'GET-TEXT-WIDTH-PIXE',
'GET-TEXT-WIDTH-PIXEL',
'GET-TYPE-BY-INDEX',
'GET-TYPE-BY-NAMESPACE-NAME',
'GET-TYPE-BY-QNAME',
'GET-UNSIGNED-LONG',
'GET-UNSIGNED-SHORT',
'GET-URI-BY-INDEX',
'GET-VALUE-BY-INDEX',
'GET-VALUE-BY-NAMESPACE-NAME',
'GET-VALUE-BY-QNAME',
'GET-WAIT-STATE',
'GLOBAL',
'GO-ON',
'GO-PENDING',
'GO-PEND',
'GO-PENDI',
'GO-PENDIN',
'GRANT',
'GRAPHIC-EDGE',
'GRAPHIC-E',
'GRAPHIC-ED',
'GRAPHIC-EDG',
'GRID-FACTOR-HORIZONTAL',
'GRID-FACTOR-H',
'GRID-FACTOR-HO',
'GRID-FACTOR-HOR',
'GRID-FACTOR-HORI',
'GRID-FACTOR-HORIZ',
'GRID-FACTOR-HORIZO',
'GRID-FACTOR-HORIZON',
'GRID-FACTOR-HORIZONT',
'GRID-FACTOR-HORIZONTA',
'GRID-FACTOR-VERTICAL',
'GRID-FACTOR-V',
'GRID-FACTOR-VE',
'GRID-FACTOR-VER',
'GRID-FACTOR-VERT',
'GRID-FACTOR-VERTI',
'GRID-FACTOR-VERTIC',
'GRID-FACTOR-VERTICA',
'GRID-SNAP',
'GRID-UNIT-HEIGHT-CHARS',
'GRID-UNIT-HEIGHT',
'GRID-UNIT-HEIGHT-',
'GRID-UNIT-HEIGHT-C',
'GRID-UNIT-HEIGHT-CH',
'GRID-UNIT-HEIGHT-CHA',
'GRID-UNIT-HEIGHT-PIXELS',
'GRID-UNIT-HEIGHT-P',
'GRID-UNIT-HEIGHT-PI',
'GRID-UNIT-HEIGHT-PIX',
'GRID-UNIT-HEIGHT-PIXE',
'GRID-UNIT-HEIGHT-PIXEL',
'GRID-UNIT-WIDTH-CHARS',
'GRID-UNIT-WIDTH',
'GRID-UNIT-WIDTH-',
'GRID-UNIT-WIDTH-C',
'GRID-UNIT-WIDTH-CH',
'GRID-UNIT-WIDTH-CHA',
'GRID-UNIT-WIDTH-CHAR',
'GRID-UNIT-WIDTH-PIXELS',
'GRID-UNIT-WIDTH-P',
'GRID-UNIT-WIDTH-PI',
'GRID-UNIT-WIDTH-PIX',
'GRID-UNIT-WIDTH-PIXE',
'GRID-UNIT-WIDTH-PIXEL',
'GRID-VISIBLE',
'GROUP',
'GT',
'GUID',
'HANDLER',
'HAS-RECORDS',
'HAVING',
'HEADER',
'HEIGHT-CHARS',
'HEIGHT',
'HEIGHT-',
'HEIGHT-C',
'HEIGHT-CH',
'HEIGHT-CHA',
'HEIGHT-CHAR',
'HEIGHT-PIXELS',
'HEIGHT-P',
'HEIGHT-PI',
'HEIGHT-PIX',
'HEIGHT-PIXE',
'HEIGHT-PIXEL',
'HELP',
'HEX-DECODE',
'HEX-ENCODE',
'HIDDEN',
'HIDE',
'HORIZONTAL',
'HORI',
'HORIZ',
'HORIZO',
'HORIZON',
'HORIZONT',
'HORIZONTA',
'HOST-BYTE-ORDER',
'HTML-CHARSET',
'HTML-END-OF-LINE',
'HTML-END-OF-PAGE',
'HTML-FRAME-BEGIN',
'HTML-FRAME-END',
'HTML-HEADER-BEGIN',
'HTML-HEADER-END',
'HTML-TITLE-BEGIN',
'HTML-TITLE-END',
'HWND',
'ICON',
'IF',
'IMAGE',
'IMAGE-DOWN',
'IMAGE-INSENSITIVE',
'IMAGE-SIZE',
'IMAGE-SIZE-CHARS',
'IMAGE-SIZE-C',
'IMAGE-SIZE-CH',
'IMAGE-SIZE-CHA',
'IMAGE-SIZE-CHAR',
'IMAGE-SIZE-PIXELS',
'IMAGE-SIZE-P',
'IMAGE-SIZE-PI',
'IMAGE-SIZE-PIX',
'IMAGE-SIZE-PIXE',
'IMAGE-SIZE-PIXEL',
'IMAGE-UP',
'IMMEDIATE-DISPLAY',
'IMPLEMENTS',
'IMPORT',
'IMPORT-PRINCIPAL',
'IN',
'INCREMENT-EXCLUSIVE-ID',
'INDEX',
'INDEXED-REPOSITION',
'INDEX-HINT',
'INDEX-INFORMATION',
'INDICATOR',
'INFORMATION',
'INFO',
'INFOR',
'INFORM',
'INFORMA',
'INFORMAT',
'INFORMATI',
'INFORMATIO',
'IN-HANDLE',
'INHERIT-BGCOLOR',
'INHERIT-BGC',
'INHERIT-BGCO',
'INHERIT-BGCOL',
'INHERIT-BGCOLO',
'INHERIT-FGCOLOR',
'INHERIT-FGC',
'INHERIT-FGCO',
'INHERIT-FGCOL',
'INHERIT-FGCOLO',
'INHERITS',
'INITIAL',
'INIT',
'INITI',
'INITIA',
'INITIAL-DIR',
'INITIAL-FILTER',
'INITIALIZE-DOCUMENT-TYPE',
'INITIATE',
'INNER-CHARS',
'INNER-LINES',
'INPUT',
'INPUT-OUTPUT',
'INPUT-O',
'INPUT-OU',
'INPUT-OUT',
'INPUT-OUTP',
'INPUT-OUTPU',
'INPUT-VALUE',
'INSERT',
'INSERT-ATTRIBUTE',
'INSERT-BACKTAB',
'INSERT-B',
'INSERT-BA',
'INSERT-BAC',
'INSERT-BACK',
'INSERT-BACKT',
'INSERT-BACKTA',
'INSERT-FILE',
'INSERT-ROW',
'INSERT-STRING',
'INSERT-TAB',
'INSERT-T',
'INSERT-TA',
'INTERFACE',
'INTERNAL-ENTRIES',
'INTO',
'INVOKE',
'IS',
'IS-ATTR-SPACE',
'IS-ATTR',
'IS-ATTR-',
'IS-ATTR-S',
'IS-ATTR-SP',
'IS-ATTR-SPA',
'IS-ATTR-SPAC',
'IS-CLASS',
'IS-CLAS',
'IS-LEAD-BYTE',
'IS-OPEN',
'IS-PARAMETER-SET',
'IS-ROW-SELECTED',
'IS-SELECTED',
'ITEM',
'ITEMS-PER-ROW',
'JOIN',
'JOIN-BY-SQLDB',
'KBLABEL',
'KEEP-CONNECTION-OPEN',
'KEEP-FRAME-Z-ORDER',
'KEEP-FRAME-Z',
'KEEP-FRAME-Z-',
'KEEP-FRAME-Z-O',
'KEEP-FRAME-Z-OR',
'KEEP-FRAME-Z-ORD',
'KEEP-FRAME-Z-ORDE',
'KEEP-MESSAGES',
'KEEP-SECURITY-CACHE',
'KEEP-TAB-ORDER',
'KEY',
'KEYCODE',
'KEY-CODE',
'KEYFUNCTION',
'KEYFUNC',
'KEYFUNCT',
'KEYFUNCTI',
'KEYFUNCTIO',
'KEY-FUNCTION',
'KEY-FUNC',
'KEY-FUNCT',
'KEY-FUNCTI',
'KEY-FUNCTIO',
'KEYLABEL',
'KEY-LABEL',
'KEYS',
'KEYWORD',
'KEYWORD-ALL',
'LABEL',
'LABEL-BGCOLOR',
'LABEL-BGC',
'LABEL-BGCO',
'LABEL-BGCOL',
'LABEL-BGCOLO',
'LABEL-DCOLOR',
'LABEL-DC',
'LABEL-DCO',
'LABEL-DCOL',
'LABEL-DCOLO',
'LABEL-FGCOLOR',
'LABEL-FGC',
'LABEL-FGCO',
'LABEL-FGCOL',
'LABEL-FGCOLO',
'LABEL-FONT',
'LABEL-PFCOLOR',
'LABEL-PFC',
'LABEL-PFCO',
'LABEL-PFCOL',
'LABEL-PFCOLO',
'LABELS',
'LANDSCAPE',
'LANGUAGES',
'LANGUAGE',
'LARGE',
'LARGE-TO-SMALL',
'LAST',
'LAST-ASYNCH-REQUEST',
'LAST-BATCH',
'LAST-CHILD',
'LAST-EVENT',
'LAST-EVEN',
'LAST-FORM',
'LASTKEY',
'LAST-KEY',
'LAST-OBJECT',
'LAST-OF',
'LAST-PROCEDURE',
'LAST-PROCE',
'LAST-PROCED',
'LAST-PROCEDU',
'LAST-PROCEDUR',
'LAST-SERVER',
'LAST-TAB-ITEM',
'LAST-TAB-I',
'LAST-TAB-IT',
'LAST-TAB-ITE',
'LC',
'LDBNAME',
'LE',
'LEAVE',
'LEFT-ALIGNED',
'LEFT-ALIGN',
'LEFT-ALIGNE',
'LEFT-TRIM',
'LENGTH',
'LIBRARY',
'LIKE',
'LIKE-SEQUENTIAL',
'LINE',
'LINE-COUNTER',
'LINE-COUNT',
'LINE-COUNTE',
'LIST-EVENTS',
'LISTING',
'LISTI',
'LISTIN',
'LIST-ITEM-PAIRS',
'LIST-ITEMS',
'LIST-PROPERTY-NAMES',
'LIST-QUERY-ATTRS',
'LIST-SET-ATTRS',
'LIST-WIDGETS',
'LITERAL-QUESTION',
'LITTLE-ENDIAN',
'LOAD',
'LOAD-DOMAINS',
'LOAD-ICON',
'LOAD-IMAGE',
'LOAD-IMAGE-DOWN',
'LOAD-IMAGE-INSENSITIVE',
'LOAD-IMAGE-UP',
'LOAD-MOUSE-POINTER',
'LOAD-MOUSE-P',
'LOAD-MOUSE-PO',
'LOAD-MOUSE-POI',
'LOAD-MOUSE-POIN',
'LOAD-MOUSE-POINT',
'LOAD-MOUSE-POINTE',
'LOAD-PICTURE',
'LOAD-SMALL-ICON',
'LOCAL-NAME',
'LOCATOR-COLUMN-NUMBER',
'LOCATOR-LINE-NUMBER',
'LOCATOR-PUBLIC-ID',
'LOCATOR-SYSTEM-ID',
'LOCATOR-TYPE',
'LOCKED',
'LOCK-REGISTRATION',
'LOG',
'LOG-AUDIT-EVENT',
'LOGIN-EXPIRATION-TIMESTAMP',
'LOGIN-HOST',
'LOGIN-STATE',
'LOG-MANAGER',
'LOGOUT',
'LOOKAHEAD',
'LOOKUP',
'LT',
'MACHINE-CLASS',
'MANDATORY',
'MANUAL-HIGHLIGHT',
'MAP',
'MARGIN-EXTRA',
'MARGIN-HEIGHT-CHARS',
'MARGIN-HEIGHT',
'MARGIN-HEIGHT-',
'MARGIN-HEIGHT-C',
'MARGIN-HEIGHT-CH',
'MARGIN-HEIGHT-CHA',
'MARGIN-HEIGHT-CHAR',
'MARGIN-HEIGHT-PIXELS',
'MARGIN-HEIGHT-P',
'MARGIN-HEIGHT-PI',
'MARGIN-HEIGHT-PIX',
'MARGIN-HEIGHT-PIXE',
'MARGIN-HEIGHT-PIXEL',
'MARGIN-WIDTH-CHARS',
'MARGIN-WIDTH',
'MARGIN-WIDTH-',
'MARGIN-WIDTH-C',
'MARGIN-WIDTH-CH',
'MARGIN-WIDTH-CHA',
'MARGIN-WIDTH-CHAR',
'MARGIN-WIDTH-PIXELS',
'MARGIN-WIDTH-P',
'MARGIN-WIDTH-PI',
'MARGIN-WIDTH-PIX',
'MARGIN-WIDTH-PIXE',
'MARGIN-WIDTH-PIXEL',
'MARK-NEW',
'MARK-ROW-STATE',
'MATCHES',
'MAX-BUTTON',
'MAX-CHARS',
'MAX-DATA-GUESS',
'MAX-HEIGHT',
'MAX-HEIGHT-CHARS',
'MAX-HEIGHT-C',
'MAX-HEIGHT-CH',
'MAX-HEIGHT-CHA',
'MAX-HEIGHT-CHAR',
'MAX-HEIGHT-PIXELS',
'MAX-HEIGHT-P',
'MAX-HEIGHT-PI',
'MAX-HEIGHT-PIX',
'MAX-HEIGHT-PIXE',
'MAX-HEIGHT-PIXEL',
'MAXIMIZE',
'MAXIMUM',
'MAX',
'MAXI',
'MAXIM',
'MAXIMU',
'MAXIMUM-LEVEL',
'MAX-ROWS',
'MAX-SIZE',
'MAX-VALUE',
'MAX-VAL',
'MAX-VALU',
'MAX-WIDTH-CHARS',
'MAX-WIDTH',
'MAX-WIDTH-',
'MAX-WIDTH-C',
'MAX-WIDTH-CH',
'MAX-WIDTH-CHA',
'MAX-WIDTH-CHAR',
'MAX-WIDTH-PIXELS',
'MAX-WIDTH-P',
'MAX-WIDTH-PI',
'MAX-WIDTH-PIX',
'MAX-WIDTH-PIXE',
'MAX-WIDTH-PIXEL',
'MD5-DIGEST',
'MEMBER',
'MEMPTR-TO-NODE-VALUE',
'MENU',
'MENUBAR',
'MENU-BAR',
'MENU-ITEM',
'MENU-KEY',
'MENU-K',
'MENU-KE',
'MENU-MOUSE',
'MENU-M',
'MENU-MO',
'MENU-MOU',
'MENU-MOUS',
'MERGE-BY-FIELD',
'MESSAGE',
'MESSAGE-AREA',
'MESSAGE-AREA-FONT',
'MESSAGE-LINES',
'METHOD',
'MIN-BUTTON',
'MIN-COLUMN-WIDTH-CHARS',
'MIN-COLUMN-WIDTH-C',
'MIN-COLUMN-WIDTH-CH',
'MIN-COLUMN-WIDTH-CHA',
'MIN-COLUMN-WIDTH-CHAR',
'MIN-COLUMN-WIDTH-PIXELS',
'MIN-COLUMN-WIDTH-P',
'MIN-COLUMN-WIDTH-PI',
'MIN-COLUMN-WIDTH-PIX',
'MIN-COLUMN-WIDTH-PIXE',
'MIN-COLUMN-WIDTH-PIXEL',
'MIN-HEIGHT-CHARS',
'MIN-HEIGHT',
'MIN-HEIGHT-',
'MIN-HEIGHT-C',
'MIN-HEIGHT-CH',
'MIN-HEIGHT-CHA',
'MIN-HEIGHT-CHAR',
'MIN-HEIGHT-PIXELS',
'MIN-HEIGHT-P',
'MIN-HEIGHT-PI',
'MIN-HEIGHT-PIX',
'MIN-HEIGHT-PIXE',
'MIN-HEIGHT-PIXEL',
'MINIMUM',
'MIN',
'MINI',
'MINIM',
'MINIMU',
'MIN-SIZE',
'MIN-VALUE',
'MIN-VAL',
'MIN-VALU',
'MIN-WIDTH-CHARS',
'MIN-WIDTH',
'MIN-WIDTH-',
'MIN-WIDTH-C',
'MIN-WIDTH-CH',
'MIN-WIDTH-CHA',
'MIN-WIDTH-CHAR',
'MIN-WIDTH-PIXELS',
'MIN-WIDTH-P',
'MIN-WIDTH-PI',
'MIN-WIDTH-PIX',
'MIN-WIDTH-PIXE',
'MIN-WIDTH-PIXEL',
'MODIFIED',
'MODULO',
'MOD',
'MODU',
'MODUL',
'MONTH',
'MOUSE',
'MOUSE-POINTER',
'MOUSE-P',
'MOUSE-PO',
'MOUSE-POI',
'MOUSE-POIN',
'MOUSE-POINT',
'MOUSE-POINTE',
'MOVABLE',
'MOVE-AFTER-TAB-ITEM',
'MOVE-AFTER',
'MOVE-AFTER-',
'MOVE-AFTER-T',
'MOVE-AFTER-TA',
'MOVE-AFTER-TAB',
'MOVE-AFTER-TAB-',
'MOVE-AFTER-TAB-I',
'MOVE-AFTER-TAB-IT',
'MOVE-AFTER-TAB-ITE',
'MOVE-BEFORE-TAB-ITEM',
'MOVE-BEFOR',
'MOVE-BEFORE',
'MOVE-BEFORE-',
'MOVE-BEFORE-T',
'MOVE-BEFORE-TA',
'MOVE-BEFORE-TAB',
'MOVE-BEFORE-TAB-',
'MOVE-BEFORE-TAB-I',
'MOVE-BEFORE-TAB-IT',
'MOVE-BEFORE-TAB-ITE',
'MOVE-COLUMN',
'MOVE-COL',
'MOVE-COLU',
'MOVE-COLUM',
'MOVE-TO-BOTTOM',
'MOVE-TO-B',
'MOVE-TO-BO',
'MOVE-TO-BOT',
'MOVE-TO-BOTT',
'MOVE-TO-BOTTO',
'MOVE-TO-EOF',
'MOVE-TO-TOP',
'MOVE-TO-T',
'MOVE-TO-TO',
'MPE',
'MULTI-COMPILE',
'MULTIPLE',
'MULTIPLE-KEY',
'MULTITASKING-INTERVAL',
'MUST-EXIST',
'NAME',
'NAMESPACE-PREFIX',
'NAMESPACE-URI',
'NATIVE',
'NE',
'NEEDS-APPSERVER-PROMPT',
'NEEDS-PROMPT',
'NEW',
'NEW-INSTANCE',
'NEW-ROW',
'NEXT',
'NEXT-COLUMN',
'NEXT-PROMPT',
'NEXT-ROWID',
'NEXT-SIBLING',
'NEXT-TAB-ITEM',
'NEXT-TAB-I',
'NEXT-TAB-IT',
'NEXT-TAB-ITE',
'NEXT-VALUE',
'NO',
'NO-APPLY',
'NO-ARRAY-MESSAGE',
'NO-ASSIGN',
'NO-ATTR-LIST',
'NO-ATTR',
'NO-ATTR-',
'NO-ATTR-L',
'NO-ATTR-LI',
'NO-ATTR-LIS',
'NO-ATTR-SPACE',
'NO-ATTR-S',
'NO-ATTR-SP',
'NO-ATTR-SPA',
'NO-ATTR-SPAC',
'NO-AUTO-VALIDATE',
'NO-BIND-WHERE',
'NO-BOX',
'NO-CONSOLE',
'NO-CONVERT',
'NO-CONVERT-3D-COLORS',
'NO-CURRENT-VALUE',
'NO-DEBUG',
'NODE-VALUE-TO-MEMPTR',
'NO-DRAG',
'NO-ECHO',
'NO-EMPTY-SPACE',
'NO-ERROR',
'NO-FILL',
'NO-F',
'NO-FI',
'NO-FIL',
'NO-FOCUS',
'NO-HELP',
'NO-HIDE',
'NO-INDEX-HINT',
'NO-INHERIT-BGCOLOR',
'NO-INHERIT-BGC',
'NO-INHERIT-BGCO',
'NO-INHERIT-FGCOLOR',
'NO-INHERIT-FGC',
'NO-INHERIT-FGCO',
'NO-INHERIT-FGCOL',
'NO-INHERIT-FGCOLO',
'NO-JOIN-BY-SQLDB',
'NO-LABELS',
'NO-LABE',
'NO-LOBS',
'NO-LOCK',
'NO-LOOKAHEAD',
'NO-MAP',
'NO-MESSAGE',
'NO-MES',
'NO-MESS',
'NO-MESSA',
'NO-MESSAG',
'NONAMESPACE-SCHEMA-LOCATION',
'NONE',
'NO-PAUSE',
'NO-PREFETCH',
'NO-PREFE',
'NO-PREFET',
'NO-PREFETC',
'NORMALIZE',
'NO-ROW-MARKERS',
'NO-SCROLLBAR-VERTICAL',
'NO-SEPARATE-CONNECTION',
'NO-SEPARATORS',
'NOT',
'NO-TAB-STOP',
'NOT-ACTIVE',
'NO-UNDERLINE',
'NO-UND',
'NO-UNDE',
'NO-UNDER',
'NO-UNDERL',
'NO-UNDERLI',
'NO-UNDERLIN',
'NO-UNDO',
'NO-VALIDATE',
'NO-VAL',
'NO-VALI',
'NO-VALID',
'NO-VALIDA',
'NO-VALIDAT',
'NOW',
'NO-WAIT',
'NO-WORD-WRAP',
'NULL',
'NUM-ALIASES',
'NUM-ALI',
'NUM-ALIA',
'NUM-ALIAS',
'NUM-ALIASE',
'NUM-BUFFERS',
'NUM-BUTTONS',
'NUM-BUT',
'NUM-BUTT',
'NUM-BUTTO',
'NUM-BUTTON',
'NUM-COLUMNS',
'NUM-COL',
'NUM-COLU',
'NUM-COLUM',
'NUM-COLUMN',
'NUM-COPIES',
'NUM-DBS',
'NUM-DROPPED-FILES',
'NUM-ENTRIES',
'NUMERIC',
'NUMERIC-FORMAT',
'NUMERIC-F',
'NUMERIC-FO',
'NUMERIC-FOR',
'NUMERIC-FORM',
'NUMERIC-FORMA',
'NUM-FIELDS',
'NUM-FORMATS',
'NUM-ITEMS',
'NUM-ITERATIONS',
'NUM-LINES',
'NUM-LOCKED-COLUMNS',
'NUM-LOCKED-COL',
'NUM-LOCKED-COLU',
'NUM-LOCKED-COLUM',
'NUM-LOCKED-COLUMN',
'NUM-MESSAGES',
'NUM-PARAMETERS',
'NUM-REFERENCES',
'NUM-REPLACED',
'NUM-RESULTS',
'NUM-SELECTED-ROWS',
'NUM-SELECTED-WIDGETS',
'NUM-SELECTED',
'NUM-SELECTED-',
'NUM-SELECTED-W',
'NUM-SELECTED-WI',
'NUM-SELECTED-WID',
'NUM-SELECTED-WIDG',
'NUM-SELECTED-WIDGE',
'NUM-SELECTED-WIDGET',
'NUM-TABS',
'NUM-TO-RETAIN',
'NUM-VISIBLE-COLUMNS',
'OCTET-LENGTH',
'OF',
'OFF',
'OK',
'OK-CANCEL',
'OLD',
'ON',
'ON-FRAME-BORDER',
'ON-FRAME',
'ON-FRAME-',
'ON-FRAME-B',
'ON-FRAME-BO',
'ON-FRAME-BOR',
'ON-FRAME-BORD',
'ON-FRAME-BORDE',
'OPEN',
'OPSYS',
'OPTION',
'OR',
'ORDERED-JOIN',
'ORDINAL',
'OS-APPEND',
'OS-COMMAND',
'OS-COPY',
'OS-CREATE-DIR',
'OS-DELETE',
'OS-DIR',
'OS-DRIVES',
'OS-DRIVE',
'OS-ERROR',
'OS-GETENV',
'OS-RENAME',
'OTHERWISE',
'OUTPUT',
'OVERLAY',
'OVERRIDE',
'OWNER',
'PAGE',
'PAGE-BOTTOM',
'PAGE-BOT',
'PAGE-BOTT',
'PAGE-BOTTO',
'PAGED',
'PAGE-NUMBER',
'PAGE-NUM',
'PAGE-NUMB',
'PAGE-NUMBE',
'PAGE-SIZE',
'PAGE-TOP',
'PAGE-WIDTH',
'PAGE-WID',
'PAGE-WIDT',
'PARAMETER',
'PARAM',
'PARAME',
'PARAMET',
'PARAMETE',
'PARENT',
'PARSE-STATUS',
'PARTIAL-KEY',
'PASCAL',
'PASSWORD-FIELD',
'PATHNAME',
'PAUSE',
'PBE-HASH-ALGORITHM',
'PBE-HASH-ALG',
'PBE-HASH-ALGO',
'PBE-HASH-ALGOR',
'PBE-HASH-ALGORI',
'PBE-HASH-ALGORIT',
'PBE-HASH-ALGORITH',
'PBE-KEY-ROUNDS',
'PDBNAME',
'PERSISTENT',
'PERSIST',
'PERSISTE',
'PERSISTEN',
'PERSISTENT-CACHE-DISABLED',
'PFCOLOR',
'PFC',
'PFCO',
'PFCOL',
'PFCOLO',
'PIXELS',
'PIXELS-PER-COLUMN',
'PIXELS-PER-COL',
'PIXELS-PER-COLU',
'PIXELS-PER-COLUM',
'PIXELS-PER-ROW',
'POPUP-MENU',
'POPUP-M',
'POPUP-ME',
'POPUP-MEN',
'POPUP-ONLY',
'POPUP-O',
'POPUP-ON',
'POPUP-ONL',
'PORTRAIT',
'POSITION',
'PRECISION',
'PREFER-DATASET',
'PREPARED',
'PREPARE-STRING',
'PREPROCESS',
'PREPROC',
'PREPROCE',
'PREPROCES',
'PRESELECT',
'PRESEL',
'PRESELE',
'PRESELEC',
'PREV',
'PREV-COLUMN',
'PREV-SIBLING',
'PREV-TAB-ITEM',
'PREV-TAB-I',
'PREV-TAB-IT',
'PREV-TAB-ITE',
'PRIMARY',
'PRINTER',
'PRINTER-CONTROL-HANDLE',
'PRINTER-HDC',
'PRINTER-NAME',
'PRINTER-PORT',
'PRINTER-SETUP',
'PRIVATE',
'PRIVATE-DATA',
'PRIVATE-D',
'PRIVATE-DA',
'PRIVATE-DAT',
'PRIVILEGES',
'PROCEDURE',
'PROCE',
'PROCED',
'PROCEDU',
'PROCEDUR',
'PROCEDURE-CALL-TYPE',
'PROCESS',
'PROC-HANDLE',
'PROC-HA',
'PROC-HAN',
'PROC-HAND',
'PROC-HANDL',
'PROC-STATUS',
'PROC-ST',
'PROC-STA',
'PROC-STAT',
'PROC-STATU',
'proc-text',
'proc-text-buffe',
'PROFILER',
'PROGRAM-NAME',
'PROGRESS',
'PROGRESS-SOURCE',
'PROGRESS-S',
'PROGRESS-SO',
'PROGRESS-SOU',
'PROGRESS-SOUR',
'PROGRESS-SOURC',
'PROMPT',
'PROMPT-FOR',
'PROMPT-F',
'PROMPT-FO',
'PROMSGS',
'PROPATH',
'PROPERTY',
'PROTECTED',
'PROVERSION',
'PROVERS',
'PROVERSI',
'PROVERSIO',
'PROXY',
'PROXY-PASSWORD',
'PROXY-USERID',
'PUBLIC',
'PUBLIC-ID',
'PUBLISH',
'PUBLISHED-EVENTS',
'PUT',
'PUTBYTE',
'PUT-BYTE',
'PUT-DOUBLE',
'PUT-FLOAT',
'PUT-INT64',
'PUT-KEY-VALUE',
'PUT-KEY-VAL',
'PUT-KEY-VALU',
'PUT-LONG',
'PUT-SHORT',
'PUT-STRING',
'PUT-UNSIGNED-LONG',
'QUERY',
'QUERY-CLOSE',
'QUERY-OFF-END',
'QUERY-OPEN',
'QUERY-PREPARE',
'QUERY-TUNING',
'QUESTION',
'QUIT',
'QUOTER',
'RADIO-BUTTONS',
'RADIO-SET',
'RANDOM',
'RAW-TRANSFER',
'RCODE-INFORMATION',
'RCODE-INFO',
'RCODE-INFOR',
'RCODE-INFORM',
'RCODE-INFORMA',
'RCODE-INFORMAT',
'RCODE-INFORMATI',
'RCODE-INFORMATIO',
'READ-AVAILABLE',
'READ-EXACT-NUM',
'READ-FILE',
'READKEY',
'READ-ONLY',
'READ-XML',
'READ-XMLSCHEMA',
'REAL',
'RECORD-LENGTH',
'RECTANGLE',
'RECT',
'RECTA',
'RECTAN',
'RECTANG',
'RECTANGL',
'RECURSIVE',
'REFERENCE-ONLY',
'REFRESH',
'REFRESHABLE',
'REFRESH-AUDIT-POLICY',
'REGISTER-DOMAIN',
'RELEASE',
'REMOTE',
'REMOVE-EVENTS-PROCEDURE',
'REMOVE-SUPER-PROCEDURE',
'REPEAT',
'REPLACE',
'REPLACE-SELECTION-TEXT',
'REPOSITION',
'REPOSITION-BACKWARD',
'REPOSITION-FORWARD',
'REPOSITION-MODE',
'REPOSITION-TO-ROW',
'REPOSITION-TO-ROWID',
'REQUEST',
'RESET',
'RESIZABLE',
'RESIZA',
'RESIZAB',
'RESIZABL',
'RESIZE',
'RESTART-ROW',
'RESTART-ROWID',
'RETAIN',
'RETAIN-SHAPE',
'RETRY',
'RETRY-CANCEL',
'RETURN',
'RETURN-INSERTED',
'RETURN-INS',
'RETURN-INSE',
'RETURN-INSER',
'RETURN-INSERT',
'RETURN-INSERTE',
'RETURNS',
'RETURN-TO-START-DIR',
'RETURN-TO-START-DI',
'RETURN-VALUE',
'RETURN-VAL',
'RETURN-VALU',
'RETURN-VALUE-DATA-TYPE',
'REVERSE-FROM',
'REVERT',
'REVOKE',
'RGB-VALUE',
'RIGHT-ALIGNED',
'RETURN-ALIGN',
'RETURN-ALIGNE',
'RIGHT-TRIM',
'R-INDEX',
'ROLES',
'ROUND',
'ROUTINE-LEVEL',
'ROW',
'ROW-HEIGHT-CHARS',
'ROW-HEIGHT-PIXELS',
'ROW-MARKERS',
'ROW-OF',
'ROW-RESIZABLE',
'RULE',
'RUN',
'RUN-PROCEDURE',
'SAVE',
'SAVE-AS',
'SAVE-FILE',
'SAX-COMPLETE',
'SAX-COMPLE',
'SAX-COMPLET',
'SAX-PARSE',
'SAX-PARSE-FIRST',
'SAX-PARSE-NEXT',
'SAX-PARSER-ERROR',
'SAX-RUNNING',
'SAX-UNINITIALIZED',
'SAX-WRITE-BEGIN',
'SAX-WRITE-COMPLETE',
'SAX-WRITE-CONTENT',
'SAX-WRITE-ELEMENT',
'SAX-WRITE-ERROR',
'SAX-WRITE-IDLE',
'SAX-WRITER',
'SAX-WRITE-TAG',
'SCHEMA',
'SCHEMA-LOCATION',
'SCHEMA-MARSHAL',
'SCHEMA-PATH',
'SCREEN',
'SCREEN-IO',
'SCREEN-LINES',
'SCREEN-VALUE',
'SCREEN-VAL',
'SCREEN-VALU',
'SCROLL',
'SCROLLABLE',
'SCROLLBAR-HORIZONTAL',
'SCROLLBAR-H',
'SCROLLBAR-HO',
'SCROLLBAR-HOR',
'SCROLLBAR-HORI',
'SCROLLBAR-HORIZ',
'SCROLLBAR-HORIZO',
'SCROLLBAR-HORIZON',
'SCROLLBAR-HORIZONT',
'SCROLLBAR-HORIZONTA',
'SCROLL-BARS',
'SCROLLBAR-VERTICAL',
'SCROLLBAR-V',
'SCROLLBAR-VE',
'SCROLLBAR-VER',
'SCROLLBAR-VERT',
'SCROLLBAR-VERTI',
'SCROLLBAR-VERTIC',
'SCROLLBAR-VERTICA',
'SCROLL-DELTA',
'SCROLLED-ROW-POSITION',
'SCROLLED-ROW-POS',
'SCROLLED-ROW-POSI',
'SCROLLED-ROW-POSIT',
'SCROLLED-ROW-POSITI',
'SCROLLED-ROW-POSITIO',
'SCROLLING',
'SCROLL-OFFSET',
'SCROLL-TO-CURRENT-ROW',
'SCROLL-TO-ITEM',
'SCROLL-TO-I',
'SCROLL-TO-IT',
'SCROLL-TO-ITE',
'SCROLL-TO-SELECTED-ROW',
'SDBNAME',
'SEAL',
'SEAL-TIMESTAMP',
'SEARCH',
'SEARCH-SELF',
'SEARCH-TARGET',
'SECTION',
'SECURITY-POLICY',
'SEEK',
'SELECT',
'SELECTABLE',
'SELECT-ALL',
'SELECTED',
'SELECT-FOCUSED-ROW',
'SELECTION',
'SELECTION-END',
'SELECTION-LIST',
'SELECTION-START',
'SELECTION-TEXT',
'SELECT-NEXT-ROW',
'SELECT-PREV-ROW',
'SELECT-ROW',
'SELF',
'SEND',
'send-sql-statement',
'send-sql',
'SENSITIVE',
'SEPARATE-CONNECTION',
'SEPARATOR-FGCOLOR',
'SEPARATORS',
'SERVER',
'SERVER-CONNECTION-BOUND',
'SERVER-CONNECTION-BOUND-REQUEST',
'SERVER-CONNECTION-CONTEXT',
'SERVER-CONNECTION-ID',
'SERVER-OPERATING-MODE',
'SESSION',
'SESSION-ID',
'SET',
'SET-APPL-CONTEXT',
'SET-ATTR-CALL-TYPE',
'SET-ATTRIBUTE-NODE',
'SET-BLUE-VALUE',
'SET-BLUE',
'SET-BLUE-',
'SET-BLUE-V',
'SET-BLUE-VA',
'SET-BLUE-VAL',
'SET-BLUE-VALU',
'SET-BREAK',
'SET-BUFFERS',
'SET-CALLBACK',
'SET-CLIENT',
'SET-COMMIT',
'SET-CONTENTS',
'SET-CURRENT-VALUE',
'SET-DB-CLIENT',
'SET-DYNAMIC',
'SET-EVENT-MANAGER-OPTION',
'SET-GREEN-VALUE',
'SET-GREEN',
'SET-GREEN-',
'SET-GREEN-V',
'SET-GREEN-VA',
'SET-GREEN-VAL',
'SET-GREEN-VALU',
'SET-INPUT-SOURCE',
'SET-OPTION',
'SET-OUTPUT-DESTINATION',
'SET-PARAMETER',
'SET-POINTER-VALUE',
'SET-PROPERTY',
'SET-RED-VALUE',
'SET-RED',
'SET-RED-',
'SET-RED-V',
'SET-RED-VA',
'SET-RED-VAL',
'SET-RED-VALU',
'SET-REPOSITIONED-ROW',
'SET-RGB-VALUE',
'SET-ROLLBACK',
'SET-SELECTION',
'SET-SIZE',
'SET-SORT-ARROW',
'SETUSERID',
'SETUSER',
'SETUSERI',
'SET-WAIT-STATE',
'SHA1-DIGEST',
'SHARED',
'SHARE-LOCK',
'SHARE',
'SHARE-',
'SHARE-L',
'SHARE-LO',
'SHARE-LOC',
'SHOW-IN-TASKBAR',
'SHOW-STATS',
'SHOW-STAT',
'SIDE-LABEL-HANDLE',
'SIDE-LABEL-H',
'SIDE-LABEL-HA',
'SIDE-LABEL-HAN',
'SIDE-LABEL-HAND',
'SIDE-LABEL-HANDL',
'SIDE-LABELS',
'SIDE-LAB',
'SIDE-LABE',
'SIDE-LABEL',
'SILENT',
'SIMPLE',
'SINGLE',
'SIZE',
'SIZE-CHARS',
'SIZE-C',
'SIZE-CH',
'SIZE-CHA',
'SIZE-CHAR',
'SIZE-PIXELS',
'SIZE-P',
'SIZE-PI',
'SIZE-PIX',
'SIZE-PIXE',
'SIZE-PIXEL',
'SKIP',
'SKIP-DELETED-RECORD',
'SLIDER',
'SMALL-ICON',
'SMALLINT',
'SMALL-TITLE',
'SOME',
'SORT',
'SORT-ASCENDING',
'SORT-NUMBER',
'SOURCE',
'SOURCE-PROCEDURE',
'SPACE',
'SQL',
'SQRT',
'SSL-SERVER-NAME',
'STANDALONE',
'START',
'START-DOCUMENT',
'START-ELEMENT',
'START-MOVE',
'START-RESIZE',
'START-ROW-RESIZE',
'STATE-DETAIL',
'STATIC',
'STATUS',
'STATUS-AREA',
'STATUS-AREA-FONT',
'STDCALL',
'STOP',
'STOP-PARSING',
'STOPPED',
'STOPPE',
'STORED-PROCEDURE',
'STORED-PROC',
'STORED-PROCE',
'STORED-PROCED',
'STORED-PROCEDU',
'STORED-PROCEDUR',
'STREAM',
'STREAM-HANDLE',
'STREAM-IO',
'STRETCH-TO-FIT',
'STRICT',
'STRING',
'STRING-VALUE',
'STRING-XREF',
'SUB-AVERAGE',
'SUB-AVE',
'SUB-AVER',
'SUB-AVERA',
'SUB-AVERAG',
'SUB-COUNT',
'SUB-MAXIMUM',
'SUM-MAX',
'SUM-MAXI',
'SUM-MAXIM',
'SUM-MAXIMU',
'SUB-MENU',
'SUBSUB-',
'SUB-MIN',
'SUBSCRIBE',
'SUBSTITUTE',
'SUBST',
'SUBSTI',
'SUBSTIT',
'SUBSTITU',
'SUBSTITUT',
'SUBSTRING',
'SUBSTR',
'SUBSTRI',
'SUBSTRIN',
'SUB-TOTAL',
'SUBTYPE',
'SUM',
'SUPER',
'SUPER-PROCEDURES',
'SUPPRESS-NAMESPACE-PROCESSING',
'SUPPRESS-WARNINGS',
'SUPPRESS-W',
'SUPPRESS-WA',
'SUPPRESS-WAR',
'SUPPRESS-WARN',
'SUPPRESS-WARNI',
'SUPPRESS-WARNIN',
'SUPPRESS-WARNING',
'SYMMETRIC-ENCRYPTION-ALGORITHM',
'SYMMETRIC-ENCRYPTION-IV',
'SYMMETRIC-ENCRYPTION-KEY',
'SYMMETRIC-SUPPORT',
'SYSTEM-ALERT-BOXES',
'SYSTEM-ALERT',
'SYSTEM-ALERT-',
'SYSTEM-ALERT-B',
'SYSTEM-ALERT-BO',
'SYSTEM-ALERT-BOX',
'SYSTEM-ALERT-BOXE',
'SYSTEM-DIALOG',
'SYSTEM-HELP',
'SYSTEM-ID',
'TABLE',
'TABLE-HANDLE',
'TABLE-NUMBER',
'TAB-POSITION',
'TAB-STOP',
'TARGET',
'TARGET-PROCEDURE',
'TEMP-DIRECTORY',
'TEMP-DIR',
'TEMP-DIRE',
'TEMP-DIREC',
'TEMP-DIRECT',
'TEMP-DIRECTO',
'TEMP-DIRECTOR',
'TEMP-TABLE',
'TEMP-TABLE-PREPARE',
'TERM',
'TERMINAL',
'TERMI',
'TERMIN',
'TERMINA',
'TERMINATE',
'TEXT',
'TEXT-CURSOR',
'TEXT-SEG-GROW',
'TEXT-SELECTED',
'THEN',
'THIS-OBJECT',
'THIS-PROCEDURE',
'THREE-D',
'THROW',
'THROUGH',
'THRU',
'TIC-MARKS',
'TIME',
'TIME-SOURCE',
'TITLE',
'TITLE-BGCOLOR',
'TITLE-BGC',
'TITLE-BGCO',
'TITLE-BGCOL',
'TITLE-BGCOLO',
'TITLE-DCOLOR',
'TITLE-DC',
'TITLE-DCO',
'TITLE-DCOL',
'TITLE-DCOLO',
'TITLE-FGCOLOR',
'TITLE-FGC',
'TITLE-FGCO',
'TITLE-FGCOL',
'TITLE-FGCOLO',
'TITLE-FONT',
'TITLE-FO',
'TITLE-FON',
'TO',
'TODAY',
'TOGGLE-BOX',
'TOOLTIP',
'TOOLTIPS',
'TOPIC',
'TOP-NAV-QUERY',
'TOP-ONLY',
'TO-ROWID',
'TOTAL',
'TRAILING',
'TRANS',
'TRANSACTION',
'TRANSACTION-MODE',
'TRANS-INIT-PROCEDURE',
'TRANSPARENT',
'TRIGGER',
'TRIGGERS',
'TRIM',
'TRUE',
'TRUNCATE',
'TRUNC',
'TRUNCA',
'TRUNCAT',
'TYPE',
'TYPE-OF',
'UNBOX',
'UNBUFFERED',
'UNBUFF',
'UNBUFFE',
'UNBUFFER',
'UNBUFFERE',
'UNDERLINE',
'UNDERL',
'UNDERLI',
'UNDERLIN',
'UNDO',
'UNFORMATTED',
'UNFORM',
'UNFORMA',
'UNFORMAT',
'UNFORMATT',
'UNFORMATTE',
'UNION',
'UNIQUE',
'UNIQUE-ID',
'UNIQUE-MATCH',
'UNIX',
'UNLESS-HIDDEN',
'UNLOAD',
'UNSIGNED-LONG',
'UNSUBSCRIBE',
'UP',
'UPDATE',
'UPDATE-ATTRIBUTE',
'URL',
'URL-DECODE',
'URL-ENCODE',
'URL-PASSWORD',
'URL-USERID',
'USE',
'USE-DICT-EXPS',
'USE-FILENAME',
'USE-INDEX',
'USER',
'USE-REVVIDEO',
'USERID',
'USER-ID',
'USE-TEXT',
'USE-UNDERLINE',
'USE-WIDGET-POOL',
'USING',
'V6DISPLAY',
'V6FRAME',
'VALIDATE',
'VALIDATE-EXPRESSION',
'VALIDATE-MESSAGE',
'VALIDATE-SEAL',
'VALIDATION-ENABLED',
'VALID-EVENT',
'VALID-HANDLE',
'VALID-OBJECT',
'VALUE',
'VALUE-CHANGED',
'VALUES',
'VARIABLE',
'VAR',
'VARI',
'VARIA',
'VARIAB',
'VARIABL',
'VERBOSE',
'VERSION',
'VERTICAL',
'VERT',
'VERTI',
'VERTIC',
'VERTICA',
'VIEW',
'VIEW-AS',
'VIEW-FIRST-COLUMN-ON-REOPEN',
'VIRTUAL-HEIGHT-CHARS',
'VIRTUAL-HEIGHT',
'VIRTUAL-HEIGHT-',
'VIRTUAL-HEIGHT-C',
'VIRTUAL-HEIGHT-CH',
'VIRTUAL-HEIGHT-CHA',
'VIRTUAL-HEIGHT-CHAR',
'VIRTUAL-HEIGHT-PIXELS',
'VIRTUAL-HEIGHT-P',
'VIRTUAL-HEIGHT-PI',
'VIRTUAL-HEIGHT-PIX',
'VIRTUAL-HEIGHT-PIXE',
'VIRTUAL-HEIGHT-PIXEL',
'VIRTUAL-WIDTH-CHARS',
'VIRTUAL-WIDTH',
'VIRTUAL-WIDTH-',
'VIRTUAL-WIDTH-C',
'VIRTUAL-WIDTH-CH',
'VIRTUAL-WIDTH-CHA',
'VIRTUAL-WIDTH-CHAR',
'VIRTUAL-WIDTH-PIXELS',
'VIRTUAL-WIDTH-P',
'VIRTUAL-WIDTH-PI',
'VIRTUAL-WIDTH-PIX',
'VIRTUAL-WIDTH-PIXE',
'VIRTUAL-WIDTH-PIXEL',
'VISIBLE',
'VOID',
'WAIT',
'WAIT-FOR',
'WARNING',
'WEB-CONTEXT',
'WEEKDAY',
'WHEN',
'WHERE',
'WHILE',
'WIDGET',
'WIDGET-ENTER',
'WIDGET-E',
'WIDGET-EN',
'WIDGET-ENT',
'WIDGET-ENTE',
'WIDGET-ID',
'WIDGET-LEAVE',
'WIDGET-L',
'WIDGET-LE',
'WIDGET-LEA',
'WIDGET-LEAV',
'WIDGET-POOL',
'WIDTH-CHARS',
'WIDTH',
'WIDTH-',
'WIDTH-C',
'WIDTH-CH',
'WIDTH-CHA',
'WIDTH-CHAR',
'WIDTH-PIXELS',
'WIDTH-P',
'WIDTH-PI',
'WIDTH-PIX',
'WIDTH-PIXE',
'WIDTH-PIXEL',
'WINDOW',
'WINDOW-MAXIMIZED',
'WINDOW-MAXIM',
'WINDOW-MAXIMI',
'WINDOW-MAXIMIZ',
'WINDOW-MAXIMIZE',
'WINDOW-MINIMIZED',
'WINDOW-MINIM',
'WINDOW-MINIMI',
'WINDOW-MINIMIZ',
'WINDOW-MINIMIZE',
'WINDOW-NAME',
'WINDOW-NORMAL',
'WINDOW-STATE',
'WINDOW-STA',
'WINDOW-STAT',
'WINDOW-SYSTEM',
'WITH',
'WORD-INDEX',
'WORD-WRAP',
'WORK-AREA-HEIGHT-PIXELS',
'WORK-AREA-WIDTH-PIXELS',
'WORK-AREA-X',
'WORK-AREA-Y',
'WORKFILE',
'WORK-TABLE',
'WORK-TAB',
'WORK-TABL',
'WRITE',
'WRITE-CDATA',
'WRITE-CHARACTERS',
'WRITE-COMMENT',
'WRITE-DATA-ELEMENT',
'WRITE-EMPTY-ELEMENT',
'WRITE-ENTITY-REF',
'WRITE-EXTERNAL-DTD',
'WRITE-FRAGMENT',
'WRITE-MESSAGE',
'WRITE-PROCESSING-INSTRUCTION',
'WRITE-STATUS',
'WRITE-XML',
'WRITE-XMLSCHEMA',
'X',
'XCODE',
'XML-DATA-TYPE',
'XML-NODE-TYPE',
'XML-SCHEMA-PATH',
'XML-SUPPRESS-NAMESPACE-PROCESSING',
'X-OF',
'XREF',
'XREF-XML',
'Y',
'YEAR',
'YEAR-OFFSET',
'YES',
'YES-NO',
'YES-NO-CANCEL',
'Y-OF'
)
| GarySparrow/mFlaskWeb | venv/Lib/site-packages/pygments/lexers/_openedge_builtins.py | Python | mit | 48,362 |
#
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
MAXREPEAT = 2147483648
#from _sre import MAXREPEAT
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
    """SRE standard exception, re-exported to users as ``sre.error``
    (raised for invalid patterns and engine failures)."""
    pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
    """Map each item of *list* to its position (0-based).

    Used below to convert the OPCODES/ATCODES/CHCODES name lists into
    name -> numeric-code dictionaries consumed by the engine and by the
    C header generator.

    NOTE: the parameter name shadows the builtin ``list``; it is kept
    for backward compatibility with existing callers.
    """
    # enumerate + dict comprehension replaces the manual counter loop.
    return {item: i for i, item in enumerate(list)}
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
    # Regenerate sre_constants.h, the header consumed by the C engine (_sre).

    def dump(f, d, prefix):
        # Emit one #define per entry, ordered by numeric opcode value.
        items = sorted(d.items(), key=lambda a: a[1])
        for k, v in items:
            f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))

    # Fix: use a context manager so the header file is closed even if a
    # write fails (the original opened and closed the file manually).
    with open("sre_constants.h", "w") as f:
        f.write("""\
/*
 * Secret Labs' Regular Expression Engine
 *
 * regular expression matching engine
 *
 * NOTE: This file is generated by sre_constants.py. If you need
 * to change anything in here, edit sre_constants.py and run it.
 *
 * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
 *
 * See the _sre.c file for information on usage and redistribution.
 */
""")
        f.write("#define SRE_MAGIC %d\n" % MAGIC)
        dump(f, OPCODES, "SRE_OP")
        dump(f, ATCODES, "SRE")
        dump(f, CHCODES, "SRE")
        f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
        f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
        f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
        f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
        f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
        f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
        f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
        f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
        f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
        f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
    print("done")
| 2014c2g12/c2g12 | wsgi/w2/static/Brython2.0.0-20140209-164925/Lib/sre_constants.py | Python | gpl-2.0 | 7,171 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class sale_report(osv.osv):
    """Read-only sales statistics model.

    ``_auto = False`` means the ORM creates no table; instead
    :meth:`init` (re)builds a SQL view named after ``_table`` that
    aggregates sale order lines per product/partner/salesperson/etc.
    All columns are ``readonly`` because rows come straight from the view.
    """
    _name = "sale.report"
    _description = "Sales Orders Statistics"
    _auto = False  # no table: backed by the SQL view built in init()
    _rec_name = 'date'
    _columns = {
        'date': fields.datetime('Date Order', readonly=True),
        'date_confirm': fields.date('Date Confirm', readonly=True),
        'product_id': fields.many2one('product.product', 'Product', readonly=True),
        'product_uom': fields.many2one('product.uom', 'Unit of Measure', readonly=True),
        'product_uom_qty': fields.float('# of Qty', readonly=True),
        'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
        'price_total': fields.float('Total Price', readonly=True),
        # Average days between order creation and confirmation (see _select).
        'delay': fields.float('Commitment Delay', digits=(16,2), readonly=True),
        'categ_id': fields.many2one('product.category','Category of Product', readonly=True),
        'nbr': fields.integer('# of Lines', readonly=True),
        'state': fields.selection([
            ('draft', 'Quotation'),
            ('waiting_date', 'Waiting Schedule'),
            ('manual', 'Manual In Progress'),
            ('progress', 'In Progress'),
            ('invoice_except', 'Invoice Exception'),
            ('done', 'Done'),
            ('cancel', 'Cancelled')
            ], 'Order Status', readonly=True),
        'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', readonly=True),
        'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }
    _order = 'date desc'

    def _select(self):
        # SELECT clause of the view: aggregates per the GROUP BY below.
        # Quantities are normalized to the product's default UoM via the
        # u/u2 factor ratio; 'delay' is the average confirm-create gap in days.
        select_str = """
             SELECT min(l.id) as id,
                    l.product_id as product_id,
                    t.uom_id as product_uom,
                    sum(l.product_uom_qty / u.factor * u2.factor) as product_uom_qty,
                    sum(l.product_uom_qty * l.price_unit * (100.0-l.discount) / 100.0) as price_total,
                    count(*) as nbr,
                    s.date_order as date,
                    s.date_confirm as date_confirm,
                    s.partner_id as partner_id,
                    s.user_id as user_id,
                    s.company_id as company_id,
                    extract(epoch from avg(date_trunc('day',s.date_confirm)-date_trunc('day',s.create_date)))/(24*60*60)::decimal(16,2) as delay,
                    s.state,
                    t.categ_id as categ_id,
                    s.pricelist_id as pricelist_id,
                    s.project_id as analytic_account_id,
                    s.section_id as section_id
        """
        return select_str

    def _from(self):
        # FROM clause: order lines joined to their order; LEFT JOINs keep
        # lines whose product/UoM is unset.
        from_str = """
                sale_order_line l
                      join sale_order s on (l.order_id=s.id)
                        left join product_product p on (l.product_id=p.id)
                            left join product_template t on (p.product_tmpl_id=t.id)
                    left join product_uom u on (u.id=l.product_uom)
                    left join product_uom u2 on (u2.id=t.uom_id)
        """
        return from_str

    def _group_by(self):
        # GROUP BY must list every non-aggregated column of _select().
        group_by_str = """
            GROUP BY l.product_id,
                    l.order_id,
                    t.uom_id,
                    t.categ_id,
                    s.date_order,
                    s.date_confirm,
                    s.partner_id,
                    s.user_id,
                    s.company_id,
                    s.state,
                    s.pricelist_id,
                    s.project_id,
                    s.section_id
        """
        return group_by_str

    def init(self, cr):
        # Drop and recreate the backing SQL view on module install/update.
        # self._table = sale_report
        tools.drop_view_if_exists(cr, self._table)
        cr.execute("""CREATE or REPLACE VIEW %s as (
            %s
            FROM ( %s )
            %s
            )""" % (self._table, self._select(), self._from(), self._group_by()))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| bealdav/OpenUpgrade | addons/sale/report/sale_report.py | Python | agpl-3.0 | 5,204 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Attendances',
'version': '1.1',
'category': 'Human Resources',
'description': """
This module aims to manage employee's attendances.
==================================================
Keeps account of the attendances of the employees on the basis of the
actions(Sign in/Sign out) performed by them.
""",
'author': 'OpenERP SA',
'images': ['images/hr_attendances.jpeg'],
'depends': ['hr'],
'data': [
'security/ir_rule.xml',
'security/ir.model.access.csv',
'hr_attendance_view.xml',
'hr_attendance_report.xml',
'wizard/hr_attendance_bymonth_view.xml',
'wizard/hr_attendance_byweek_view.xml',
'wizard/hr_attendance_error_view.xml',
'res_config_view.xml',
],
'demo': ['hr_attendance_demo.xml'],
'test': [
'test/attendance_process.yml',
'test/hr_attendance_report.yml',
],
'installable': True,
'auto_install': False,
#web
"js": ["static/src/js/attendance.js"],
'qweb' : ["static/src/xml/attendance.xml"],
'css' : ["static/src/css/slider.css"],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| inovtec-solutions/OpenERP | openerp/addons/hr_attendance/__openerp__.py | Python | agpl-3.0 | 2,163 |
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Compression implementations for a Transport.
"""
import zlib
class ZlibCompressor (object):
    """Stateful compressor for the transport's ``zlib`` option.

    Instances are used as callables: each call compresses one payload
    and terminates it with ``Z_FULL_FLUSH`` so the peer can decode every
    packet without waiting for later data.
    """

    def __init__(self):
        # Level 9: maximum compression.
        self.z = zlib.compressobj(9)

    def __call__(self, data):
        pieces = (self.z.compress(data), self.z.flush(zlib.Z_FULL_FLUSH))
        return b"".join(pieces)
class ZlibDecompressor (object):
    """Stateful decompressor mirroring ZlibCompressor.

    Feed it compressed packet payloads in order; each call returns the
    plaintext recovered so far (stream state is kept across calls).
    """

    def __init__(self):
        self.z = zlib.decompressobj()

    def __call__(self, data):
        plain = self.z.decompress(data)
        return plain
| mytliulei/DCNRobotInstallPackages | windows/win32/paramiko-1.14.0/paramiko/compress.py | Python | apache-2.0 | 1,245 |
"""
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
    # Python 2 built-in
    execfile
except NameError:
    # Python 3 removed execfile(); provide an equivalent that compiles
    # and executes the file in the supplied namespaces.
    def execfile(filename, global_vars=None, local_vars=None):
        with open(filename, encoding='utf-8') as f:
            code = compile(f.read(), filename, 'exec')
            exec(code, global_vars, local_vars)

try:
    # Python 2 string base class; alias to str on Python 3 so
    # isinstance(x, basestring) checks keep working.
    basestring
except NameError:
    basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
    """Minimal stream splitter: every write()/flush() is forwarded to
    both wrapped file-like objects (used to mirror a stream into a
    second sink, e.g. stdout plus a capture buffer)."""

    def __init__(self, file1, file2):
        self.file1 = file1
        self.file2 = file2

    def write(self, data):
        # Forward to both sinks, preserving the original order.
        for sink in (self.file1, self.file2):
            sink.write(data)

    def flush(self):
        for sink in (self.file1, self.file2):
            sink.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
    """Fetch *url* over HTTP or read it from the local filesystem.

    Parameters
    ----------
    url : str
        Either an ``http://`` URL or a local file path.

    Returns
    -------
    data : str or bytes
        The raw document. Gzip-encoded HTTP responses are decompressed
        transparently.

    Raises
    ------
    RuntimeError
        If the HTTP response declares an unsupported content-encoding.
    """
    if url.startswith('http://'):
        # Try Python 2 (urllib2) first, use Python 3 API on AttributeError.
        try:
            resp = urllib.urlopen(url)
            encoding = resp.headers.dict.get('content-encoding', 'plain')
        except AttributeError:
            resp = urllib.request.urlopen(url)
            encoding = resp.headers.get('content-encoding', 'plain')
        data = resp.read()
        if encoding == 'plain':
            pass
        elif encoding == 'gzip':
            data = StringIO(data)
            data = gzip.GzipFile(fileobj=data).read()
        else:
            raise RuntimeError('unknown encoding')
    else:
        # Local file. Fix: the original called fid.close() inside the
        # with-block, which is redundant — the context manager closes it.
        with open(url, 'r') as fid:
            data = fid.read()
    return data
# Disk-backed memoization of _get_data: repeated documentation builds
# reuse previously fetched pages from _build instead of re-downloading.
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
    """Parse a Sphinx search index

    Parameters
    ----------
    searchindex : str
        The Sphinx search index (contents of searchindex.js)

    Returns
    -------
    filenames : list of str
        The file names parsed from the search index.
    objects : dict
        The objects parsed from the search index.
    """
    def _select_block(str_in, start_tag, end_tag):
        """Select first block delimited by start_tag and end_tag"""
        start_pos = str_in.find(start_tag)
        if start_pos < 0:
            raise ValueError('start_tag not found')
        depth = 0
        # Scan forward, tracking nesting depth of start_tag/end_tag pairs.
        # NOTE(review): if the block is never closed, the loop falls off the
        # end and the slice below silently returns a truncated block.
        for pos in range(start_pos, len(str_in)):
            if str_in[pos] == start_tag:
                depth += 1
            elif str_in[pos] == end_tag:
                depth -= 1
                if depth == 0:
                    break
        # Slice excludes the delimiters themselves.
        sel = str_in[start_pos + 1:pos]
        return sel

    def _parse_dict_recursive(dict_str):
        """Parse a dictionary from the search index"""
        dict_out = dict()
        pos_last = 0
        pos = dict_str.find(':')
        while pos >= 0:
            key = dict_str[pos_last:pos]
            if dict_str[pos + 1] == '[':
                # value is a list
                pos_tmp = dict_str.find(']', pos + 1)
                if pos_tmp < 0:
                    raise RuntimeError('error when parsing dict')
                value = dict_str[pos + 2: pos_tmp].split(',')
                # try to convert elements to int
                for i in range(len(value)):
                    try:
                        value[i] = int(value[i])
                    except ValueError:
                        pass
            elif dict_str[pos + 1] == '{':
                # value is another dictionary: recurse on the nested block
                subdict_str = _select_block(dict_str[pos:], '{', '}')
                value = _parse_dict_recursive(subdict_str)
                pos_tmp = pos + len(subdict_str)
            else:
                raise ValueError('error when parsing dict: unknown elem')
            key = key.strip('"')
            if len(key) > 0:
                dict_out[key] = value

            pos_last = dict_str.find(',', pos_tmp)
            if pos_last < 0:
                break
            pos_last += 1
            pos = dict_str.find(':', pos_last)
        return dict_out

    # Make sure searchindex uses UTF-8 encoding
    if hasattr(searchindex, 'decode'):
        searchindex = searchindex.decode('UTF-8')

    # parse objects: locate the "objects:" key and parse its dict value
    query = 'objects:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"objects:" not found in search index')

    sel = _select_block(searchindex[pos:], '{', '}')
    objects = _parse_dict_recursive(sel)

    # parse filenames: a flat JS array of quoted strings after "filenames:"
    query = 'filenames:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"filenames:" not found in search index')
    filenames = searchindex[pos + len(query) + 1:]
    filenames = filenames[:filenames.find(']')]
    filenames = [f.strip('"') for f in filenames.split(',')]

    return filenames, objects
class SphinxDocLinkResolver(object):
    """ Resolve documentation links using searchindex.js generated by Sphinx

    Parameters
    ----------
    doc_url : str
        The base URL of the project website.
    searchindex : str
        Filename of searchindex, relative to doc_url.
    extra_modules_test : list of str
        List of extra module names to test.
    relative : bool
        Return relative links (only useful for links to documentation of this
        package).
    """

    def __init__(self, doc_url, searchindex='searchindex.js',
                 extra_modules_test=None, relative=False):
        self.doc_url = doc_url
        self.relative = relative
        # Caches: resolved links per fully-qualified name, and fetched
        # HTML pages per URL, to avoid repeated network/disk round-trips.
        self._link_cache = {}
        self.extra_modules_test = extra_modules_test
        self._page_cache = {}
        if doc_url.startswith('http://'):
            if relative:
                raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://)"')
            searchindex_url = doc_url + '/' + searchindex
        else:
            searchindex_url = os.path.join(doc_url, searchindex)

        # detect if we are using relative links on a Windows system
        if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
            if not relative:
                raise ValueError('You have to use relative=True for the local'
                                 ' package on a Windows system.')
            self._is_windows = True
        else:
            self._is_windows = False

        # download and initialize the search index
        sindex = get_data(searchindex_url)
        filenames, objects = parse_sphinx_searchindex(sindex)

        self._searchindex = dict(filenames=filenames, objects=objects)

    def _get_link(self, cobj):
        """Get a valid link, False if not found"""
        fname_idx = None
        full_name = cobj['module_short'] + '.' + cobj['name']
        # First try the fully-qualified name, then fall back to looking the
        # short name up inside the module's entry.
        if full_name in self._searchindex['objects']:
            value = self._searchindex['objects'][full_name]
            if isinstance(value, dict):
                value = value[next(iter(value.keys()))]
            fname_idx = value[0]
        elif cobj['module_short'] in self._searchindex['objects']:
            value = self._searchindex['objects'][cobj['module_short']]
            if cobj['name'] in value.keys():
                fname_idx = value[cobj['name']][0]

        if fname_idx is not None:
            fname = self._searchindex['filenames'][fname_idx] + '.html'

            if self._is_windows:
                fname = fname.replace('/', '\\')
                link = os.path.join(self.doc_url, fname)
            else:
                link = posixpath.join(self.doc_url, fname)

            if hasattr(link, 'decode'):
                link = link.decode('utf-8', 'replace')

            # Fetch the target page (or reuse the cached copy) so we can
            # verify the anchor actually exists before emitting a link.
            if link in self._page_cache:
                html = self._page_cache[link]
            else:
                html = get_data(link)
                self._page_cache[link] = html

            # test if cobj appears in page
            comb_names = [cobj['module_short'] + '.' + cobj['name']]
            if self.extra_modules_test is not None:
                for mod in self.extra_modules_test:
                    comb_names.append(mod + '.' + cobj['name'])
            url = False
            if hasattr(html, 'decode'):
                # Decode bytes under Python 3
                html = html.decode('utf-8', 'replace')
            for comb_name in comb_names:
                if hasattr(comb_name, 'decode'):
                    # Decode bytes under Python 3
                    comb_name = comb_name.decode('utf-8', 'replace')
                if comb_name in html:
                    # Anchor found: build the final "#qualified.name" URL.
                    url = link + u'#' + comb_name
            link = url
        else:
            link = False

        return link

    def resolve(self, cobj, this_url):
        """Resolve the link to the documentation, returns None if not found

        Parameters
        ----------
        cobj : dict
            Dict with information about the "code object" for which we are
            resolving a link.
            cobi['name'] : function or class name (str)
            cobj['module_short'] : shortened module name (str)
            cobj['module'] : module name (str)
        this_url: str
            URL of the current page. Needed to construct relative URLs
            (only used if relative=True in constructor).

        Returns
        -------
        link : str | None
            The link (URL) to the documentation.
        """
        full_name = cobj['module_short'] + '.' + cobj['name']
        link = self._link_cache.get(full_name, None)
        if link is None:
            # we don't have it cached
            link = self._get_link(cobj)
            # cache it for the future
            self._link_cache[full_name] = link

        if link is False or link is None:
            # failed to resolve
            return None

        if self.relative:
            link = os.path.relpath(link, start=this_url)
            if self._is_windows:
                # replace '\' with '/' so it on the web
                link = link.replace('\\', '/')

            # for some reason, the relative link goes one directory too high up
            link = link[3:]

        return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
    """Extract the module-level docstring of a Python example file.

    Parameters
    ----------
    filename : str
        Path of the example file to parse.
    ignore_heading : bool, optional (default=False)
        If True, the first docstring paragraph is treated as a heading and
        the *second* paragraph (truncated to ~95 chars) is returned as the
        short description; raises ValueError if there is no second paragraph.

    Returns
    -------
    docstring : str
        The full module docstring ('' if none found).
    first_par : str
        Short one-paragraph description.
    end_row : int
        1-based line number of the first line after the docstring.
    """
    # Fixes vs. original: file handles are closed via `with` (they leaked
    # before), the vendored `six` dependency is replaced by a direct
    # version check, and ast.literal_eval replaces eval() on token text.
    if sys.version_info[0] == 2:
        with open(filename) as fh:
            lines = fh.readlines()
    else:
        with open(filename, encoding='utf-8') as fh:
            lines = fh.readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        # Skip a shebang line; remember the offset for the returned row.
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    erow = 0
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            # Safe literal evaluation of the docstring token.
            docstring = ast.literal_eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery.\n"
                                         "Please check the layout of your"
                                         " example file:\n {}\n and make sure"
                                         " it's correct".format(filename))
                else:
                    first_par = paragraphs[0]
        # First non-trivial token ends the search, docstring or not.
        break
    return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
    """ Generate the list of examples, as well as the contents of
    examples.

    Called on Sphinx's ``builder-inited`` event; walks the ``examples``
    source tree and writes ``auto_examples/index.rst`` plus one rst file per
    example (via generate_dir_rst / generate_file_rst).
    """
    root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
                                               'examples'))
    generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
                                                 'modules', 'generated'))
    # plot_gallery may arrive as a string from the command line
    # (-D plot_gallery=True) or as a real bool from conf.py.
    try:
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        plot_gallery = bool(app.builder.config.plot_gallery)
    if not os.path.exists(example_dir):
        os.makedirs(example_dir)
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    if not os.path.exists(generated_dir):
        os.makedirs(generated_dir)
    # we create an index.rst with all examples
    fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
    # Note: The sidebar button has been removed from the examples page for now
    # due to how it messes up the layout. Will be fixed at a later point
    fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
    # Here we don't use an os.walk, but we recurse only twice: flat is
    # better than nested.
    seen_backrefs = set()
    generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
    for directory in sorted(os.listdir(example_dir)):
        if os.path.isdir(os.path.join(example_dir, directory)):
            generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
    fhindex.flush()
def extract_line_count(filename, target_dir):
    """Return ``(docstring_end_row, total_rows)`` for an example file.

    Both values are 1-based and include a leading shebang line if present;
    the first one is the row on which the module docstring ends (0-based 0
    if there is none).
    """
    example_file = os.path.join(target_dir, filename)
    if six.PY2:
        with open(example_file) as fobj:
            lines = fobj.readlines()
    else:
        with open(example_file, encoding='utf-8') as fobj:
            lines = fobj.readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    erow = 0  # defensive default for an empty token stream
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            # Only the first significant string counts as the docstring.
            erow_docstring = erow
            check_docstring = False
    return erow_docstring + 1 + start_row, erow + 1 + start_row
def line_count_sort(file_list, target_dir):
    """Sort example file names by their number of code (non-docstring) lines.

    Ties are broken alphabetically, reproducing the original
    ``np.lexsort((names, counts))`` ordering (primary key: code line count,
    secondary key: file name).  Rewritten in pure Python because the
    ``np.object`` / ``np.str`` / ``np.float`` aliases used before were
    removed in NumPy >= 1.24.
    """
    new_list = [x for x in file_list if x.endswith('.py')]
    if not new_list:
        return []
    keyed = []
    for exmpl in new_list:
        docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
        keyed.append((total_lines - docstr_lines, exmpl))
    # Tuple sort: (count, name) ascending, matching the old lexsort.
    keyed.sort()
    return [name for _, name in keyed]
def _thumbnail_div(subdir, full_dir, fname, snippet, is_backref=False):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. only:: html\n\n')
out.append(' .. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
if is_backref:
out.append('.. only:: not html\n\n * :ref:`example_%s`' % ref_name)
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
    """ Generate the rst file for an example directory.

    Writes the directory's README, one thumbnail div + hidden toctree entry
    per example into ``fhindex``, and appends backreference thumbnails to
    ``modules/generated/<obj>.examples`` files (``seen_backrefs`` tracks
    which of those have already been started, so they are opened in append
    mode on subsequent hits).
    """
    if not directory == '.':
        target_dir = os.path.join(root_dir, directory)
        src_dir = os.path.join(example_dir, directory)
    else:
        # '.' means the flat, top-level examples directory.
        target_dir = root_dir
        src_dir = example_dir
    if not os.path.exists(os.path.join(src_dir, 'README.txt')):
        raise ValueError('Example directory %s does not have a README.txt' %
                         src_dir)
    fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # Examples are presented shortest-first (by code line count).
    sorted_listdir = line_count_sort(os.listdir(src_dir),
                                     src_dir)
    if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
        os.makedirs(os.path.join(directory, 'images', 'thumb'))
    for fname in sorted_listdir:
        if fname.endswith('py'):
            backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
            new_fname = os.path.join(src_dir, fname)
            _, snippet, _ = extract_docstring(new_fname, True)
            fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
            fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
            for backref in backrefs:
                include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
                seen = backref in seen_backrefs
                with open(include_path, 'a' if seen else 'w') as ex_file:
                    if not seen:
                        # heading
                        print(file=ex_file)
                        print('Examples using ``%s``' % backref, file=ex_file)
                        print('-----------------%s--' % ('-' * len(backref)),
                              file=ex_file)
                        print(file=ex_file)
                    rel_dir = os.path.join('../../auto_examples', directory)
                    ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet, is_backref=True))
                    seen_backrefs.add(backref)
    fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""")  # clear at the end of the section
# modules for which we embed links into example code
# (their API docs are resolved by embed_code_links below)
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
    """Make a thumbnail with the same aspect ratio centered in an
    image with a given width and height

    Parameters
    ----------
    in_fname : str
        Path of the source image.
    out_fname : str
        Path the thumbnail is written to (RGB PNG, white background).
    width, height : int
        Target canvas size in pixels; the scaled image is pasted centered.
    """
    # local import to avoid testing dependency on PIL:
    try:
        from PIL import Image
    except ImportError:
        import Image
    img = Image.open(in_fname)
    width_in, height_in = img.size
    scale_w = width / float(width_in)
    scale_h = height / float(height_in)
    # Pick the scale factor that keeps the image inside the target box.
    if height_in * scale_w <= height:
        scale = scale_w
    else:
        scale = scale_h
    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))
    # resize the image.  Image.ANTIALIAS was removed in Pillow 10; its
    # replacement Image.Resampling.LANCZOS (same filter, new name) exists
    # since Pillow 9.1 -- fall back to the old constant on older versions.
    try:
        resample = Image.Resampling.LANCZOS
    except AttributeError:  # Pillow < 9.1 or classic PIL
        resample = Image.ANTIALIAS
    img.thumbnail((width_sc, height_sc), resample)
    # insert centered
    thumb = Image.new('RGB', (width, height), (255, 255, 255))
    pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
    thumb.paste(img, pos_insert)
    thumb.save(out_fname)
    # Use optipng to perform lossless compression on the resized image if
    # software is installed
    if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
        try:
            subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
        except Exception:
            warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
    """ Get the shortest possible module name

    Walks from the full dotted path towards the package root and returns the
    shortest prefix from which ``obj_name`` can still be imported (e.g.
    ``json`` instead of ``json.decoder`` for ``JSONDecoder``).  Uses
    importlib instead of exec()-ing a generated import statement.
    """
    import importlib

    parts = module_name.split('.')
    short_name = module_name
    for i in range(len(parts) - 1, 0, -1):
        short_name = '.'.join(parts[:i])
        try:
            # Equivalent of ``from <short_name> import <obj_name>``.
            module = importlib.import_module(short_name)
            try:
                getattr(module, obj_name)
            except AttributeError:
                # ``from X import Y`` also succeeds when Y is a submodule
                # that isn't yet an attribute of X -- mirror that.
                importlib.import_module(short_name + '.' + obj_name)
        except ImportError:
            # get the last working module name
            short_name = '.'.join(parts[:(i + 1)])
            break
    return short_name
class NameFinder(ast.NodeVisitor):
    """Finds the longest form of variable names and their imports in code

    Only retains names from imported modules.
    """

    def __init__(self):
        super(NameFinder, self).__init__()
        self.imported_names = {}   # local alias -> fully qualified path
        self.accessed_names = set()

    def visit_Import(self, node, prefix=''):
        # Record each "import a.b [as c]" binding under its local alias.
        for alias in node.names:
            self.imported_names[alias.asname or alias.name] = prefix + alias.name

    def visit_ImportFrom(self, node):
        # "from m import x" behaves like "import m.x" bound to a local name.
        self.visit_Import(node, node.module + '.')

    def visit_Name(self, node):
        self.accessed_names.add(node.id)

    def visit_Attribute(self, node):
        # Walk down an attribute chain such as a.b.c, collecting the parts.
        chain = []
        while isinstance(node, ast.Attribute):
            chain.append(node.attr)
            node = node.value
        if isinstance(node, ast.Name):
            # This is a.b, not e.g. a().b
            chain.append(node.id)
            self.accessed_names.add('.'.join(reversed(chain)))
        else:
            # need to get a in a().b
            self.visit(node)

    def get_mapping(self):
        """Yield (name as written, fully qualified name) pairs."""
        for name in self.accessed_names:
            head, sep, tail = name.partition('.')
            if head in self.imported_names:
                # Join the import path to the remaining attribute path.
                yield name, self.imported_names[head] + sep + tail
def identify_names(code):
    """Builds a codeobj summary by identifying and resolving used names

    >>> code = '''
    ... from a.b import c
    ... import d as e
    ... print(c)
    ... e.HelloWorld().f.g
    ... '''
    >>> for name, o in sorted(identify_names(code).items()):
    ...     print(name, o['name'], o['module'], o['module_short'])
    c c a.b a.b
    e.HelloWorld HelloWorld d d
    """
    finder = NameFinder()
    finder.visit(ast.parse(code))
    example_code_obj = {}
    for name, full_name in finder.get_mapping():
        # name is as written in file (e.g. np.asarray)
        # full_name includes resolved import path (e.g. numpy.asarray)
        module, attribute = full_name.rsplit('.', 1)
        # get shortened module name
        module_short = get_short_module_name(module, attribute)
        cobj = {'name': attribute, 'module': module,
                'module_short': module_short}
        example_code_obj[name] = cobj
    return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.

    When ``plot_gallery`` is true and the example is a ``plot_*`` file that
    is newer than its first generated image, the example is *executed*
    (Python 2 ``execfile``) with stdout captured, its matplotlib figures are
    saved as pngs, and thumbnails (including front-page carousel variants)
    are produced.  Side effects: writes ``<example>.rst``, images, stdout and
    timing files under ``target_dir``, and a ``*_codeobj.pickle`` used later
    by embed_code_links.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name
    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)
    # The following is a list containing all the figure names
    figure_list = []
    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())
        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()
                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example, we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n %s\n\n' % (
                        '\n '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)
                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #   incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    # Only forward non-default face/edge colors so defaults
                    # stay controlled by the rc file at render time.
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr
                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                # NOTE: bare except is deliberate here -- a broken example
                # must not abort the whole documentation build.
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout
            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            # Up to date: recover the figure list from the existing pngs.
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                              '[0-9][0-9][0-9]'))]
            figure_list.sort()
        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normaly, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, seperate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally the should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)
    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)
    docstring, short_desc, end_row = extract_docstring(example_file)
    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
    time_m, time_s = divmod(time_elapsed, 60)
    # NOTE: the rst template is filled from locals(), so the unused-looking
    # names above (short_fname, end_row, time_m, ...) are in fact consumed.
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    f.write(this_template % locals())
    f.flush()
    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
def embed_code_links(app, exception):
    """Embed hyperlinks to documentation into example code

    Sphinx ``build-finished`` handler: for every generated example html
    page, loads the matching ``*_codeobj.pickle`` written by
    generate_file_rst and rewrites highlighted identifiers into links to
    the sklearn/matplotlib/numpy/scipy API docs.  Requires network access
    for the external resolvers; failures are reported and skipped.
    """
    if exception is not None:
        # The build failed -- don't post-process anything.
        return
    print('Embedding documentation hyperlinks in examples..')
    if app.builder.name == 'latex':
        # Don't embed hyperlinks when a latex builder is used.
        return
    # Add resolvers for the packages for which we want to show links
    doc_resolvers = {}
    doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
                                                     relative=True)
    resolver_urls = {
        'matplotlib': 'http://matplotlib.org',
        'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
        'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
    }
    for this_module, url in resolver_urls.items():
        try:
            doc_resolvers[this_module] = SphinxDocLinkResolver(url)
        except HTTPError as e:
            print("The following HTTP Error has occurred:\n")
            print(e.code)
        except URLError as e:
            print("\n...\n"
                  "Warning: Embedding the documentation hyperlinks requires "
                  "internet access.\nPlease check your network connection.\n"
                  "Unable to continue embedding `{0}` links due to a URL "
                  "Error:\n".format(this_module))
            print(e.args)
    example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
                                                    'auto_examples'))
    # patterns for replacement
    link_pattern = '<a href="%s">%s</a>'
    orig_pattern = '<span class="n">%s</span>'
    period = '<span class="o">.</span>'
    for dirpath, _, filenames in os.walk(html_example_dir):
        for fname in filenames:
            print('\tprocessing: %s' % fname)
            full_fname = os.path.join(html_example_dir, dirpath, fname)
            subpath = dirpath[len(html_example_dir) + 1:]
            # fname[:-5] strips the '.html' suffix to find the pickle twin.
            pickle_fname = os.path.join(example_dir, subpath,
                                        fname[:-5] + '_codeobj.pickle')
            if os.path.exists(pickle_fname):
                # we have a pickle file with the objects to embed links for
                with open(pickle_fname, 'rb') as fid:
                    example_code_obj = pickle.load(fid)
                fid.close()
                str_repl = {}
                # generate replacement strings with the links
                for name, cobj in example_code_obj.items():
                    this_module = cobj['module'].split('.')[0]
                    if this_module not in doc_resolvers:
                        continue
                    try:
                        link = doc_resolvers[this_module].resolve(cobj,
                                                                  full_fname)
                    except (HTTPError, URLError) as e:
                        print("The following error has occurred:\n")
                        print(repr(e))
                        continue
                    if link is not None:
                        # Rebuild the pygments-highlighted form of the name
                        # so the replacement matches the emitted html.
                        parts = name.split('.')
                        name_html = period.join(orig_pattern % part
                                                for part in parts)
                        str_repl[name_html] = link_pattern % (link, name_html)
                # do the replacement in the html file
                # ensure greediness
                names = sorted(str_repl, key=len, reverse=True)
                expr = re.compile(r'(?<!\.)\b' +  # don't follow . or word
                                  '|'.join(re.escape(name)
                                           for name in names))

                def substitute_link(match):
                    return str_repl[match.group()]

                if len(str_repl) > 0:
                    with open(full_fname, 'rb') as fid:
                        lines_in = fid.readlines()
                    with open(full_fname, 'wb') as fid:
                        for line in lines_in:
                            line = line.decode('utf-8')
                            line = expr.sub(substitute_link, line)
                            fid.write(line.encode('utf-8'))
    print('[done]')
def setup(app):
    """Sphinx extension entry point: register gallery generation hooks."""
    app.connect('builder-inited', generate_example_rst)
    app.add_config_value('plot_gallery', True, 'html')
    # embed links after build is finished
    app.connect('build-finished', embed_code_links)
    # Sphinx hack: sphinx copies generated images to the build directory
    # each time the docs are made. If the desired image name already
    # exists, it appends a digit to prevent overwrites. The problem is,
    # the directory is never cleared. This means that each time you build
    # the docs, the number of images in the directory grows.
    #
    # This question has been asked on the sphinx development list, but there
    # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
    #
    # The following is a hack that prevents this behavior by clearing the
    # image build directory each time the docs are built. If sphinx
    # changes their layout between versions, this will not work (though
    # it should probably not cause a crash). Tested successfully
    # on Sphinx 1.0.7
    build_image_dir = '_build/html/_images'
    if os.path.exists(build_image_dir):
        filelist = os.listdir(build_image_dir)
        for filename in filelist:
            if filename.endswith('png'):
                os.remove(os.path.join(build_image_dir, filename))
def setup_module():
    # HACK: Stop nosetests running setup() above
    # (nose treats a module-level ``setup`` as a fixture; defining an
    # explicit no-op ``setup_module`` takes precedence and prevents nose
    # from calling the Sphinx ``setup(app)`` entry point with no argument).
    pass
| NelisVerhoef/scikit-learn | doc/sphinxext/gen_rst.py | Python | bsd-3-clause | 40,198 |
from __future__ import unicode_literals
from functools import total_ordering
from django.contrib.gis.geos import (
LinearRing, LineString, Point, Polygon, fromstr,
)
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import html_safe
@html_safe
@python_2_unicode_compatible
class GEvent(object):
    """
    A Python wrapper for the Google GEvent object.

    Events can be attached to any object derived from GOverlayBase with the
    add_event() call.

    For more information please see the Google Maps API Reference:
    https://developers.google.com/maps/documentation/javascript/reference#event

    Example:

        from django.shortcuts import render_to_response
        from django.contrib.gis.maps.google import GoogleMap, GEvent, GPolyline

        def sample_request(request):
            polyline = GPolyline('LINESTRING(101 26, 112 26, 102 31)')
            event = GEvent('click',
                           'function() { location.href = "http://www.google.com"}')
            polyline.add_event(event)
            return render_to_response('mytemplate.html',
                                      {'google' : GoogleMap(polylines=[polyline])})
    """

    def __init__(self, event, action):
        """
        Initializes a GEvent object.

        Parameters:

        event:
          string for the event, such as 'click'. The event must be a valid
          event for the object in the Google Maps API.
          There is no validation of the event type within Django.

        action:
          string containing a Javascript function, such as
          'function() { location.href = "newurl";}'
          The string must be a valid Javascript function. Again there is no
          validation of the function within Django.
        """
        self.event = event
        self.action = action

    def __str__(self):
        "Returns the parameter part of a GEvent."
        return '"%s", %s' % (self.event, self.action)
@html_safe
@python_2_unicode_compatible
class GOverlayBase(object):
    """Common base class for Google Maps overlay wrappers.

    Provides GEvent attachment and the JavaScript constructor-call string
    representation shared by all overlays.
    """

    def __init__(self):
        # GEvent instances attached via add_event().
        self.events = []

    def latlng_from_coords(self, coords):
        "Generates a JavaScript array of GLatLng objects for the given coordinates."
        latlngs = ('new GLatLng(%s,%s)' % (y, x) for x, y in coords)
        return '[%s]' % ','.join(latlngs)

    def add_event(self, event):
        "Attaches a GEvent to the overlay object."
        self.events.append(event)

    def __str__(self):
        "The string representation is the JavaScript API call."
        return '%s(%s)' % (self.__class__.__name__, self.js_params)
class GPolygon(GOverlayBase):
    """
    A Python wrapper for the Google GPolygon object. For more information
    please see the Google Maps API Reference:
    https://developers.google.com/maps/documentation/javascript/reference#Polygon
    """

    def __init__(self, poly,
                 stroke_color='#0000ff', stroke_weight=2, stroke_opacity=1,
                 fill_color='#0000ff', fill_opacity=0.4):
        """
        The GPolygon object initializes on a GEOS Polygon or a parameter that
        may be instantiated into GEOS Polygon. Please note that this will not
        depict a Polygon's internal rings.

        Keyword Options:

          stroke_color:
            The color of the polygon outline. Defaults to '#0000ff' (blue).

          stroke_weight:
            The width of the polygon outline, in pixels. Defaults to 2.

          stroke_opacity:
            The opacity of the polygon outline, between 0 and 1. Defaults to 1.

          fill_color:
            The color of the polygon fill. Defaults to '#0000ff' (blue).

          fill_opacity:
            The opacity of the polygon fill. Defaults to 0.4.
        """
        if isinstance(poly, six.string_types):
            # WKT/EWKT/hex string -> GEOS geometry.
            poly = fromstr(poly)
        if isinstance(poly, (tuple, list)):
            poly = Polygon(poly)
        if not isinstance(poly, Polygon):
            raise TypeError('GPolygon may only initialize on GEOS Polygons.')
        # Getting the envelope of the input polygon (used for automatically
        # determining the zoom level).
        self.envelope = poly.envelope
        # Translating the coordinates into a JavaScript array of
        # Google `GLatLng` objects (only the exterior ring/shell).
        self.points = self.latlng_from_coords(poly.shell.coords)
        # Stroke settings.
        self.stroke_color, self.stroke_opacity, self.stroke_weight = stroke_color, stroke_opacity, stroke_weight
        # Fill settings.
        self.fill_color, self.fill_opacity = fill_color, fill_opacity
        super(GPolygon, self).__init__()

    @property
    def js_params(self):
        # Argument order matches the GPolygon JavaScript constructor.
        return '%s, "%s", %s, %s, "%s", %s' % (self.points, self.stroke_color, self.stroke_weight, self.stroke_opacity,
                                               self.fill_color, self.fill_opacity)
class GPolyline(GOverlayBase):
    """
    A Python wrapper for the Google GPolyline object. For more information
    please see the Google Maps API Reference:
    https://developers.google.com/maps/documentation/javascript/reference#Polyline
    """

    def __init__(self, geom, color='#0000ff', weight=2, opacity=1):
        """
        The GPolyline object may be initialized on GEOS LineString, LinearRing,
        and Polygon objects (internal rings not supported) or a parameter that
        may be instantiated into one of the above geometries.

        Keyword Options:

          color:
            The color to use for the polyline. Defaults to '#0000ff' (blue).

          weight:
            The width of the polyline, in pixels. Defaults to 2.

          opacity:
            The opacity of the polyline, between 0 and 1. Defaults to 1.
        """
        # If a GEOS geometry isn't passed in, try to construct one.
        if isinstance(geom, six.string_types):
            geom = fromstr(geom)
        if isinstance(geom, (tuple, list)):
            geom = Polygon(geom)
        # Generating the lat/lng coordinate pairs.
        if isinstance(geom, (LineString, LinearRing)):
            self.latlngs = self.latlng_from_coords(geom.coords)
        elif isinstance(geom, Polygon):
            # Only the exterior ring (shell) of a polygon is rendered.
            self.latlngs = self.latlng_from_coords(geom.shell.coords)
        else:
            raise TypeError('GPolyline may only initialize on GEOS LineString, LinearRing, and/or Polygon geometries.')
        # Getting the envelope for automatic zoom determination.
        self.envelope = geom.envelope
        self.color, self.weight, self.opacity = color, weight, opacity
        super(GPolyline, self).__init__()

    @property
    def js_params(self):
        return '%s, "%s", %s, %s' % (self.latlngs, self.color, self.weight, self.opacity)
@total_ordering
class GIcon(object):
    """
    Creates a GIcon object to pass into a Gmarker object.

    The keyword arguments map to instance attributes of the same name. These,
    in turn, correspond to a subset of the attributes of the official GIcon
    javascript object:

    https://developers.google.com/maps/documentation/javascript/reference#Icon

    Because a Google map often uses several different icons, a name field has
    been added to the required arguments.

    Required Arguments:
        varname:
            A string which will become the basis for the js variable name of
            the marker, for this reason, your code should assign a unique
            name for each GIcon you instantiate, otherwise there will be
            name space collisions in your javascript.

    Keyword Options:
        image:
            The url of the image to be used as the icon on the map defaults
            to 'G_DEFAULT_ICON'

        iconsize:
            a tuple representing the pixel size of the foreground (not the
            shadow) image of the icon, in the format: (width, height) ex.:

            GIcon('fast_food',
                  image="/media/icon/star.png",
                  iconsize=(15,10))

            Would indicate your custom icon was 15px wide and 10px height.

        shadow:
            the url of the image of the icon's shadow

        shadowsize:
            a tuple representing the pixel size of the shadow image, format is
            the same as ``iconsize``

        iconanchor:
            a tuple representing the pixel coordinate relative to the top left
            corner of the icon image at which this icon is anchored to the map.
            In (x, y) format. x increases to the right in the Google Maps
            coordinate system and y increases downwards in the Google Maps
            coordinate system.)

        infowindowanchor:
            The pixel coordinate relative to the top left corner of the icon
            image at which the info window is anchored to this icon.
    """

    def __init__(self, varname, image=None, iconsize=None,
                 shadow=None, shadowsize=None, iconanchor=None,
                 infowindowanchor=None):
        self.varname = varname
        self.image = image
        self.iconsize = iconsize
        self.shadow = shadow
        self.shadowsize = shadowsize
        self.iconanchor = iconanchor
        self.infowindowanchor = infowindowanchor

    def __eq__(self, other):
        # Identity is based on varname only.  Return NotImplemented for
        # non-GIcon operands instead of raising AttributeError on
        # ``other.varname`` (e.g. when compared against a plain string).
        if not isinstance(other, GIcon):
            return NotImplemented
        return self.varname == other.varname

    def __lt__(self, other):
        # total_ordering derives <=, > and >= from this and __eq__.
        if not isinstance(other, GIcon):
            return NotImplemented
        return self.varname < other.varname

    def __hash__(self):
        # XOR with hash of GIcon type so that hash('varname') won't
        # equal hash(GIcon('varname')).
        return hash(self.__class__) ^ hash(self.varname)
class GMarker(GOverlayBase):
    """
    A Python wrapper for the Google GMarker object. For more information
    please see the Google Maps API Reference:
    https://developers.google.com/maps/documentation/javascript/reference#Marker

    Example:

        from django.shortcuts import render_to_response
        from django.contrib.gis.maps.google.overlays import GMarker, GEvent

        def sample_request(request):
            marker = GMarker('POINT(101 26)')
            event = GEvent('click',
                           'function() { location.href = "http://www.google.com"}')
            marker.add_event(event)
            return render_to_response('mytemplate.html',
                                      {'google' : GoogleMap(markers=[marker])})
    """

    def __init__(self, geom, title=None, draggable=False, icon=None):
        """
        The GMarker object may initialize on GEOS Points or a parameter
        that may be instantiated into a GEOS point. Keyword options map to
        GMarkerOptions -- so far only the title option is supported.

        Keyword Options:

         title:
           Title option for GMarker, will be displayed as a tooltip.

         draggable:
           Draggable option for GMarker, disabled by default.
        """
        # If a GEOS geometry isn't passed in, try to construct one.
        if isinstance(geom, six.string_types):
            geom = fromstr(geom)
        if isinstance(geom, (tuple, list)):
            geom = Point(geom)
        if isinstance(geom, Point):
            self.latlng = self.latlng_from_coords(geom.coords)
        else:
            raise TypeError('GMarker may only initialize on GEOS Point geometry.')
        # Getting the envelope for automatic zoom determination.
        self.envelope = geom.envelope
        # TODO: Add support for more GMarkerOptions
        self.title = title
        self.draggable = draggable
        self.icon = icon
        super(GMarker, self).__init__()

    def latlng_from_coords(self, coords):
        # A single point renders as one GLatLng; note the (x, y) ->
        # (lat, lng) argument swap expected by the Google Maps API.
        return 'new GLatLng(%s,%s)' % (coords[1], coords[0])

    def options(self):
        # Build the GMarkerOptions JavaScript object literal from the
        # options that were actually set.
        result = []
        if self.title:
            result.append('title: "%s"' % self.title)
        if self.icon:
            result.append('icon: %s' % self.icon.varname)
        if self.draggable:
            result.append('draggable: true')
        return '{%s}' % ','.join(result)

    @property
    def js_params(self):
        return '%s, %s' % (self.latlng, self.options())
| Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/contrib/gis/maps/google/overlays.py | Python | artistic-2.0 | 11,955 |
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_payslips_by_employees
import hr_payroll_contribution_register_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| diogocs1/comps | web/addons/hr_payroll/wizard/__init__.py | Python | apache-2.0 | 1,159 |
# -*- coding: utf-8 -*-
"""
jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
Django inspired non-XML syntax but supports inline expressions and
an optional sandboxed environment.
Nutshell
--------
Here a small example of a Jinja2 template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
# Docstring markup used throughout the package (consumed by doc tools).
__docformat__ = 'restructuredtext en'
# Single-source package version string.
__version__ = '2.7.1'
# high level interface
from jinja2.environment import Environment, Template
# loaders
from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
ModuleLoader
# bytecode caches
from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
MemcachedBytecodeCache
# undefined types
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined
# exceptions
from jinja2.exceptions import TemplateError, UndefinedError, \
TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
TemplateAssertionError
# decorators and public utilities
from jinja2.filters import environmentfilter, contextfilter, \
evalcontextfilter
from jinja2.utils import Markup, escape, clear_caches, \
environmentfunction, evalcontextfunction, contextfunction, \
is_undefined
# Public package-level API; kept in sync with the re-export imports above so
# ``from jinja2 import *`` exposes exactly these names.
__all__ = [
    'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
    'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
    'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
    'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
    'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
    'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
    'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
    'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
    'evalcontextfilter', 'evalcontextfunction'
]
| Drvanon/Game | venv/lib/python3.3/site-packages/jinja2/__init__.py | Python | apache-2.0 | 2,270 |
from __future__ import unicode_literals
import logging
from oauthlib.common import generate_token, urldecode
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import TokenExpiredError, is_secure_transport
import requests
# Module-level logger; defaults to WARNING so the many log.debug() calls in
# this module stay silent unless the consuming application opts in.
log = logging.getLogger(__name__)
log.setLevel(logging.WARNING)
class TokenUpdated(Warning):
    """Raised after an automatic token refresh when no ``token_updater``
    callback is registered; carries the new token so the caller can persist
    it (see :meth:`OAuth2Session.request`)."""
    def __init__(self, token):
        super(TokenUpdated, self).__init__()
        # the freshly refreshed token dict
        self.token = token
class OAuth2Session(requests.Session):
    """Versatile OAuth 2 extension to :class:`requests.Session`.
    Supports any grant type adhering to :class:`oauthlib.oauth2.Client` spec
    including the four core OAuth 2 grants.
    Can be used to create authorization urls, fetch tokens and access protected
    resources using the :class:`requests.Session` interface you are used to.
    - :class:`oauthlib.oauth2.WebApplicationClient` (default): Authorization Code Grant
    - :class:`oauthlib.oauth2.MobileApplicationClient`: Implicit Grant
    - :class:`oauthlib.oauth2.LegacyApplicationClient`: Password Credentials Grant
    - :class:`oauthlib.oauth2.BackendApplicationClient`: Client Credentials Grant
    Note that the only time you will be using Implicit Grant from python is if
    you are driving a user agent able to obtain URL fragments.
    """
    # NOTE(review): refresh_token() and request() below read ``self.refresher``
    # (``.redirect_uri`` / ``.auth``), but nothing in this class ever assigns
    # that attribute.  The caller apparently must attach such an object before
    # auto-refresh is used, otherwise an AttributeError is raised -- TODO
    # confirm intended usage of this fork (upstream requests-oauthlib has no
    # ``refresher`` attribute).
    def __init__(self, client_id=None, client=None, auto_refresh_url=None,
            auto_refresh_kwargs=None, scope=None, redirect_uri=None, token=None,
            state=None, token_updater=None, **kwargs):
        """Construct a new OAuth 2 client session.
        :param client_id: Client id obtained during registration
        :param client: :class:`oauthlib.oauth2.Client` to be used. Default is
                       WebApplicationClient which is useful for any
                       hosted application but not mobile or desktop.
        :param scope: List of scopes you wish to request access to
        :param redirect_uri: Redirect URI you registered as callback
        :param token: Token dictionary, must include access_token
                      and token_type.
        :param state: State string used to prevent CSRF. This will be given
                      when creating the authorization url and must be supplied
                      when parsing the authorization response.
                      Can be either a string or a no argument callable.
        :auto_refresh_url: Refresh token endpoint URL, must be HTTPS. Supply
                           this if you wish the client to automatically refresh
                           your access tokens.
        :auto_refresh_kwargs: Extra arguments to pass to the refresh token
                              endpoint.
        :token_updater: Method with one argument, token, to be used to update
                        your token databse on automatic token refresh. If not
                        set a TokenUpdated warning will be raised when a token
                        has been refreshed. This warning will carry the token
                        in its token argument.
        :param kwargs: Arguments to pass to the Session constructor.
        """
        super(OAuth2Session, self).__init__(**kwargs)
        self.client_id = client_id
        # Fall back to the client object's id when no explicit id was given.
        if client is not None and not self.client_id:
            self.client_id = client.client_id
        self.scope = scope
        self.redirect_uri = redirect_uri
        self.token = token or {}
        # ``state`` may be a literal string or a zero-argument callable;
        # default to oauthlib's random token generator.
        self.state = state or generate_token
        self._state = state
        self.auto_refresh_url = auto_refresh_url
        self.auto_refresh_kwargs = auto_refresh_kwargs or {}
        self.token_updater = token_updater
        self._client = client or WebApplicationClient(client_id, token=token)
        # NOTE(review): ``_populate_attributes`` is a private oauthlib API --
        # verify it still exists in the pinned oauthlib release.
        self._client._populate_attributes(token or {})
        # Allow customizations for non compliant providers through various
        # hooks to adjust requests and responses.
        self.compliance_hook = {
            'access_token_response': set([]),
            'refresh_token_response': set([]),
            'protected_request': set([]),
        }
    def new_state(self):
        """Generates a state string to be used in authorizations."""
        try:
            # ``self.state`` may be a zero-argument callable...
            self._state = self.state()
            log.debug('Generated new state %s.', self._state)
        except TypeError:
            # ...or a plain string supplied by the caller.
            self._state = self.state
            log.debug('Re-using previously supplied state %s.', self._state)
        return self._state
    @property
    def authorized(self):
        """Boolean that indicates whether this session has an OAuth token
        or not. If `self.authorized` is True, you can reasonably expect
        OAuth-protected requests to the resource to succeed. If
        `self.authorized` is False, you need the user to go through the OAuth
        authentication dance before OAuth-protected requests to the resource
        will succeed.
        """
        return bool(self._client.access_token)
    def authorization_url(self, url, state=None, **kwargs):
        """Form an authorization URL.
        :param url: Authorization endpoint url, must be HTTPS.
        :param state: An optional state string for CSRF protection. If not
                      given it will be generated for you.
        :param kwargs: Extra parameters to include.
        :return: authorization_url, state
        """
        state = state or self.new_state()
        return self._client.prepare_request_uri(url,
                redirect_uri=self.redirect_uri,
                scope=self.scope,
                state=state,
                **kwargs), state
    def fetch_token(self, token_url, code=None, authorization_response=None,
            body='', auth=None, username=None, password=None, method='POST',
            timeout=None, headers=None, verify=True, **kwargs):
        """Generic method for fetching an access token from the token endpoint.
        If you are using the MobileApplicationClient you will want to use
        token_from_fragment instead of fetch_token.
        :param token_url: Token endpoint URL, must use HTTPS.
        :param code: Authorization code (used by WebApplicationClients).
        :param authorization_response: Authorization response URL, the callback
                                       URL of the request back to you. Used by
                                       WebApplicationClients instead of code.
        :param body: Optional application/x-www-form-urlencoded body to add the
                     include in the token request. Prefer kwargs over body.
        :param auth: An auth tuple or method as accepted by requests.
        :param username: Username used by LegacyApplicationClients.
        :param password: Password used by LegacyApplicationClients.
        :param method: The HTTP method used to make the request. Defaults
                       to POST, but may also be GET. Other methods should
                       be added as needed.
        :param headers: Dict to default request headers with.
        :param timeout: Timeout of the request in seconds.
        :param verify: Verify SSL certificate.
        :param kwargs: Extra parameters to include in the token request.
        :return: A token dict
        """
        if not is_secure_transport(token_url):
            raise InsecureTransportError()
        # Derive the authorization code from the redirect URL when it was not
        # passed explicitly.
        if not code and authorization_response:
            self._client.parse_request_uri_response(authorization_response,
                    state=self._state)
            code = self._client.code
        elif not code and isinstance(self._client, WebApplicationClient):
            code = self._client.code
        if not code:
            # NOTE(review): the message says 'authorization_code' but the
            # actual parameter is named 'authorization_response'.
            raise ValueError('Please supply either code or '
                             'authorization_code parameters.')
        body = self._client.prepare_request_body(code=code, body=body,
                redirect_uri=self.redirect_uri, username=username,
                password=password, **kwargs)
        headers = headers or {
            'Accept': 'application/json',
            'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
        }
        if method.upper() == 'POST':
            r = self.post(token_url, data=dict(urldecode(body)),
                    timeout=timeout, headers=headers, auth=auth,
                    verify=verify)
            log.debug('Prepared fetch token request body %s', body)
        elif method.upper() == 'GET':
            # if method is not 'POST', switch body to querystring and GET
            r = self.get(token_url, params=dict(urldecode(body)),
                    timeout=timeout, headers=headers, auth=auth,
                    verify=verify)
            log.debug('Prepared fetch token request querystring %s', body)
        else:
            raise ValueError('The method kwarg must be POST or GET.')
        log.debug('Request to fetch token completed with status %s.',
                  r.status_code)
        log.debug('Request headers were %s', r.request.headers)
        log.debug('Request body was %s', r.request.body)
        log.debug('Response headers were %s and content %s.',
                  r.headers, r.text)
        log.debug('Invoking %d token response hooks.',
                  len(self.compliance_hook['access_token_response']))
        for hook in self.compliance_hook['access_token_response']:
            log.debug('Invoking hook %s.', hook)
            r = hook(r)
        r.raise_for_status()
        self._client.parse_request_body_response(r.text, scope=self.scope)
        self.token = self._client.token
        log.debug('Obtained token %s.', self.token)
        return self.token
    def token_from_fragment(self, authorization_response):
        """Parse token from the URI fragment, used by MobileApplicationClients.
        :param authorization_response: The full URL of the redirect back to you
        :return: A token dict
        """
        self._client.parse_request_uri_response(authorization_response,
                state=self._state)
        self.token = self._client.token
        return self.token
    def refresh_token(self, token_url, refresh_token=None, body='', auth=None,
            timeout=None, verify=True, **kwargs):
        """Fetch a new access token using a refresh token.
        :param token_url: The token endpoint, must be HTTPS.
        :param refresh_token: The refresh_token to use.
        :param body: Optional application/x-www-form-urlencoded body to add the
                     include in the token request. Prefer kwargs over body.
        :param auth: An auth tuple or method as accepted by requests.
        :param timeout: Timeout of the request in seconds.
        :param verify: Verify SSL certificate.
        :param kwargs: Extra parameters to include in the token request.
        :return: A token dict
        """
        if not token_url:
            raise ValueError('No token endpoint set for auto_refresh.')
        if not is_secure_transport(token_url):
            raise InsecureTransportError()
        # Need to nullify token to prevent it from being added to the request
        refresh_token = refresh_token or self.token.get('refresh_token')
        self.token = {}
        # NOTE(review): ``self.refresher`` is never assigned by this class;
        # these two lines raise AttributeError unless the caller attached a
        # refresher object exposing ``redirect_uri`` -- TODO confirm.
        log.debug("Setting redirect uri to " + self.refresher.redirect_uri)
        kwargs['redirect_uri'] = self.refresher.redirect_uri
        log.debug('Adding auto refresh key word arguments %s.',
                  self.auto_refresh_kwargs)
        kwargs.update(self.auto_refresh_kwargs)
        log.debug('Prepared refresh token request body pre- %s', body)
        body = self._client.prepare_refresh_body(body=body,
                refresh_token=refresh_token, scope=self.scope, **kwargs)
        log.debug('Prepared refresh token request body %s', body)
        log.debug(auth)
        r = self.post(token_url, data=dict(urldecode(body)), auth=auth,
                timeout=timeout, verify=verify)
        log.debug('Request to refresh token completed with status %s.',
                  r.status_code)
        log.debug('Response headers were %s and content %s.',
                  r.headers, r.text)
        log.debug('Invoking %d token response hooks.',
                  len(self.compliance_hook['refresh_token_response']))
        for hook in self.compliance_hook['refresh_token_response']:
            log.debug('Invoking hook %s.', hook)
            r = hook(r)
        self.token = self._client.parse_request_body_response(r.text, scope=self.scope)
        # Providers may omit the refresh token from the refresh response;
        # keep the one just used so future refreshes still work.
        if 'refresh_token' not in self.token:
            log.debug('No new refresh token given. Re-using old.')
            self.token['refresh_token'] = refresh_token
        return self.token
    def request(self, method, url, data=None, headers=None, **kwargs):
        """Intercept all requests and add the OAuth 2 token if present."""
        if not is_secure_transport(url):
            raise InsecureTransportError()
        if self.token:
            log.debug('Invoking %d protected resource request hooks.',
                      len(self.compliance_hook['protected_request']))
            for hook in self.compliance_hook['protected_request']:
                url, headers, data = hook(url, headers, data)
            try:
                url, headers, data = self._client.add_token(url,
                        http_method=method, body=data, headers=headers)
            # Attempt to retrieve and save new access token if expired
            except TokenExpiredError:
                if self.auto_refresh_url:
                    # NOTE(review): relies on an externally supplied
                    # ``self.refresher`` for credentials -- see refresh_token().
                    token = self.refresh_token(self.auto_refresh_url,auth=self.refresher.auth,**kwargs)
                    if self.token_updater:
                        self.token_updater(token)
                        url, headers, data = self._client.add_token(url,
                                http_method=method, body=data, headers=headers)
                    else:
                        # No persistence callback registered: surface the new
                        # token to the caller via the TokenUpdated exception.
                        raise TokenUpdated(token)
                else:
                    raise
        return super(OAuth2Session, self).request(method, url,
                headers=headers, data=data, **kwargs)
    def register_compliance_hook(self, hook_type, hook):
        """Register a hook for request/response tweaking.
        Available hooks are:
            access_token_response invoked before token parsing.
            refresh_token_response invoked before refresh token parsing.
            protected_request invoked before making a request.
        If you find a new hook is needed please send a GitHub PR request
        or open an issue.
        """
        if hook_type not in self.compliance_hook:
            # NOTE(review): the '%s' placeholders are passed as extra
            # ValueError arguments, not interpolated into the message.
            raise ValueError('Hook type %s is not in %s.',
                             hook_type, self.compliance_hook)
        self.compliance_hook[hook_type].add(hook)
| lucidbard/requests-oauthlib | requests_oauthlib/oauth2_session.py | Python | isc | 14,834 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import os
import os.path
import numpy as np
from utils import util
from utils.verilog import top_harness_clk
from prjuray.db import Database
def gen_sites():
    """Yield the name of every SLICEM site in the current part, walking the
    grid in sorted tile-name order."""
    database = Database(util.get_db_root(), util.get_part())
    grid = database.grid()
    for tile in sorted(grid.tiles()):
        info = grid.gridinfo_at_loc(grid.loc_of_tilename(tile))
        for site_name, site_type in info.sites.items():
            if site_type == 'SLICEM':
                yield site_name
def emit_ultra_slice_memory(f, site, root_cell, modes, dimuxes):
    """Write Vivado Tcl to *f* that unplaces *site* and then LOC/BEL-pins the
    leaf LUT cells of *root_cell* according to the per-LUT *modes*.

    When the last mode is a RAM mode the LUTs are walked H..A with the mode
    lists reversed; otherwise they are walked A..H.
    """
    is_ram = modes[-1].startswith('RAM')

    # Clear any previous placement on the target site.
    print('set site [get_sites {}]'.format(site), file=f)
    print('unplace_cell [get_cells -of $site]', file=f)
    print('', file=f)

    def place(name, c, leaf, lut):
        # Pin one leaf cell to the <letter><5|6>LUT BEL of the site.
        bel = '{c}{lut}LUT'.format(c=c.upper(), lut=lut)
        print(
            'set {name} [get_cells {root_cell}/{c}lut_i/{leaf}]'.format(
                root_cell=root_cell, name=name, c=c, leaf=leaf),
            file=f)
        print(
            'set_property BEL {bel} ${name}'.format(bel=bel, name=name),
            file=f)
        print(
            'set_property LOC {site} ${name}'.format(site=site, name=name),
            file=f)
        print('', file=f)

    if is_ram:
        for c, mode, _dimux in zip('hgfedcba', reversed(modes),
                                   reversed(dimuxes)):
            if mode in ('RAMD64', 'RAMS64'):
                place('ram', c, 'ram_i', lut='6')
            elif mode in ('RAMD32', 'RAMS32'):
                place('ram1', c, 'ram1_i', lut='6')
                place('ram0', c, 'ram0_i', lut='5')
            elif mode == 'LOGIC':
                place('lut6', c, 'lut6', lut='6')
                place('lut5', c, 'lut5', lut='5')
            else:
                assert False, mode
    else:
        for c, mode, _dimux in zip('abcdefgh', modes, dimuxes):
            if mode == 'LOGIC':
                place('lut6', c, 'lut6', lut='6')
                place('lut5', c, 'lut5', lut='5')
            elif mode == 'SRL16':
                place('srl6', c, 'srl6', lut='6')
                place('srl5', c, 'srl5', lut='5')
            elif mode == 'SRL32':
                place('srl6', c, 'srl6', lut='6')
            else:
                assert False, mode
def print_top(seed):
    """Emit (to stdout) a Verilog top module instantiating N randomly
    configured ``ultra_slice_memory`` models, and write matching Vivado
    placement Tcl to ``top.tcl``.
    :param seed: numpy RNG seed, so a run is reproducible.
    """
    np.random.seed(seed)
    DCLK_N = 10
    DIN_N = (16 * 2 + 8)
    DOUT_N = 32
    top_harness_clk(DCLK_N, DIN_N, DOUT_N)
    print("")
    print("""
module roi(input [{DCLK_N}-1:0] clk, input [{DIN_N}-1:0] din, output [31:0] dout);
    wire [15:0] sr;
    wire [15:0] ce;
    wire [7:0] d;
    wire [31:0] q;
    assign sr = din[0+:16];
    assign ce = din[16+:16];
    assign d = din[32+:8];
    assign dout = q;
    """.format(DCLK_N=DCLK_N, DIN_N=DIN_N, DOUT_N=DOUT_N))
    N = 150
    # D is the pool of wire names that random_bit()/random_data() draw from;
    # it is rebuilt from the current slice's outputs at the end of each
    # iteration, chaining the slices together.
    D = ["d[%d]" % i for i in range(8)]
    slices = sorted(gen_sites())
    np.random.shuffle(slices)
    with open('top.tcl', 'w') as f:
        for i in range(N):
            sl = slices.pop()
            clk = tuple(np.random.randint(DCLK_N, size=2))
            # pick a write clock distinct from both read clocks
            wclk = None
            while wclk is None or wclk in clk:
                wclk = np.random.randint(DCLK_N)
            # indices >= 16 fall outside the sr/ce buses and become constant 1
            sr = tuple([
                "1'b1" if y >= 16 else "sr[%d]" % y
                for y in np.random.randint(25, size=2)
            ])
            ce = tuple([
                "1'b1" if y >= 16 else "ce[%d]" % y
                for y in np.random.randint(25, size=4)
            ])
            we = np.random.randint(16)
            def random_fftype(mode):
                # mode selects an FF family; returns None for other values
                if mode == 0:
                    return np.random.choice(["NONE", "FDSE", "FDRE"])
                elif mode == 1:
                    return np.random.choice(["NONE", "FDPE", "FDCE"])
                elif mode == 2:
                    return np.random.choice(["NONE", "LDPE", "LDCE"])
            def random_bit():
                return np.random.choice(D)
            def random_data(width):
                return "{%s}" % (", ".join(
                    [random_bit() for k in range(width)]))
            #fftypes = [random_fftype(ffmode[j // 8]) for j in range(16)]
            fftypes = ["NONE" for j in range(16)]
            # Choose a mode per LUT, walking H..A.  The branching appears to
            # encode SLICEM legality (RAM modes only as a contiguous run from
            # H, SRL32 SIN chaining, ...) -- TODO confirm against the spec.
            dimux = []
            mode = []
            ram_legal = True
            for lut in "HGFEDCBA":
                choices = ["LOGIC"]
                if len(dimux) >= 2 and dimux[1] == 'SIN':
                    mode.append("SRL32")
                    dimux.append("SIN")
                    continue
                if lut == "H":
                    choices += ["RAMD64", "RAMS64", "RAMD32", "SRL16", "SRL32"]
                else:
                    if mode[0][0:3] != "RAM":
                        choices += ["SRL16", "SRL32"]
                    if ram_legal:
                        choices.append(mode[0])
                # bias away from plain LOGIC (p=0.1), uniform over the rest
                p = [0.1]
                for j in range(1, len(choices)):
                    p.append(0.9 / (len(choices) - 1))
                if len(choices) == 1:
                    p[0] = 1
                next_mode = np.random.choice(choices, p=p)
                if len(mode
                       ) > 0 and mode[-1] == "SRL32" and next_mode == "SRL32":
                    dimux.append(np.random.choice(["DI", "SIN"], p=[0.2, 0.8]))
                else:
                    dimux.append("DI")
                if next_mode[0:3] != "RAM":
                    ram_legal = False
                mode.append(next_mode)
            # the lists were built H->A; the instantiation below wants A->H
            dimux = list(reversed(dimux))
            mode = list(reversed(mode))
            emit_ultra_slice_memory(f, sl, 'roi/slice{}'.format(i), mode,
                                    dimux)
            print(' wire [31:0] d%d;' % i)
            print(' ultra_slice_memory #(')
            print(' .LOC("%s"),' % sl)
            for j in range(8):
                print(' .%s_MODE("%s"),' % ("ABCDEFGH" [j], mode[j]))
            for lut in "ABCDEFGH":
                print(" .%sLUT_INIT(64'b%s)," % (lut, "".join(
                    str(_) for _ in np.random.randint(2, size=64))))
            for j in range(16):
                print(' .%sFF%s_TYPE("%s"),' %
                      ("ABCDEFGH" [j // 2], "2" if
                       (j % 2) == 1 else "", fftypes[j]))
            print(" .FF_INIT(16'b%s)," % "".join(
                str(_) for _ in np.random.randint(2, size=16)))
            for j1 in "ABCDEFGH":
                for j2 in ("1", "2"):
                    print(' .FFMUX%s%s("%s"),' %
                          (j1, j2, np.random.choice(["F7F8", "D6", "D5"])))
            for j in "ABCDEFGH":
                print(' .OUTMUX%s("%s"),' %
                      (j, np.random.choice(["F7F8", "D6", "D5"])))
            for j in range(7):
                print(' .DIMUX%s("%s"),' % ("ABCDEFG" [j], dimux[j]))
            print(" .WCLKINV(1'd%d)," % np.random.randint(2))
            waused = np.random.randint(4)
            print(" .WA6USED(1'd%d)," % (1 if waused > 0 else 0))
            print(" .WA7USED(1'd%d)," % (1 if waused > 1 else 0))
            print(" .WA8USED(1'd%d)," % (1 if waused > 2 else 0))
            print(" .CLKINV(2'd%d)," % np.random.randint(4))
            print(" .SRINV(2'd%d)" % np.random.randint(4))
            print(' ) slice%d (' % i)
            for j in range(1, 7):
                print(" .A%d(%s)," % (j, random_data(8)))
            print(" .I(%s)," % random_data(8))
            print(" .X(%s)," % random_data(8))
            print(" .CLK({clk[%d], clk[%d]})," % clk[0:2])
            print(" .WCLK(clk[%d])," % wclk)
            print(" .SR({%s, %s})," % sr)
            print(" .CE({%s, %s, %s, %s})," % ce[0:4])
            print(" .WE(ce[%d])," % we)
            print(" .O(d%d[7:0])," % i)
            print(" .Q(d%d[15:8])," % i)
            print(" .Q2(d%d[23:16])," % i)
            print(" .MUX(d%d[31:24])" % i)
            print(' );')
            print()
            # refresh the signal pool with this slice's outputs; FF outputs
            # are only usable when the corresponding FF type is not NONE
            D.clear()
            for j in range(8):
                D.append("d%d[%d]" % (i, j))
                D.append("d%d[%d]" % (i, 24 + j))
                if fftypes[2 * j] != "NONE":
                    D.append("d%d[%d]" % (i, 8 + j))
                if fftypes[2 * j + 1] != "NONE":
                    D.append("d%d[%d]" % (i, 16 + j))
    print(" assign q = d%d;" % (N - 1))
    print("endmodule")
# Echo the ultra_slice_memory model from the URAY spec directory to stdout,
# with trailing whitespace stripped from every line.
spec_path = os.path.join(os.getenv('URAY_DIR'), 'spec', 'slice_memory.v')
with open(spec_path) as spec_file:
    for line in spec_file:
        print(line.rstrip())
| SymbiFlow/prjuray | utils/spec/slice_memory.py | Python | isc | 9,356 |
from django.core.management.base import BaseCommand, CommandError
from metricsapp.models import Metric, SprintMetric
class Command(BaseCommand):
    """Management command that executes metrics: all active ones by default,
    or only those named via ``-m``/``--metric-names``."""
    help = 'Runs all available metrics.'

    def add_arguments(self, parser):
        # Optional whitelist of metric names; omitted => run every
        # active metric.
        parser.add_argument("-m", "--metric-names", type=str, dest="explicit", nargs="+")

    def handle(self, *args, **options):
        active = Metric.objects.filter(active=True).select_subclasses()
        requested = options['explicit']
        if requested:
            # Look each requested metric up by name (raises if unknown).
            self._run_metrics([active.get(name=name) for name in requested])
        else:
            self._run_metrics(active)

    def _run_metrics(self, metrics):
        # Announce and run each metric in turn.
        for metric in metrics:
            self.stdout.write('Running "{}"'.format(metric))
            metric.run()
| chrisma/ScrumLint | metricsapp/management/commands/run_metrics.py | Python | mit | 788 |
import unittest
from katas.kyu_7.complementary_dna import DNA_strand
class DNAStrandTestCase(unittest.TestCase):
    """Checks DNA_strand against known complementary-strand pairs."""

    def test_equals(self):
        self.assertEqual('TTTT', DNA_strand('AAAA'))

    def test_equals_2(self):
        self.assertEqual('TAACG', DNA_strand('ATTGC'))

    def test_equals_3(self):
        self.assertEqual('CATA', DNA_strand('GTAT'))
| the-zebulan/CodeWars | tests/kyu_7_tests/test_complementary_dna.py | Python | mit | 364 |
# -*- encoding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from pytils import VERSION as pytils_version
class ExamplesTestCase(TestCase):
    """Smoke tests: every pytils example view renders and contains the
    expected fragments."""

    def setUp(self):
        self.c = Client()

    def _get_body(self, url_name):
        # Fetch the named view, check it renders, return the decoded body.
        response = self.c.get(reverse(url_name))
        self.assertEqual(response.status_code, 200)
        return response.content.decode('utf-8')

    def testIndex(self):
        body = self._get_body('pytils_example')
        self.assertTrue('pytils %s' % pytils_version in body)
        for name in ('pytils_dt_example', 'pytils_numeral_example',
                     'pytils_translit_example'):
            self.assertTrue(reverse(name) in body)

    def testDt(self):
        body = self._get_body('pytils_dt_example')
        for fragment in ('distance_of_time',
                         'ru_strftime',
                         'ru_strftime_inflected',
                         'ru_strftime_preposition',
                         u'вчера',
                         u'завтра'):
            self.assertTrue(fragment in body)

    def testNumeral(self):
        body = self._get_body('pytils_numeral_example')
        for fragment in ('choose_plural',
                         'get_plural',
                         'rubles',
                         'in_words',
                         'sum_string',
                         u'комментарий',
                         u'без примеров',
                         u'двадцать три рубля пятнадцать копеек',
                         u'двенадцать рублей',
                         u'двадцать один',
                         u'тридцать одна целая триста восемьдесят пять тысячных',
                         u'двадцать один комментарий'):
            self.assertTrue(fragment in body)

    def testTranslit(self):
        body = self._get_body('pytils_translit_example')
        for fragment in ('translify',
                         'detranslify',
                         'slugify',
                         'Primer trasliteratsii sredstvami pytils',
                         'primer-trasliteratsii-sredstvami-pytils',
                         'primer-obratnoj-transliteratsii'):
            self.assertTrue(fragment in body)
| Forever-Young/pytils | doc/examples-django/pytilsex/tests.py | Python | mit | 2,702 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-02-13 17:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.9, 2017-02-13).
    Converts ``MenuItemProduct.category`` into a ManyToManyField of the same
    name by removing the old field and adding the new one.
    NOTE(review): the remove/add pair is destructive -- existing category
    assignments are dropped on migrate and are not restored by reversing.
    """
    dependencies = [
        ('product', '0028_product_related_title'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='menuitemproduct',
            name='category',
        ),
        migrations.AddField(
            model_name='menuitemproduct',
            name='category',
            field=models.ManyToManyField(blank=True, related_name='menuitem', related_query_name='menuit', to='product.Category', verbose_name='Category'),
        ),
    ]
| skylifewww/pangolin-fog | product/migrations/0029_auto_20170213_1741.py | Python | mit | 681 |
from collections import Counter
from imblearn.datasets import make_imbalance
from imblearn.metrics import classification_report_imbalanced
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import ClusterCentroids
from imblearn.under_sampling import NearMiss
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
def scatter_plot_2d(x_ls, y_ls):
    """Scatter-plot the 2-D samples in *x_ls* on the current axes, one
    colour/marker per class label in *y_ls* (does not call plt.show())."""
    marker_cycle = ('s', 'x', 'o', '^', 'v')
    colour_cycle = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    classes = np.unique(y_ls)
    cmap = ListedColormap(colour_cycle[:len(classes)])
    for idx, label in enumerate(classes):
        mask = y_ls == label
        plt.scatter(x = x_ls[mask, 0], y = x_ls[mask, 1],
                    alpha = .8, c = cmap(idx),
                    marker = marker_cycle[idx], label = label)
    # plt.show()
def deci_bdry_plot_2d(x_ls, y_ls, classifier, resolution = .02):
    """Shade *classifier*'s decision regions over the 2-D feature space of
    *x_ls*, then overlay the samples per class (does not call plt.show())."""
    marker_cycle = ('s', 'x', 'o', '^', 'v')
    colour_cycle = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    classes = np.unique(y_ls)
    cmap = ListedColormap(colour_cycle[:len(classes)])
    # evaluation grid covering the data, padded by one unit on each side
    x1_lo, x1_hi = x_ls[:, 0].min() - 1, x_ls[:, 0].max() + 1
    x2_lo, x2_hi = x_ls[:, 1].min() - 1, x_ls[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_lo, x1_hi, resolution),
                           np.arange(x2_lo, x2_hi, resolution))
    grid_points = np.array([xx1.ravel(), xx2.ravel()]).T
    Z = classifier.predict(grid_points).reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha = .4, cmap = cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # overlay the actual samples
    for idx, label in enumerate(classes):
        mask = y_ls == label
        plt.scatter(x = x_ls[mask, 0], y = x_ls[mask, 1],
                    alpha = .8, c = cmap(idx),
                    marker = marker_cycle[idx], label = label)
    # plt.show()
def multi_class_under_sampling():
    """Example: multiclass classification with under-sampling.

    Builds an imbalanced iris data set, under-samples the training split with
    NearMiss and ClusterCentroids, shows the two resampled sets side by side,
    then trains a LinearSVC pipeline on each sampler, prints its imbalanced
    classification report and plots its decision boundary.

    Fixes over the scratch version:
    * the second subplot previously re-plotted the NearMiss data instead of
      the ClusterCentroids data;
    * the trailing plotting code referenced an undefined ``pipeline`` and a
      nonexistent ``Axes.scatter_plot`` method, raising a NameError.
    """
    RANDOM_STATE = 42
    iris = load_iris()
    X, y = make_imbalance(iris.data, iris.target, ratio = {0:25, 1:50, 2:50}, random_state = 0)
    # keep two features only so the decision boundary can be drawn in 2-D
    X_train, X_test, y_train, y_test = train_test_split(X[:, [1, 2]], y, random_state = RANDOM_STATE)
    nm = NearMiss(version = 1, random_state = RANDOM_STATE)
    X_resample_nm, y_resample_nm = nm.fit_sample(X_train, y_train)
    cc = ClusterCentroids(random_state = 0)
    X_resample_cc, y_resample_cc = cc.fit_sample(X_train, y_train)
    # side-by-side scatter plots of the two resampled training sets
    fig, (ax_nm, ax_cc) = plt.subplots(ncols = 2)
    plt.sca(ax_nm)
    scatter_plot_2d(X_resample_nm, y_resample_nm)
    plt.sca(ax_cc)
    scatter_plot_2d(X_resample_cc, y_resample_cc)
    plt.show()
    # sampler + classifier pipelines, reported and plotted one after another
    for sampler in (NearMiss(version = 1, random_state = RANDOM_STATE),
                    ClusterCentroids(random_state = 0)):
        pipeline = make_pipeline(sampler, LinearSVC(random_state = RANDOM_STATE))
        pipeline.fit(X_train, y_train)
        print(classification_report_imbalanced(y_test, pipeline.predict(X_test)))
        deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline)
        plt.show()
def wendy_try_iris():
    """Assemble the iris data set into a single labelled DataFrame.
    The pair-plot and dimension-reduction experiments are kept below as
    commented-out scratch work.
    """
    RANDOM_STATE = 42
    iris = load_iris()
    # X, y = make_imbalance(iris.data, iris.target, ratio = {0:25, 1:50, 2:50}, random_state = 0)
    feature_names = ['Sepal_length', 'Sepal_width', 'Petal_length', 'Petal_width']
    X = pd.DataFrame(iris.data, columns = feature_names)
    y = pd.DataFrame(iris.target, columns = ['Species'])
    df = X  # NOTE: alias, not a copy -- the next line also mutates X
    df['Species'] = y
    '''pair plot for the features'''
    # sns.set(style='whitegrid', context='notebook')
    # sns.pairplot(df, vars = feature_names, size=2.5, hue = 'Species')
    # plt.show()
    '''dimension reduction'''
if __name__ == '__main__':
    # Script entry point: only the DataFrame-exploration demo runs;
    # multi_class_under_sampling() is left for manual invocation.
    wendy_try_iris()
import unittest
class MyTest(unittest.TestCase):
    """Placeholder sanity check so the suite always contains one passing
    test."""

    def test(self):
        # trivially true: both sides evaluate to 4
        self.assertEqual(2 + 2, 4)
| manjitkumar/drf-url-filters | filters/tests/tests.py | Python | mit | 102 |
'''trec_dd.* namespace package can have several subpackages, see
http://github.com/trec-dd for more info
.. This software is released under an MIT/X11 open source license.
Copyright 2015 Diffeo, Inc.
'''
import pkg_resources
# Register trec_dd as a namespace package so sub-packages distributed
# separately (see http://github.com/trec-dd) can share this top-level name.
pkg_resources.declare_namespace(__name__)
| trec-dd/trec-dd-simulation-harness | trec_dd/__init__.py | Python | mit | 272 |
# encoding: utf-8
"""
Test suite for pptx.presentation module.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import pytest
from pptx.parts.coreprops import CorePropertiesPart
from pptx.parts.presentation import PresentationPart
from pptx.parts.slide import NotesMasterPart
from pptx.presentation import Presentation
from pptx.slide import SlideLayouts, SlideMaster, SlideMasters, Slides
from .unitutil.cxml import element, xml
from .unitutil.mock import class_mock, instance_mock, property_mock
class DescribePresentation(object):
    """Unit-test suite for ``pptx.presentation.Presentation``.

    Each ``it_*`` method exercises one behavior of the Presentation proxy;
    the ``*_fixture`` methods build the (object, expected-value) tuples the
    tests consume, and the trailing "fixture components" supply the shared
    mocks (class mocks, instance mocks, and property mocks).
    """
    def it_knows_the_height_of_its_slides(self, sld_height_get_fixture):
        prs, expected_value = sld_height_get_fixture
        assert prs.slide_height == expected_value
    def it_can_change_the_height_of_its_slides(self, sld_height_set_fixture):
        prs, slide_height, expected_xml = sld_height_set_fixture
        prs.slide_height = slide_height
        assert prs._element.xml == expected_xml
    def it_knows_the_width_of_its_slides(self, sld_width_get_fixture):
        prs, expected_value = sld_width_get_fixture
        assert prs.slide_width == expected_value
    def it_can_change_the_width_of_its_slides(self, sld_width_set_fixture):
        prs, slide_width, expected_xml = sld_width_set_fixture
        prs.slide_width = slide_width
        assert prs._element.xml == expected_xml
    def it_knows_its_part(self, part_fixture):
        prs, prs_part_ = part_fixture
        assert prs.part is prs_part_
    def it_provides_access_to_its_core_properties(self, core_props_fixture):
        prs, core_properties_ = core_props_fixture
        assert prs.core_properties is core_properties_
    def it_provides_access_to_its_notes_master(self, notes_master_fixture):
        prs, notes_master_ = notes_master_fixture
        assert prs.notes_master is notes_master_
    def it_provides_access_to_its_slides(self, slides_fixture):
        prs, rename_slide_parts_, rIds = slides_fixture[:3]
        Slides_, slides_, expected_xml = slides_fixture[3:]
        slides = prs.slides
        # accessing .slides must rename the slide parts and wrap the
        # (possibly newly-added) p:sldIdLst element in a Slides proxy
        rename_slide_parts_.assert_called_once_with(rIds)
        Slides_.assert_called_once_with(
            prs._element.xpath('p:sldIdLst')[0], prs
        )
        assert prs._element.xml == expected_xml
        assert slides is slides_
    def it_provides_access_to_its_slide_layouts(self, layouts_fixture):
        prs, slide_layouts_ = layouts_fixture
        assert prs.slide_layouts is slide_layouts_
    def it_provides_access_to_its_slide_master(self, master_fixture):
        prs, getitem_, slide_master_ = master_fixture
        slide_master = prs.slide_master
        # .slide_master is shorthand for .slide_masters[0]
        getitem_.assert_called_once_with(0)
        assert slide_master is slide_master_
    def it_provides_access_to_its_slide_masters(self, masters_fixture):
        prs, SlideMasters_, slide_masters_, expected_xml = masters_fixture
        slide_masters = prs.slide_masters
        SlideMasters_.assert_called_once_with(
            prs._element.xpath('p:sldMasterIdLst')[0], prs
        )
        assert slide_masters is slide_masters_
        assert prs._element.xml == expected_xml
    def it_can_save_the_presentation_to_a_file(self, save_fixture):
        prs, file_, prs_part_ = save_fixture
        prs.save(file_)
        # saving is delegated entirely to the part
        prs_part_.save.assert_called_once_with(file_)
    # fixtures -------------------------------------------------------
    @pytest.fixture
    def core_props_fixture(self, prs_part_, core_properties_):
        prs = Presentation(None, prs_part_)
        prs_part_.core_properties = core_properties_
        return prs, core_properties_
    @pytest.fixture
    def layouts_fixture(self, masters_prop_, slide_layouts_):
        prs = Presentation(None, None)
        masters_prop_.return_value.__getitem__.return_value.slide_layouts = (
            slide_layouts_
        )
        return prs, slide_layouts_
    @pytest.fixture
    def master_fixture(self, masters_prop_, slide_master_):
        prs = Presentation(None, None)
        getitem_ = masters_prop_.return_value.__getitem__
        getitem_.return_value = slide_master_
        return prs, getitem_, slide_master_
    # each param pair is (starting presentation CXML, expected CXML after
    # the property access, which may insert a missing child element)
    @pytest.fixture(params=[
        ('p:presentation',
         'p:presentation/p:sldMasterIdLst'),
        ('p:presentation/p:sldMasterIdLst',
         'p:presentation/p:sldMasterIdLst'),
    ])
    def masters_fixture(self, request, SlideMasters_, slide_masters_):
        prs_cxml, expected_cxml = request.param
        prs = Presentation(element(prs_cxml), None)
        expected_xml = xml(expected_cxml)
        return prs, SlideMasters_, slide_masters_, expected_xml
    @pytest.fixture
    def notes_master_fixture(self, prs_part_, notes_master_):
        prs = Presentation(None, prs_part_)
        prs_part_.notes_master = notes_master_
        return prs, notes_master_
    @pytest.fixture
    def part_fixture(self, prs_part_):
        prs = Presentation(None, prs_part_)
        return prs, prs_part_
    @pytest.fixture
    def save_fixture(self, prs_part_):
        prs = Presentation(None, prs_part_)
        # the extension is irrelevant here; part.save is a mock
        file_ = 'foobar.docx'
        return prs, file_, prs_part_
    @pytest.fixture(params=[
        ('p:presentation', None),
        ('p:presentation/p:sldSz{cy=42}', 42),
    ])
    def sld_height_get_fixture(self, request):
        prs_cxml, expected_value = request.param
        prs = Presentation(element(prs_cxml), None)
        return prs, expected_value
    @pytest.fixture(params=[
        ('p:presentation',
         'p:presentation/p:sldSz{cy=914400}'),
        ('p:presentation/p:sldSz{cy=424242}',
         'p:presentation/p:sldSz{cy=914400}'),
    ])
    def sld_height_set_fixture(self, request):
        prs_cxml, expected_cxml = request.param
        prs = Presentation(element(prs_cxml), None)
        expected_xml = xml(expected_cxml)
        return prs, 914400, expected_xml
    @pytest.fixture(params=[
        ('p:presentation', None),
        ('p:presentation/p:sldSz{cx=42}', 42),
    ])
    def sld_width_get_fixture(self, request):
        prs_cxml, expected_value = request.param
        prs = Presentation(element(prs_cxml), None)
        return prs, expected_value
    @pytest.fixture(params=[
        ('p:presentation',
         'p:presentation/p:sldSz{cx=914400}'),
        ('p:presentation/p:sldSz{cx=424242}',
         'p:presentation/p:sldSz{cx=914400}'),
    ])
    def sld_width_set_fixture(self, request):
        prs_cxml, expected_cxml = request.param
        prs = Presentation(element(prs_cxml), None)
        expected_xml = xml(expected_cxml)
        return prs, 914400, expected_xml
    @pytest.fixture(params=[
        ('p:presentation', [], 'p:presentation/p:sldIdLst'),
        ('p:presentation/p:sldIdLst/p:sldId{r:id=a}', ['a'],
         'p:presentation/p:sldIdLst/p:sldId{r:id=a}'),
        ('p:presentation/p:sldIdLst/(p:sldId{r:id=a},p:sldId{r:id=b})',
         ['a', 'b'],
         'p:presentation/p:sldIdLst/(p:sldId{r:id=a},p:sldId{r:id=b})'),
    ])
    def slides_fixture(self, request, part_prop_, Slides_, slides_):
        prs_cxml, rIds, expected_cxml = request.param
        prs = Presentation(element(prs_cxml), None)
        rename_slide_parts_ = part_prop_.return_value.rename_slide_parts
        expected_xml = xml(expected_cxml)
        return prs, rename_slide_parts_, rIds, Slides_, slides_, expected_xml
    # fixture components ---------------------------------------------
    @pytest.fixture
    def core_properties_(self, request):
        return instance_mock(request, CorePropertiesPart)
    @pytest.fixture
    def masters_prop_(self, request):
        return property_mock(request, Presentation, 'slide_masters')
    @pytest.fixture
    def notes_master_(self, request):
        return instance_mock(request, NotesMasterPart)
    @pytest.fixture
    def part_prop_(self, request):
        return property_mock(request, Presentation, 'part')
    @pytest.fixture
    def prs_part_(self, request):
        return instance_mock(request, PresentationPart)
    @pytest.fixture
    def slide_layouts_(self, request):
        return instance_mock(request, SlideLayouts)
    @pytest.fixture
    def SlideMasters_(self, request, slide_masters_):
        return class_mock(
            request, 'pptx.presentation.SlideMasters',
            return_value=slide_masters_
        )
    @pytest.fixture
    def slide_master_(self, request):
        return instance_mock(request, SlideMaster)
    @pytest.fixture
    def slide_masters_(self, request):
        return instance_mock(request, SlideMasters)
    @pytest.fixture
    def Slides_(self, request, slides_):
        return class_mock(
            request, 'pptx.presentation.Slides', return_value=slides_
        )
    @pytest.fixture
    def slides_(self, request):
        return instance_mock(request, Slides)
| biggihs/python-pptx | tests/test_presentation.py | Python | mit | 8,885 |
import nltk
class Text:
    """Wraps a raw text string and exposes POS-tagged sentences via nltk."""

    def __init__(self, raw_text):
        self.raw_text = raw_text

    def parse(self):
        """Split into sentences, tokenize, POS-tag each one; return self."""
        self.tagged_sentences = [
            self.tag(nltk.word_tokenize(sentence))
            for sentence in nltk.sent_tokenize(self.raw_text)
        ]
        return self

    def tag(self, sentence):
        """POS-tag *sentence* (a list of words), merging each run of
        consecutive proper nouns (NNP*) into one space-joined token that
        carries the tag of the last word in the run.
        """
        result = []
        pending = []        # proper-noun words awaiting a flush
        pending_tag = None  # tag of the latest word appended to `pending`

        def flush():
            if pending:
                result.append((' '.join(pending), pending_tag))
                del pending[:]

        for word, pos in nltk.pos_tag(sentence):
            if pos.startswith('NNP'):
                pending.append(word)
                pending_tag = pos
            else:
                flush()
                result.append((word, pos))
        flush()
        return result
| kodki/cortext | cortext/text.py | Python | mit | 1,056 |
from pathlib import Path
import os
import structlog
log = structlog.get_logger()
_config = None
def get():
    """Return the process-wide configuration object, building it lazily."""
    global _config
    if isinstance(_config, _build_config):
        return _config
    _config = _build_config()
    return _config
class _build_config:
def __init__(self):
self._config = {}
self.dos_install_dir = os.environ["DOS_BIN"]
self.dos_log_dir = os.environ["DOS_LOG"]
self.env_var_contexts = ["dos"]
# load from toml file
self._load_toml_config()
# load from env variables
self._load_env_vars()
def get(self, key, default=None):
return self._config.get(key, None)
def put(self, key, value, context="default"):
self.add_config_value(key, value, context=context)
def check(self, key):
return key in self._config
def add_config_value(self, key, value, context="default"):
ctx_key = f"{context}_{key}"
self._config[ctx_key] = value
log.debug("set config", context=context, key=key, ctx_key=ctx_key)
def add_path_value(self, key, value, context):
self.add_config_value(key, Path(value), context=context)
def _load_toml_config(self):
# potentially add env var contexts
log.debug("loading toml config", file_name="TODO <> TODO")
def _load_env_vars(self):
log.debug("loading environ config")
for key in os.environ:
parts = key.lower().split("_")
ctx = parts[0]
if ctx not in self.env_var_contexts:
continue
log.info(f"discovered environ config", key=key)
if len(parts) == 2:
self.add_config_value(
parts[1], # key
os.environ[key], # value from env
context=ctx, # give context
)
elif len(parts) == 3:
k = parts[2]
t = parts[1]
if t == "path":
self.add_path_value(k, os.environ[key], context=ctx)
else:
raise ValueError(f'unrecognized key type "{t}" for "{key}"')
else:
ValueError(
f"incorrect number of parts for env var: {key}, expected 2 or 3"
)
def dos_bin(self):
log.info(f"dev ops shell bin: {self.dos_install_dir}")
dos_bin = Path(self.dos_install_dir)
dos_bin.mkdir(parents=True, exist_ok=True)
return dos_bin
| meantheory/dotfiles | dos/src/dos/config.py | Python | mit | 2,504 |
import importlib
from pygments.lexer import RegexLexer, bygroups
from pygments.styles import STYLE_MAP
from pygments.token import *
def load_style(full_class_string):
    """Import and return a Pygments style class named ``module::ClassName``."""
    module_name, class_name = full_class_string.split('::')
    styles_module = importlib.import_module("pygments.styles." + module_name)
    return getattr(styles_module, class_name)
# Build a name -> style-class map of every style Pygments ships, patching
# each style so whitespace carries no formatting.
repl_styles = {}
for name, import_info in STYLE_MAP.items():
    repl_styles[name] = load_style(import_info)
    repl_styles[name].styles[Whitespace] = '' # some styles underline ws
class FranzLexer(RegexLexer):
    """Pygments lexer for the Franz language.

    Two states: 'root' for ordinary code and 'double-quote' for the inside
    of a double-quoted string (supporting {…} interpolation and escapes).
    """
    name = 'Franz Lexer'
    tokens = {
        'root': [
            (r'"', String.Double, 'double-quote'),  # enter string state
            (r'[0-9]+(\.[0-9]+)?', Number),
            (r'\b(if|else|for|while|in|to|fn|ⲗ|try|rescue|assert|include|yield|return|break|continue)\b', Keyword.Reserved),
            (r'\b(int|str|any|float|list|dict|bool)\b', Keyword.Type),
            (r'\b(and|or|not)\b', Operator.Word),
            (r'#.*?$', Comment.Single),
            # "name = fn" -- a function definition
            (r'([a-zA-Z][a-zA-Z0-9_!?\-%$]*)(\s*)(=)(\s*)(fn)',
             bygroups(Name.Function.Definition, Whitespace, Operator, Whitespace, Keyword.Reserved)),
            # "name(" -- a function call
            (r'\b([a-zA-Z][a-zA-Z0-9_!?\-%$]*)(\s*)([(])', bygroups(Name.Function, Whitespace, Punctuation)),
            (r'\b[a-zA-Z][a-zA-Z0-9_!?\-%$]*\b', Name),
            (r'\s+([*+\-^=<>%/?]+)\s+', Operator),
            (r'[@().,:;\[\]]', Punctuation),
            (r'[{}]', Punctuation.Braces),
            (r'\s+', Whitespace)
        ],
        'double-quote': [
            (r'\{.*?\}', String.Interpol),
            (r'\\.', Literal.String.Escape),
            (r'[^"{}\\]+', String.Double),
            (r'"', String.Double, '#pop'),  # closing quote leaves the state
        ]
    }
#!/usr/bin/env python
from .schema import Schema
from .yamale_error import YamaleError
def make_schema(path=None, parser='PyYAML', validators=None, content=None):
    """Parse a YAML schema (from *path* or *content*) into a Schema.

    validators = None means use default.
    """
    # Import readers here so we can get version information in setup.py.
    from . import readers
    raw_schemas = readers.parse_yaml(path, parser, content=content)
    if not raw_schemas:
        raise ValueError('{} is an empty file!'.format(path))
    try:
        # The first YAML document is the base schema; any further
        # documents contain Includes.
        schema = Schema(raw_schemas[0], path, validators=validators)
        for raw_include in raw_schemas[1:]:
            schema.add_include(raw_include)
    except (TypeError, SyntaxError) as e:
        raise SyntaxError('Schema error in file %s\n%s' % (path, str(e)))
    return schema
def make_data(path=None, parser='PyYAML', content=None):
    """Parse YAML data and return a list of (document, path) tuples.

    An empty file yields a single ({}, path) entry.
    """
    from . import readers
    raw_docs = readers.parse_yaml(path, parser, content=content)
    if not raw_docs:
        return [({}, path)]
    return [(doc, path) for doc in raw_docs]
def validate(schema, data, strict=True, _raise_error=True):
    """Validate every (document, path) pair in *data* against *schema*.

    Returns one result per document; when *_raise_error* is set and any
    document failed, raises YamaleError carrying all results.
    """
    results = []
    all_valid = True
    for document, doc_path in data:
        outcome = schema.validate(document, doc_path, strict)
        results.append(outcome)
        all_valid = all_valid and outcome.isValid()
    if _raise_error and not all_valid:
        raise YamaleError(results)
    return results
| 23andMe/Yamale | yamale/yamale.py | Python | mit | 1,460 |
from __future__ import absolute_import, division, print_function
import platform
import sys
from threading import Thread, Lock
import json
import warnings
import time
import stripe
import pytest
if platform.python_implementation() == "PyPy":
pytest.skip("skip integration tests with PyPy", allow_module_level=True)
if sys.version_info[0] < 3:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
else:
from http.server import BaseHTTPRequestHandler, HTTPServer
class TestIntegration(object):
    """End-to-end tests of the stripe client against throwaway HTTP servers.

    Each test spins up its own mock server on a random port; module-level
    stripe settings are snapshotted and restored around every test.
    NOTE(review): the default api_base assumes stripe-mock is listening on
    localhost:12111 -- confirm before running this suite.
    """
    @pytest.fixture(autouse=True)
    def close_mock_server(self):
        # tear the per-test mock server down after the test body runs
        yield
        if self.mock_server:
            self.mock_server.shutdown()
            self.mock_server.server_close()
            self.mock_server_thread.join()
    @pytest.fixture(autouse=True)
    def setup_stripe(self):
        # snapshot global stripe configuration and restore it afterwards
        orig_attrs = {
            "api_base": stripe.api_base,
            "api_key": stripe.api_key,
            "default_http_client": stripe.default_http_client,
            "enable_telemetry": stripe.enable_telemetry,
            "max_network_retries": stripe.max_network_retries,
            "proxy": stripe.proxy,
        }
        stripe.api_base = "http://localhost:12111" # stripe-mock
        stripe.api_key = "sk_test_123"
        stripe.default_http_client = None
        stripe.enable_telemetry = False
        stripe.max_network_retries = 3
        stripe.proxy = None
        yield
        stripe.api_base = orig_attrs["api_base"]
        stripe.api_key = orig_attrs["api_key"]
        stripe.default_http_client = orig_attrs["default_http_client"]
        stripe.enable_telemetry = orig_attrs["enable_telemetry"]
        stripe.max_network_retries = orig_attrs["max_network_retries"]
        stripe.proxy = orig_attrs["proxy"]
    def setup_mock_server(self, handler):
        # Configure mock server.
        # Passing 0 as the port will cause a random free port to be chosen.
        self.mock_server = HTTPServer(("localhost", 0), handler)
        _, self.mock_server_port = self.mock_server.server_address
        # Start running mock server in a separate thread.
        # Daemon threads automatically shut down when the main process exits.
        self.mock_server_thread = Thread(target=self.mock_server.serve_forever)
        self.mock_server_thread.setDaemon(True)
        self.mock_server_thread.start()
    def test_hits_api_base(self):
        # requests must go to whatever api_base points at
        class MockServerRequestHandler(BaseHTTPRequestHandler):
            num_requests = 0
            def do_GET(self):
                self.__class__.num_requests += 1
                self.send_response(200)
                self.send_header(
                    "Content-Type", "application/json; charset=utf-8"
                )
                self.end_headers()
                self.wfile.write(json.dumps({}).encode("utf-8"))
                return
        self.setup_mock_server(MockServerRequestHandler)
        stripe.api_base = "http://localhost:%s" % self.mock_server_port
        stripe.Balance.retrieve()
        assert MockServerRequestHandler.num_requests == 1
    def test_hits_proxy_through_default_http_client(self):
        # setting stripe.proxy routes traffic through the proxy; changing it
        # after a request warns but still takes effect
        class MockServerRequestHandler(BaseHTTPRequestHandler):
            num_requests = 0
            def do_GET(self):
                self.__class__.num_requests += 1
                self.send_response(200)
                self.send_header(
                    "Content-Type", "application/json; charset=utf-8"
                )
                self.end_headers()
                self.wfile.write(json.dumps({}).encode("utf-8"))
                return
        self.setup_mock_server(MockServerRequestHandler)
        stripe.proxy = "http://localhost:%s" % self.mock_server_port
        stripe.Balance.retrieve()
        assert MockServerRequestHandler.num_requests == 1
        stripe.proxy = "http://bad-url"
        with warnings.catch_warnings(record=True) as w:
            stripe.Balance.retrieve()
            assert len(w) == 1
            assert "stripe.proxy was updated after sending a request" in str(
                w[0].message
            )
        assert MockServerRequestHandler.num_requests == 2
    def test_hits_proxy_through_custom_client(self):
        class MockServerRequestHandler(BaseHTTPRequestHandler):
            num_requests = 0
            def do_GET(self):
                self.__class__.num_requests += 1
                self.send_response(200)
                self.send_header(
                    "Content-Type", "application/json; charset=utf-8"
                )
                self.end_headers()
                self.wfile.write(json.dumps({}).encode("utf-8"))
                return
        self.setup_mock_server(MockServerRequestHandler)
        stripe.default_http_client = (
            stripe.http_client.new_default_http_client(
                proxy="http://localhost:%s" % self.mock_server_port
            )
        )
        stripe.Balance.retrieve()
        assert MockServerRequestHandler.num_requests == 1
    def test_passes_client_telemetry_when_enabled(self):
        # second request must carry metrics (request id + duration) from the
        # first; server-side assertion failures are returned as API errors so
        # they surface in the test instead of as connection noise
        class MockServerRequestHandler(BaseHTTPRequestHandler):
            num_requests = 0
            def do_GET(self):
                try:
                    self.__class__.num_requests += 1
                    req_num = self.__class__.num_requests
                    if req_num == 1:
                        time.sleep(31 / 1000) # 31 ms
                        assert not self.headers.get(
                            "X-Stripe-Client-Telemetry"
                        )
                    elif req_num == 2:
                        assert self.headers.get("X-Stripe-Client-Telemetry")
                        telemetry = json.loads(
                            self.headers.get("x-stripe-client-telemetry")
                        )
                        assert "last_request_metrics" in telemetry
                        req_id = telemetry["last_request_metrics"][
                            "request_id"
                        ]
                        duration_ms = telemetry["last_request_metrics"][
                            "request_duration_ms"
                        ]
                        assert req_id == "req_1"
                        # The first request took 31 ms, so the client perceived
                        # latency shouldn't be outside this range.
                        assert 30 < duration_ms < 300
                    else:
                        assert False, (
                            "Should not have reached request %d" % req_num
                        )
                    self.send_response(200)
                    self.send_header(
                        "Content-Type", "application/json; charset=utf-8"
                    )
                    self.send_header("Request-Id", "req_%d" % req_num)
                    self.end_headers()
                    self.wfile.write(json.dumps({}).encode("utf-8"))
                except AssertionError as ex:
                    # Throwing assertions on the server side causes a
                    # connection error to be logged instead of an assertion
                    # failure. Instead, we return the assertion failure as
                    # json so it can be logged as a StripeError.
                    self.send_response(400)
                    self.send_header(
                        "Content-Type", "application/json; charset=utf-8"
                    )
                    self.end_headers()
                    self.wfile.write(
                        json.dumps(
                            {
                                "error": {
                                    "type": "invalid_request_error",
                                    "message": str(ex),
                                }
                            }
                        ).encode("utf-8")
                    )
        self.setup_mock_server(MockServerRequestHandler)
        stripe.api_base = "http://localhost:%s" % self.mock_server_port
        stripe.enable_telemetry = True
        stripe.Balance.retrieve()
        stripe.Balance.retrieve()
        assert MockServerRequestHandler.num_requests == 2
    def test_uses_thread_local_client_telemetry(self):
        # 10 worker threads each make 2 requests; telemetry is thread-local,
        # so exactly one request id per thread should be reported back
        class MockServerRequestHandler(BaseHTTPRequestHandler):
            num_requests = 0
            seen_metrics = set()
            stats_lock = Lock()
            def do_GET(self):
                with self.__class__.stats_lock:
                    self.__class__.num_requests += 1
                    req_num = self.__class__.num_requests
                if self.headers.get("X-Stripe-Client-Telemetry"):
                    telemetry = json.loads(
                        self.headers.get("X-Stripe-Client-Telemetry")
                    )
                    req_id = telemetry["last_request_metrics"]["request_id"]
                    with self.__class__.stats_lock:
                        self.__class__.seen_metrics.add(req_id)
                self.send_response(200)
                self.send_header(
                    "Content-Type", "application/json; charset=utf-8"
                )
                self.send_header("Request-Id", "req_%d" % req_num)
                self.end_headers()
                self.wfile.write(json.dumps({}).encode("utf-8"))
        self.setup_mock_server(MockServerRequestHandler)
        stripe.api_base = "http://localhost:%s" % self.mock_server_port
        stripe.enable_telemetry = True
        stripe.default_http_client = stripe.http_client.RequestsClient()
        def work():
            stripe.Balance.retrieve()
            stripe.Balance.retrieve()
        threads = [Thread(target=work) for _ in range(10)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        assert MockServerRequestHandler.num_requests == 20
        assert len(MockServerRequestHandler.seen_metrics) == 10
| stripe/stripe-python | tests/test_integration.py | Python | mit | 9,930 |
import functools
import os
import sys
import unittest
from contextlib import contextmanager
from cloudbridge.cloud.factory import CloudProviderFactory
from cloudbridge.cloud.interfaces import InstanceState
from cloudbridge.cloud.interfaces import TestMockHelperMixin
from six import reraise
def parse_bool(val):
    """Loosely interpret *val* as a boolean.

    Only the (case-insensitive) strings 'TRUE' and 'YES' count as True;
    any falsy value or other string is False.
    """
    return bool(val) and str(val).upper() in ('TRUE', 'YES')
@contextmanager
def cleanup_action(cleanup_func):
    """
    Context manager to carry out a given
    cleanup action after carrying out a set
    of tasks, or when an exception occurs.
    If any errors occur during the cleanup
    action, those are ignored, and the original
    traceback is preserved.
    :param cleanup_func: called (with no arguments) at the end of the
        context block, or when an exception escapes it. Any exceptions
        raised by cleanup_func itself are ignored (printed only).
    Usage:
        with cleanup_action(lambda: print("Oops!")):
            do_something()
    """
    try:
        yield
    except Exception:
        ex_class, ex_val, ex_traceback = sys.exc_info()
        try:
            cleanup_func()
        except Exception as e:
            print("Error during exception cleanup: {0}".format(e))
        # re-raise the original exception with its traceback intact
        reraise(ex_class, ex_val, ex_traceback)
    # normal-completion path: run the cleanup, swallowing its errors
    try:
        cleanup_func()
    except Exception as e:
        print("Error during cleanup: {0}".format(e))
def skipIfNoService(services):
    """
    A decorator for skipping tests if the provider
    does not implement a given service.
    """
    def wrap(func):
        """
        The actual wrapper
        """
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            provider = getattr(self, 'provider')
            if provider:
                # stop at the first unsupported service, like the original
                # early skipTest did
                unsupported = next(
                    (svc for svc in services
                     if not provider.has_service(svc)),
                    None)
                if unsupported is not None:
                    self.skipTest("Skipping test because '%s' service is"
                                  " not implemented" % (unsupported,))
            func(self, *args, **kwargs)
        return wrapper
    return wrap
# Per-provider test fixtures (image id, instance type, placement zone),
# each overridable via the corresponding CB_* environment variable.
TEST_DATA_CONFIG = {
    "AWSCloudProvider": {
        "image": os.environ.get('CB_IMAGE_AWS', 'ami-5ac2cd4d'),
        "instance_type": os.environ.get('CB_INSTANCE_TYPE_AWS', 't2.nano'),
        "placement": os.environ.get('CB_PLACEMENT_AWS', 'us-east-1a'),
    },
    "OpenStackCloudProvider": {
        "image": os.environ.get('CB_IMAGE_OS',
                                '842b949c-ea76-48df-998d-8a41f2626243'),
        "instance_type": os.environ.get('CB_INSTANCE_TYPE_OS', 'm1.tiny'),
        "placement": os.environ.get('CB_PLACEMENT_OS', 'nova'),
    },
    "AzureCloudProvider": {
        "placement":
            os.environ.get('CB_PLACEMENT_AZURE', 'eastus'),
        "image":
            os.environ.get('CB_IMAGE_AZURE', 'CbTest-Img'),
        "instance_type":
            os.environ.get('CB_INSTANCE_TYPE_AZURE', 'Standard_DS1_v2'),
    }
}
def get_provider_test_data(provider, key):
    """Look up *key* in TEST_DATA_CONFIG for the provider's class name."""
    for provider_class in ("AWSCloudProvider", "OpenStackCloudProvider",
                           "AzureCloudProvider"):
        if provider_class in provider.name:
            return TEST_DATA_CONFIG.get(provider_class).get(key)
    return None
def create_test_network(provider, name):
    """
    Create a network with one subnet, returning the network and subnet objects.
    """
    net = provider.network.create(name=name)
    base_ip = net.cidr_block.split('/')[0] or '10.0.0.1'
    subnet = net.create_subnet(
        cidr_block='{0}/28'.format(base_ip), name=name,
        zone=get_provider_test_data(provider, 'placement'))
    return net, subnet
def delete_test_network(network):
    """
    Delete the supplied network, first deleting any contained subnets.
    """
    with cleanup_action(network.delete):
        for subnet in network.subnets():
            subnet.delete()
def create_test_instance(
        provider, instance_name, subnet, zone=None, launch_config=None,
        key_pair=None, security_groups=None):
    """Launch a test instance using provider-specific image/type test data."""
    image = get_provider_test_data(provider, 'image')
    instance_type = get_provider_test_data(provider, 'instance_type')
    return provider.compute.instances.create(
        instance_name, image, instance_type,
        subnet=subnet, zone=zone, key_pair=key_pair,
        security_groups=security_groups, launch_config=launch_config)
def get_test_instance(provider, name, key_pair=None, security_groups=None,
                      subnet=None):
    """Create a test instance and block until it is ready."""
    instance = create_test_instance(
        provider, name, subnet=subnet, key_pair=key_pair,
        security_groups=security_groups, launch_config=None)
    instance.wait_till_ready()
    return instance
def get_test_fixtures_folder():
    """Return the path of the fixtures/ directory beside this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'fixtures/')
def delete_test_instance(instance):
    """Terminate *instance* (if given) and wait until it is gone."""
    if not instance:
        return
    instance.terminate()
    instance.wait_for([InstanceState.TERMINATED, InstanceState.UNKNOWN],
                      terminal_states=[InstanceState.ERROR])
def cleanup_test_resources(instance=None, network=None, security_group=None,
                           key_pair=None):
    """Clean up any combination of supplied resources.

    Resources are released innermost-first (instance, then security group,
    then key pair, then network) so dependents go before their dependencies;
    the nested cleanup_action blocks ensure each step runs even when an
    earlier one raises.
    """
    with cleanup_action(lambda:
                        delete_test_network(network) if network else None):
        with cleanup_action(lambda: key_pair.delete() if key_pair else None):
            with cleanup_action(
                    lambda:
                    security_group.delete() if security_group else None):
                delete_test_instance(instance)
class ProviderTestBase(unittest.TestCase):
    """Base TestCase that lazily builds a provider from environment settings.

    Mock providers (TestMockHelperMixin) get their setUpMock/tearDownMock
    hooks called around each test and a zero wait interval.
    """
    # cached provider; rebuilt lazily per test via the `provider` property
    _provider = None
    def setUp(self):
        if isinstance(self.provider, TestMockHelperMixin):
            self.provider.setUpMock()
    def tearDown(self):
        if isinstance(self.provider, TestMockHelperMixin):
            self.provider.tearDownMock()
        self._provider = None
    def get_provider_wait_interval(self, provider_class):
        # mock providers need no polling delay; real ones poll every second
        if issubclass(provider_class, TestMockHelperMixin):
            return 0
        else:
            return 1
    def create_provider_instance(self):
        """Instantiate the provider named by CB_TEST_PROVIDER (default azure)."""
        provider_name = os.environ.get("CB_TEST_PROVIDER", "azure")
        use_mock_drivers = parse_bool(
            os.environ.get("CB_USE_MOCK_PROVIDERS", "False"))
        factory = CloudProviderFactory()
        provider_class = factory.get_provider_class(provider_name,
                                                    get_mock=use_mock_drivers)
        # WARNING(review): hard-coded Azure credentials committed to source.
        # These should be rotated and sourced from the environment instead.
        config = {'default_wait_interval':
                  self.get_provider_wait_interval(provider_class),
                  'azure_subscription_id':
                      '7904d702-e01c-4826-8519-f5a25c866a96',
                  'azure_client_id':
                      '69621fe1-f59f-43de-8799-269007c76b95',
                  'azure_secret':
                      'Orcw9U5Kd4cUDntDABg0dygN32RQ4FGBYyLRaJ/BlrM=',
                  'azure_tenant':
                      '75ec242e-054d-4b22-98a9-a4602ebb6027',
                  'azure_resource_group': 'CB-TEST-TEST-RG',
                  'azure_storage_account': 'cbtestsa134',
                  'azure_vm_default_user_name': 'cbtestuser'
                  }
        return provider_class(config)
    @property
    def provider(self):
        if not self._provider:
            self._provider = self.create_provider_instance()
        return self._provider
| ms-azure-cloudbroker/cloudbridge | cloudbridge/cloud/providers/azure/integration_test/helpers.py | Python | mit | 7,555 |
from setuptools import setup
# Packaging manifest for g1.databases: a thin SQLAlchemy wrapper package,
# with optional application-parts support via the "parts" extra.
setup(
    name='g1.databases',
    packages=[
        'g1.databases',
    ],
    install_requires=[
        'SQLAlchemy',
        'g1.bases',
    ],
    extras_require={
        'parts': [
            'g1.apps',
        ],
    },
    zip_safe=False,
)
| clchiou/garage | py/g1/databases/setup.py | Python | mit | 283 |
# Generated by Django 2.2.10 on 2021-06-08 14:37
import django.db.models.deletion
from django.db import (
migrations,
models,
)
from accelerator.utils import copy_m2m_fields
def migrate_member_profile_data(apps, schema_editor):
    """Copy MemberProfile rows (plus their m2m links) into MemberProfile1.

    Profiles whose user already has a CoreProfile are skipped, since
    MemberProfile1 inherits from CoreProfile and would collide.
    """
    MemberProfile = apps.get_model('accelerator', 'MemberProfile')
    MemberProfile1 = apps.get_model('accelerator', 'MemberProfile1')
    CoreProfile = apps.get_model('accelerator', 'CoreProfile')
    exclude = CoreProfile.objects.all().values_list('user_id', flat=True)
    m2m_fields = ['gender_identity', 'interest_categories', 'program_families',
                  'ethno_racial_identification', 'additional_industries',
                  'functional_expertise', 'mentoring_specialties']
    for profile in MemberProfile.objects.exclude(user_id__in=exclude):
        profile_dict = profile.__dict__.copy()
        # drop Django's internal state and the pk so a fresh row is inserted
        profile_dict.pop("_state")
        profile_dict.pop("id")
        new_profile = MemberProfile1.objects.create(**profile_dict)
        copy_m2m_fields(profile, new_profile, m2m_fields)
class Migration(migrations.Migration):
    """Create accelerator_memberprofile1 (a CoreProfile child table) and
    copy existing MemberProfile data into it; the data copy is reversible
    as a no-op."""
    dependencies = [
        ('accelerator', '0057_update_polymorphic_ctype'),
    ]
    operations = [
        migrations.CreateModel(
            name='MemberProfile1',
            fields=[
                ('coreprofile_ptr',
                 models.OneToOneField(
                     auto_created=True,
                     on_delete=django.db.models.deletion.CASCADE,
                     parent_link=True,
                     primary_key=True,
                     serialize=False,
                     to='accelerator.CoreProfile')),
            ],
            options={
                'db_table': 'accelerator_memberprofile1',
            },
            bases=('accelerator.coreprofile',),
        ),
        migrations.RunPython(migrate_member_profile_data,
                             migrations.RunPython.noop),
    ]
| masschallenge/django-accelerator | accelerator/migrations/0058_memberprofile1.py | Python | mit | 1,918 |
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
import logging
import random
import time
import gevent
from gevent.event import AsyncResult
from gevent.queue import Empty, Queue
from gevent.timeout import Timeout
from random import randint
from ethereum import slogging
from ethereum.utils import sha3
from raiden.messages import (
MediatedTransfer,
RefundTransfer,
RevealSecret,
Secret,
SecretRequest,
TransferTimeout,
)
from raiden.utils import lpex, pex
__all__ = (
    'StartMediatedTransferTask',
    'MediateTransferTask',
    'EndMediatedTransferTask',
)
log = slogging.get_logger(__name__) # pylint: disable=invalid-name
# Sentinel an AlarmTask callback returns to have itself unregistered.
REMOVE_CALLBACK = object()
DEFAULT_EVENTS_POLL_TIMEOUT = 0.5
DEFAULT_HEALTHCHECK_POLL_TIMEOUT = 1
ESTIMATED_BLOCK_TIME = 7
# Sentinel yielded by the _send_and_wait_* helpers when a wait expires.
TIMEOUT = object()
class Task(gevent.Greenlet):
    """ Base class used to created tasks.
    Note:
        Always call super().__init__().
    """
    def __init__(self):
        super(Task, self).__init__()
        # queue of protocol messages addressed to this task
        self.response_queue = Queue()
    def on_completion(self, success):
        # NOTE(review): self.transfermanager is expected to be set by the
        # subclass before completion -- confirm against subclasses.
        self.transfermanager.on_task_completed(self, success)
        return success
    def on_response(self, response):
        """ Add a new response message to the task queue. """
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'RESPONSE MESSAGE RECEIVED %s %s',
                repr(self),
                response,
            )
        self.response_queue.put(response)
class HealthcheckTask(Task):
    """ Task for checking if all of our open channels are healthy """
    def __init__(
            self,
            raiden,
            send_ping_time,
            max_unresponsive_time,
            sleep_time=DEFAULT_HEALTHCHECK_POLL_TIMEOUT):
        """ Initialize a HealthcheckTask that will monitor open channels for
        responsiveness.
        :param raiden RaidenService: The Raiden service which will give us
                                     access to the protocol object and to
                                     the asset manager
        :param int sleep_time: Time in seconds between each healthcheck task
        :param int send_ping_time: Time in seconds after not having received
                                   a message from an address at which to send
                                   a Ping.
        :param int max_unresponsive_time: Time in seconds after not having received
                                          a message from an address at which it
                                          should be deleted.
        """
        super(HealthcheckTask, self).__init__()
        self.protocol = raiden.protocol
        self.raiden = raiden
        self.stop_event = AsyncResult()
        self.sleep_time = sleep_time
        self.send_ping_time = send_ping_time
        self.max_unresponsive_time = max_unresponsive_time
    def _run(self): # pylint: disable=method-hidden
        stop = None
        while stop is None:
            keys_to_remove = []
            # iterate every (receiver, asset) send queue known to the protocol
            for key, queue in self.protocol.address_queue.iteritems():
                receiver_address = key[0]
                asset_address = key[1]
                # only consider peers we are not currently sending to
                if queue.empty():
                    elapsed_time = (
                        time.time() - self.protocol.last_received_time[receiver_address]
                    )
                    # Add a randomized delay in the loop to not clog the network
                    gevent.sleep(randint(0, int(0.2 * self.send_ping_time)))
                    if elapsed_time > self.max_unresponsive_time:
                        # remove the node from the graph
                        asset_manager = self.raiden.get_manager_by_asset_address(
                            asset_address
                        )
                        asset_manager.channelgraph.remove_path(
                            self.protocol.raiden.address,
                            receiver_address
                        )
                        # remove the node from the queue
                        keys_to_remove.append(key)
                    elif elapsed_time > self.send_ping_time:
                        self.protocol.send_ping(receiver_address)
            # deletion deferred to avoid mutating the dict while iterating
            for key in keys_to_remove:
                self.protocol.address_queue.pop(key)
            self.timeout = Timeout(self.sleep_time) # wait() will call cancel()
            stop = self.stop_event.wait(self.timeout)
    def stop_and_wait(self):
        self.stop_event.set(True)
        gevent.wait(self)
    def stop_async(self):
        self.stop_event.set(True)
class AlarmTask(Task):
    """ Task to notify when a block is mined. """
    def __init__(self, chain):
        super(AlarmTask, self).__init__()
        self.callbacks = list()
        self.stop_event = AsyncResult()
        self.chain = chain
        self.last_block_number = self.chain.block_number()
        # TODO: Start with a larger wait_time and decrease it as the
        # probability of a new block increases.
        self.wait_time = 0.5
    def register_callback(self, callback):
        """ Register a new callback.
        Note:
            This callback will be executed in the AlarmTask context and for
            this reason it should not block, otherwise we can miss block
            changes.
        """
        if not callable(callback):
            raise ValueError('callback is not a callable')
        self.callbacks.append(callback)
    def _run(self): # pylint: disable=method-hidden
        stop = None
        result = None
        last_loop = time.time()
        log.debug('starting block number', block_number=self.last_block_number)
        while stop is None:
            current_block = self.chain.block_number()
            # more than one new block since last poll: we skipped some
            if current_block > self.last_block_number + 1:
                difference = current_block - self.last_block_number - 1
                log.error(
                    'alarm missed %s blocks',
                    difference,
                )
            if current_block != self.last_block_number:
                self.last_block_number = current_block
                log.debug('new block', number=current_block, timestamp=last_loop)
                remove = list()
                for callback in self.callbacks:
                    try:
                        result = callback(current_block)
                    except: # pylint: disable=bare-except
                        # a broken callback must not kill the alarm loop
                        log.exception('unexpected exception on alarm')
                    else:
                        # callbacks deregister themselves by returning the
                        # REMOVE_CALLBACK sentinel
                        if result is REMOVE_CALLBACK:
                            remove.append(callback)
                for callback in remove:
                    self.callbacks.remove(callback)
            # we want this task to iterate in the tick of `wait_time`, so take
            # into account how long we spent executing one tick.
            work_time = time.time() - last_loop
            if work_time > self.wait_time:
                log.warning(
                    'alarm loop is taking longer than the wait time',
                    work_time=work_time,
                    wait_time=self.wait_time,
                )
                sleep_time = 0.001
            else:
                sleep_time = self.wait_time - work_time
            stop = self.stop_event.wait(sleep_time)
            last_loop = time.time()
    def stop_and_wait(self):
        self.stop_event.set(True)
        gevent.wait(self)
    def stop_async(self):
        self.stop_event.set(True)
class BaseMediatedTransferTask(Task):
    """ Shared helpers for the mediated-transfer tasks: waiting for responses
    with a wall-clock or block-number deadline, and the close-on-expiry loop
    used once a secret is known.
    """

    def _send_and_wait_time(self, raiden, recipient, transfer, timeout):
        """ Utility to handle multiple messages for the same hashlock while
        properly handling expiration timeouts.

        Generator: yields each queued response, or the TIMEOUT sentinel when
        the queue stays empty past the deadline.
        """
        current_time = time.time()
        limit_time = current_time + timeout

        raiden.send_async(recipient, transfer)

        while current_time <= limit_time:
            # wait for a response message (not the Ack for the transfer)
            try:
                response = self.response_queue.get(
                    timeout=limit_time - current_time,
                )
            except Empty:
                yield TIMEOUT
                return

            yield response
            current_time = time.time()

        # NOTE(review): when the loop exits because the time limit elapsed
        # between responses, the generator logs but does not yield TIMEOUT
        # (only the Empty branch does) — confirm callers handle plain
        # exhaustion of the iterator
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'TIMED OUT %s %s',
                self.__class__,
                pex(transfer),
            )

    def _send_and_wait_block(self, raiden, recipient, transfer, expiration_block):
        """ Utility to handle multiple messages and timeout on a blocknumber. """
        raiden.send_async(recipient, transfer)

        current_block = raiden.get_block_number()
        while current_block < expiration_block:
            try:
                response = self.response_queue.get(
                    timeout=DEFAULT_EVENTS_POLL_TIMEOUT
                )
            except Empty:
                # poll again; the loop condition re-checks the block number
                pass
            else:
                if response:
                    yield response

            current_block = raiden.get_block_number()

        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'TIMED OUT ON BLOCK %s %s %s',
                current_block,
                self.__class__,
                pex(transfer),
            )

        yield TIMEOUT

    def _wait_for_unlock_or_close(self, raiden, assetmanager, channel, mediated_transfer):  # noqa
        """ Wait for a Secret message from our partner to update the local
        state, if the Secret message is not sent within time the channel will
        be closed.

        Note:
            Must be called only once the secret is known.
            Must call `on_hashlock_result` after this function returns.
        """
        if not isinstance(mediated_transfer, MediatedTransfer):
            raise ValueError('MediatedTransfer expected.')

        # close a few blocks before the lock expires so the on-chain claim
        # can still be mined (reveal_timeout is that safety margin)
        block_to_close = mediated_transfer.lock.expiration - raiden.config['reveal_timeout']
        hashlock = mediated_transfer.lock.hashlock
        identifier = mediated_transfer.identifier
        asset = mediated_transfer.asset

        while channel.our_state.balance_proof.is_unclaimed(hashlock):
            current_block = raiden.get_block_number()

            if current_block > block_to_close:
                if log.isEnabledFor(logging.WARN):
                    log.warn(
                        'Closing channel (%s, %s) to prevent expiration of lock %s %s',
                        pex(channel.our_state.address),
                        pex(channel.partner_state.address),
                        pex(hashlock),
                        repr(self),
                    )

                channel.netting_channel.close(
                    channel.our_state.address,
                    channel.our_state.balance_proof.transfer,
                    channel.partner_state.balance_proof.transfer,
                )
                return

            try:
                response = self.response_queue.get(
                    timeout=DEFAULT_EVENTS_POLL_TIMEOUT
                )
            except Empty:
                pass
            else:
                if isinstance(response, Secret):
                    if response.identifier == identifier and response.asset == asset:
                        assetmanager.handle_secretmessage(response)
                    else:
                        # the message does not match this transfer, but the
                        # contained secret is still used to claim the lock
                        assetmanager.handle_secret(identifier, response.secret)

                        if log.isEnabledFor(logging.ERROR):
                            log.error(
                                'Invalid Secret message received, expected message'
                                ' for asset=%s identifier=%s received=%s',
                                asset,
                                identifier,
                                response,
                            )

                elif isinstance(response, RevealSecret):
                    assetmanager.handle_secret(identifier, response.secret)

                elif log.isEnabledFor(logging.ERROR):
                    log.error(
                        'Invalid message ignoring. %s %s',
                        repr(response),
                        repr(self),
                    )

    def _wait_expiration(self, raiden, transfer, sleep=DEFAULT_EVENTS_POLL_TIMEOUT):
        """ Utility to wait until the expiration block.

        For a chain A-B-C, if an attacker controls A and C a mediated transfer
        can be done through B and C will wait for/send a timeout, for that
        reason B must not unregister the hashlock from the transfermanager
        until the lock has expired, otherwise the revealed secret wouldnt be
        caught.
        """
        # pylint: disable=no-self-use
        expiration = transfer.lock.expiration + 1

        while True:
            current_block = raiden.get_block_number()

            if current_block > expiration:
                return

            gevent.sleep(sleep)
# Note: the send_and_wait_valid methods are used to check the message type and
# sender only; this could be improved by using an encrypted connection between
# the nodes, making the signature validation unnecessary.
class StartMediatedTransferTask(BaseMediatedTransferTask):
    """ Initiator task: choose a fresh secret per route, send the
    MediatedTransfer along the best available route, and reveal the secret
    once the target sends a valid SecretRequest.
    """

    def __init__(self, raiden, asset_address, amount, identifier, target, done_result):
        # pylint: disable=too-many-arguments
        super(StartMediatedTransferTask, self).__init__()
        self.raiden = raiden
        self.asset_address = asset_address
        self.amount = amount
        self.identifier = identifier
        self.target = target
        # async result signalled True/False with the transfer outcome
        self.done_result = done_result

    def __repr__(self):
        return '<{} {} asset:{}>'.format(
            self.__class__.__name__,
            pex(self.raiden.address),
            pex(self.asset_address),
        )

    def _run(self):  # noqa pylint: disable=method-hidden,too-many-locals
        raiden = self.raiden
        amount = self.amount
        identifier = self.identifier
        target = self.target
        node_address = raiden.address

        assetmanager = raiden.get_manager_by_asset_address(self.asset_address)
        transfermanager = assetmanager.transfermanager

        # NOTE(review): fees appear unimplemented at this point — all
        # transfers are created with fee=0; confirm
        fee = 0
        # there are no guarantees that the next_hop will follow the same route
        routes = assetmanager.get_best_routes(
            amount,
            target,
            lock_timeout=None,
        )

        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'START MEDIATED TRANSFER initiator:%s target:%s',
                pex(node_address),
                pex(target),
            )

        for path, forward_channel in routes:
            # never reuse the last secret, discard it to avoid losing asset
            secret = sha3(hex(random.getrandbits(256)))
            hashlock = sha3(secret)

            if log.isEnabledFor(logging.DEBUG):
                log.debug(
                    'START MEDIATED TRANSFER NEW PATH path:%s hashlock:%s',
                    lpex(path),
                    pex(hashlock),
                )

            transfermanager.register_task_for_hashlock(self, hashlock)
            assetmanager.register_channel_for_hashlock(forward_channel, hashlock)

            lock_timeout = forward_channel.settle_timeout - forward_channel.reveal_timeout
            lock_expiration = raiden.get_block_number() + lock_timeout

            mediated_transfer = forward_channel.create_mediatedtransfer(
                node_address,
                target,
                fee,
                amount,
                identifier,
                lock_expiration,
                hashlock,
            )
            raiden.sign(mediated_transfer)
            forward_channel.register_transfer(mediated_transfer)

            for response in self.send_and_iter_valid(raiden, path, mediated_transfer):
                valid_secretrequest = (
                    isinstance(response, SecretRequest) and
                    response.amount == amount and
                    response.hashlock == hashlock and
                    response.identifier == identifier
                )

                if valid_secretrequest:
                    # This node must reveal the Secret starting with the
                    # end-of-chain, the `next_hop` can not be trusted to reveal the
                    # secret to the other nodes.
                    revealsecret_message = RevealSecret(secret)
                    raiden.sign(revealsecret_message)

                    # we cannot wait for ever since the `target` might
                    # intentionally _not_ send the Ack, blocking us from
                    # unlocking the asset.
                    # assumes ~ESTIMATED_BLOCK_TIME seconds per block; the
                    # /.6 factor widens the wait margin — TODO confirm intent
                    wait = (
                        ESTIMATED_BLOCK_TIME * lock_timeout / .6
                    )
                    raiden.send_and_wait(target, revealsecret_message, wait)

                    # target has acknowledged the RevealSecret, we can update
                    # the chain in the forward direction
                    assetmanager.handle_secret(
                        identifier,
                        secret,
                    )

                    # call the callbacks and unregister the task
                    transfermanager.on_hashlock_result(hashlock, True)

                    # the transfer is done when the lock is unlocked and the Secret
                    # message is sent (doesn't imply the other nodes in the chain
                    # have unlocked/withdrawn)
                    self.done_result.set(True)
                    return

                # someone down the line timed out / couldn't proceed, try next
                # path, stop listening for messages for the current hashlock
                else:
                    # the initiator can unregister right away because it knowns
                    # no one else can reveal the secret
                    transfermanager.on_hashlock_result(hashlock, False)
                    del assetmanager.hashlock_channel[hashlock]
                    break

        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'START MEDIATED TRANSFER FAILED initiator:%s target:%s',
                pex(node_address),
                pex(self.target),
            )

        # all routes failed, consider:
        # - if the target is a good node to have a channel:
        #   - deposit/reopen/open a channel with target
        # - if the target has a direct channel with good nodes and there is
        #   sufficient funds to complete the transfer
        #   - open the required channels with these nodes
        self.done_result.set(False)

    def send_and_iter_valid(self, raiden, path, mediated_transfer):  # noqa pylint: disable=no-self-use
        """ Send the `mediated_transfer` and wait for either a message from
        `target` or the `next_hop`.

        Yields only relevant responses: a refund/timeout from the next hop, a
        SecretRequest from the target, or the TIMEOUT sentinel.
        """
        next_hop = path[1]
        target = path[-1]

        response_iterator = self._send_and_wait_time(
            raiden,
            mediated_transfer.recipient,
            mediated_transfer,
            raiden.config['msg_timeout'],
        )

        for response in response_iterator:
            refund_or_timeout = (
                isinstance(response, (RefundTransfer, TransferTimeout)) and
                response.sender == next_hop
            )
            secret_request = (
                isinstance(response, SecretRequest) and
                response.sender == target
            )
            timeout = response is TIMEOUT

            if refund_or_timeout or secret_request or timeout:
                yield response

            elif log.isEnabledFor(logging.ERROR):
                log.error(
                    'Invalid message ignoring. %s',
                    repr(response),
                )

        return
class MediateTransferTask(BaseMediatedTransferTask):
    """ Middle-node task: receive a MediatedTransfer and forward it along the
    best available route with a safely decremented lock expiration, refunding
    the previous node when no route works.
    """

    def __init__(self, raiden, asset_address, originating_transfer, fee):
        super(MediateTransferTask, self).__init__()
        self.raiden = raiden
        self.asset_address = asset_address
        self.originating_transfer = originating_transfer
        self.fee = fee

    def __repr__(self):
        return '<{} {} asset:{}>'.format(
            self.__class__.__name__,
            pex(self.raiden.address),
            pex(self.asset_address),
        )

    def _run(self):  # noqa
        # pylint: disable=method-hidden,too-many-locals,too-many-branches,too-many-statements
        raiden = self.raiden
        fee = self.fee
        originating_transfer = self.originating_transfer

        assetmanager = raiden.get_manager_by_asset_address(self.asset_address)
        transfermanager = assetmanager.transfermanager
        from_address = originating_transfer.sender
        originating_channel = assetmanager.partneraddress_channel[from_address]
        hashlock = originating_transfer.lock.hashlock

        transfermanager.register_task_for_hashlock(self, hashlock)
        assetmanager.register_channel_for_hashlock(originating_channel, hashlock)

        # there are no guarantees that the next_hop will follow the same route
        routes = assetmanager.get_best_routes(
            originating_transfer.lock.amount,
            originating_transfer.target,
        )

        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'MEDIATED TRANSFER initiator:%s node:%s target:%s',
                pex(originating_transfer.initiator),
                pex(raiden.address),
                pex(originating_transfer.target),
            )

        maximum_expiration = (
            originating_channel.settle_timeout +
            raiden.get_block_number()
        )

        # Ignore locks that expire after settle_timeout
        if originating_transfer.lock.expiration > maximum_expiration:
            if log.isEnabledFor(logging.ERROR):
                # fix: was log.debug under an ERROR-level guard, and used
                # `self.address` which this task never defines (it would raise
                # AttributeError) — the node address lives on `raiden`
                log.error(
                    'lock_expiration is too large, ignore the mediated transfer',
                    initiator=pex(originating_transfer.initiator),
                    node=pex(raiden.address),
                    target=pex(originating_transfer.target),
                )

            # Notes:
            # - The node didn't send a transfer forward, so it can not lose
            #   asset.
            # - It's quiting early, so it wont claim the lock if the secret is
            #   revealed.
            # - The previous_node knowns the settle_timeout because this value
            #   is in the smart contract.
            # - It's not sending a RefundTransfer to the previous_node, so it
            #   will force a retry with a new path/different hashlock, this
            #   could make the bad behaving node lose it's fees but it will
            #   also increase latency.
            return

        for path, forward_channel in routes:
            current_block_number = raiden.get_block_number()

            # Dont forward the mediated transfer to the next_hop if we cannot
            # decrease the expiration by `reveal_timeout`, this is time
            # required to learn the secret through the blockchain that needs to
            # consider DoS attacks.
            lock_timeout = originating_transfer.lock.expiration - current_block_number
            if lock_timeout < forward_channel.reveal_timeout:
                if log.isEnabledFor(logging.INFO):
                    log.info(
                        'transfer.lock_expiration is smaller than'
                        ' reveal_timeout, channel/path cannot be used',
                        lock_timeout=originating_transfer.lock.expiration,
                        reveal_timeout=forward_channel.reveal_timeout,
                        settle_timeout=forward_channel.settle_timeout,
                        nodeid=pex(path[0]),
                        partner=pex(path[1]),
                    )
                continue

            new_lock_timeout = lock_timeout - forward_channel.reveal_timeout

            # Our partner won't accept a locked transfer that can expire after
            # the settlement period, otherwise the secret could be revealed
            # after channel is settled and asset would be lost, in that case
            # decrease the expiration by an amount larger than reveal_timeout.
            if new_lock_timeout > forward_channel.settle_timeout:
                new_lock_timeout = forward_channel.settle_timeout

                if log.isEnabledFor(logging.DEBUG):
                    log.debug(
                        'lock_expiration would be too large, decrement more so'
                        ' that the channel/path can be used',
                        lock_timeout=lock_timeout,
                        new_lock_timeout=new_lock_timeout,
                        nodeid=pex(path[0]),
                        partner=pex(path[1]),
                    )

            new_lock_expiration = (
                current_block_number +
                new_lock_timeout -
                # FIXME: decrement to avoid boundary errors (ensure that
                # less-than is used instead of less-than-equal)
                2
            )

            mediated_transfer = forward_channel.create_mediatedtransfer(
                originating_transfer.initiator,
                originating_transfer.target,
                fee,
                originating_transfer.lock.amount,
                originating_transfer.identifier,
                new_lock_expiration,
                hashlock,
            )
            raiden.sign(mediated_transfer)

            if log.isEnabledFor(logging.DEBUG):
                log.debug(
                    'MEDIATED TRANSFER NEW PATH path:%s hashlock:%s',
                    lpex(path),
                    pex(hashlock),
                )

            assetmanager.register_channel_for_hashlock(
                forward_channel,
                hashlock,
            )
            forward_channel.register_transfer(mediated_transfer)

            for response in self.send_and_iter_valid(raiden, path, mediated_transfer):
                valid_refund = (
                    isinstance(response, RefundTransfer) and
                    response.lock.amount == originating_transfer.lock.amount
                )

                if isinstance(response, RevealSecret):
                    assetmanager.handle_secret(
                        originating_transfer.identifier,
                        response.secret,
                    )
                    self._wait_for_unlock_or_close(
                        raiden,
                        assetmanager,
                        originating_channel,
                        originating_transfer,
                    )

                elif isinstance(response, Secret):
                    assetmanager.handle_secretmessage(response)
                    # Secret might be from a different node, wait for the
                    # update from `from_address`
                    self._wait_for_unlock_or_close(
                        raiden,
                        assetmanager,
                        originating_channel,
                        originating_transfer,
                    )
                    transfermanager.on_hashlock_result(hashlock, True)
                    return

                elif valid_refund:
                    # the next hop refunded; register it and try another path
                    forward_channel.register_transfer(response)
                    break

                else:
                    # next hop timed out / misbehaved: propagate a timeout
                    # backwards and wait for the lock to expire before
                    # unregistering (see _wait_expiration)
                    timeout_message = originating_channel.create_timeouttransfer_for(
                        originating_transfer,
                    )
                    raiden.send_async(
                        originating_transfer.sender,
                        timeout_message,
                    )
                    self._wait_expiration(
                        raiden,
                        originating_transfer,
                    )
                    transfermanager.on_hashlock_result(hashlock, False)
                    return

        # No suitable path avaiable (e.g. insufficient distributable, no active node)
        # Send RefundTransfer to the originating node, this has the effect of
        # backtracking in the graph search of the raiden network.
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'REFUND MEDIATED TRANSFER from=%s node:%s hashlock:%s',
                pex(from_address),
                pex(raiden.address),
                pex(hashlock),
            )

        refund_transfer = originating_channel.create_refundtransfer_for(
            originating_transfer,
        )
        raiden.sign(refund_transfer)

        originating_channel.register_transfer(refund_transfer)
        raiden.send_async(from_address, refund_transfer)
        self._wait_expiration(
            raiden,
            originating_transfer,
        )
        transfermanager.on_hashlock_result(hashlock, False)

    def send_and_iter_valid(self, raiden, path, mediated_transfer):
        """ Forward `mediated_transfer` and yield only relevant responses: a
        Secret/RevealSecret, a refund/timeout from the path, or TIMEOUT.
        """
        response_iterator = self._send_and_wait_time(
            raiden,
            mediated_transfer.recipient,
            mediated_transfer,
            raiden.config['msg_timeout'],
        )

        for response in response_iterator:
            timeout = response is TIMEOUT
            secret = isinstance(response, (Secret, RevealSecret))
            # NOTE(review): in StartMediatedTransferTask's convention path[0]
            # is this node and path[1] the next hop — confirm the sender check
            # here is not intended to be path[1]
            refund_or_timeout = (
                isinstance(response, (RefundTransfer, TransferTimeout)) and
                response.sender == path[0]
            )

            if timeout or secret or refund_or_timeout:
                yield response

            elif log.isEnabledFor(logging.ERROR):
                log.error(
                    'Partner sent an invalid message. %s',
                    repr(response),
                )
class EndMediatedTransferTask(BaseMediatedTransferTask):
    """ Task that requests a secret for a registered transfer. """

    def __init__(self, raiden, asset_address, originating_transfer):
        super(EndMediatedTransferTask, self).__init__()
        self.raiden = raiden
        self.asset_address = asset_address
        self.originating_transfer = originating_transfer

    def __repr__(self):
        return '<{} {} asset:{}>'.format(
            self.__class__.__name__,
            pex(self.raiden.address),
            pex(self.asset_address),
        )

    def _run(self):  # pylint: disable=method-hidden
        raiden = self.raiden
        originating_transfer = self.originating_transfer

        hashlock = originating_transfer.lock.hashlock
        assetmanager = raiden.get_manager_by_asset_address(self.asset_address)
        transfermanager = assetmanager.transfermanager

        originating_channel = assetmanager.get_channel_by_partner_address(
            originating_transfer.sender,
        )

        transfermanager.register_task_for_hashlock(self, hashlock)
        assetmanager.register_channel_for_hashlock(originating_channel, hashlock)

        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'END MEDIATED TRANSFER %s -> %s msghash:%s hashlock:%s',
                pex(originating_transfer.target),
                pex(originating_transfer.initiator),
                pex(originating_transfer.hash),
                pex(originating_transfer.lock.hashlock),
            )

        # ask the initiator for the secret matching this lock
        secret_request = SecretRequest(
            originating_transfer.identifier,
            originating_transfer.lock.hashlock,
            originating_transfer.lock.amount,
        )
        raiden.sign(secret_request)

        # If the transfer timed out in the initiator a new hashlock will be
        # created and this task will not receive a secret, this is fine because
        # the task will eventually exit once a blocktimeout happens and a new
        # task will be created for the new hashlock
        valid_messages_iterator = self.send_secretrequest_and_iter_valid(
            raiden,
            originating_transfer,
            secret_request,
        )

        for response in valid_messages_iterator:
            # at this point a Secret message is not valid
            if isinstance(response, RevealSecret):
                assetmanager.handle_secret(
                    originating_transfer.identifier,
                    response.secret,
                )
                self._wait_for_unlock_or_close(
                    raiden,
                    assetmanager,
                    originating_channel,
                    originating_transfer,
                )
                transfermanager.on_hashlock_result(hashlock, True)
                return

            elif response is TIMEOUT:
                # this task timeouts on a blocknumber, at this point all the other
                # nodes have timedout
                transfermanager.on_hashlock_result(originating_transfer.lock.hashlock, False)
                break

    def send_secretrequest_and_iter_valid(self, raiden, originating_transfer, secret_request):
        """ Send the SecretRequest to the initiator and yield only a
        RevealSecret or the TIMEOUT sentinel, discarding everything else.
        """
        # pylint: disable=invalid-name

        # keep this task alive up to the expiration block
        response_iterator = self._send_and_wait_block(
            raiden,
            originating_transfer.initiator,
            secret_request,
            originating_transfer.lock.expiration,
        )

        # a Secret message is not valid here since the secret needs to first be
        # revealed to the target
        for response in response_iterator:
            if isinstance(response, RevealSecret):
                yield response
                break

            elif response is TIMEOUT:
                if log.isEnabledFor(logging.ERROR):
                    log.error(
                        'SECRETREQUEST TIMED OUT node:%s msghash:%s hashlock:%s',
                        pex(raiden.address),
                        pex(secret_request.hash),
                        pex(originating_transfer.lock.hashlock),
                    )

                yield response

            elif log.isEnabledFor(logging.ERROR):
                log.error(
                    'INVALID MESSAGE RECEIVED %s',
                    repr(response),
                )
class StartExchangeTask(BaseMediatedTransferTask):
    """ Initiator task, responsible to choose a random secret, initiate the
    asset exchange by sending a mediated transfer to the counterparty and
    revealing the secret once the exchange can be complete.
    """

    def __init__(self, identifier, raiden, from_asset, from_amount, to_asset, to_amount, target,
                 done_result=None):
        # pylint: disable=too-many-arguments
        super(StartExchangeTask, self).__init__()

        self.identifier = identifier
        self.raiden = raiden
        self.from_asset = from_asset
        self.from_amount = from_amount
        self.to_asset = to_asset
        self.to_amount = to_amount
        self.target = target
        # fix: `_run` referenced `self.done_result` but it was never assigned,
        # raising AttributeError on a successful exchange; accept it as an
        # optional async result (backward compatible default)
        self.done_result = done_result

    def __repr__(self):
        return '<{} {} from_asset:{} to_asset:{}>'.format(
            self.__class__.__name__,
            pex(self.raiden.address),
            pex(self.from_asset),
            pex(self.to_asset),
        )

    def _run(self):  # pylint: disable=method-hidden,too-many-locals
        identifier = self.identifier
        raiden = self.raiden
        from_asset = self.from_asset
        from_amount = self.from_amount
        to_asset = self.to_asset
        to_amount = self.to_amount
        target = self.target

        from_assetmanager = raiden.get_manager_by_asset_address(from_asset)
        to_assetmanager = raiden.get_manager_by_asset_address(to_asset)
        from_transfermanager = from_assetmanager.transfermanager

        from_routes = from_assetmanager.get_best_routes(
            from_amount,
            target,
            lock_timeout=None,
        )
        fee = 0

        for path, from_channel in from_routes:
            # for each new path a new secret must be used
            secret = sha3(hex(random.getrandbits(256)))
            hashlock = sha3(secret)

            from_transfermanager.register_task_for_hashlock(self, hashlock)
            from_assetmanager.register_channel_for_hashlock(from_channel, hashlock)

            lock_expiration = (
                raiden.get_block_number() +
                from_channel.settle_timeout -
                raiden.config['reveal_timeout']
            )

            from_mediated_transfer = from_channel.create_mediatedtransfer(
                raiden.address,
                target,
                fee,
                from_amount,
                identifier,
                lock_expiration,
                hashlock,
            )
            raiden.sign(from_mediated_transfer)
            from_channel.register_transfer(from_mediated_transfer)

            # wait for the SecretRequest and MediatedTransfer
            to_mediated_transfer = self.send_and_wait_valid_state(
                raiden,
                path,
                from_mediated_transfer,
                to_asset,
                to_amount,
            )

            if to_mediated_transfer is None:
                # the initiator can unregister right away since it knows the
                # secret wont be revealed
                from_transfermanager.on_hashlock_result(hashlock, False)
                del from_assetmanager.hashlock_channel[hashlock]

            elif isinstance(to_mediated_transfer, MediatedTransfer):
                to_hop = to_mediated_transfer.sender

                # reveal the secret to the `to_hop` and `target`
                self.reveal_secret(
                    self.raiden,
                    secret,
                    last_node=to_hop,
                    exchange_node=target,
                )

                to_channel = to_assetmanager.get_channel_by_partner_address(
                    to_mediated_transfer.sender
                )

                # now the secret can be revealed forward (`from_hop`)
                from_assetmanager.handle_secret(identifier, secret)
                to_assetmanager.handle_secret(identifier, secret)

                self._wait_for_unlock_or_close(
                    raiden,
                    to_assetmanager,
                    to_channel,
                    to_mediated_transfer,
                )

                from_transfermanager.on_hashlock_result(hashlock, True)

                if self.done_result is not None:
                    self.done_result.set(True)

                # fix: stop after a completed exchange instead of starting a
                # fresh transfer on the next route (mirrors
                # StartMediatedTransferTask's success path)
                return

    def send_and_wait_valid_state(  # noqa
            self,
            raiden,
            path,
            from_asset_transfer,
            to_asset,
            to_amount):
        """ Start the exchange by sending the first mediated transfer to the
        taker and wait for mediated transfer for the exchanged asset.

        This method will validate the messages received, discard the invalid
        ones, and wait until a valid state is reached. The valid state is
        reached when a mediated transfer for `to_asset` with `to_amount` tokens
        and a SecretRequest from the taker are received.

        Returns:
            None: when the timeout was reached.
            MediatedTransfer: when a valid state is reached.
            RefundTransfer/TransferTimeout: when an invalid state is reached by
                our partner.
        """
        # pylint: disable=too-many-arguments
        next_hop = path[1]
        taker_address = path[-1]  # taker_address and next_hop might be equal

        # a valid state must have a secret request from the maker and a valid
        # mediated transfer for the new asset
        received_secretrequest = False
        mediated_transfer = None

        response_iterator = self._send_and_wait_time(
            raiden,
            from_asset_transfer.recipient,
            from_asset_transfer,
            raiden.config['msg_timeout'],
        )

        for response in response_iterator:
            # fix: the iterator signals expiry with the TIMEOUT sentinel, it
            # never yields None — the old `response is None` check could not
            # match and the sentinel fell through to the attribute accesses
            if response is TIMEOUT:
                if log.isEnabledFor(logging.DEBUG):
                    log.debug(
                        'EXCHANGE TRANSFER TIMED OUT hashlock:%s',
                        pex(from_asset_transfer.lock.hashlock),
                    )

                return None

            # The MediatedTransfer might be from `next_hop` or most likely from
            # a different node.
            #
            # The other participant must not use a direct transfer to finish
            # the asset exchange, ignore it
            if isinstance(response, MediatedTransfer) and response.asset == to_asset:
                # XXX: allow multiple transfers to add up to the correct amount
                if response.lock.amount == to_amount:
                    mediated_transfer = response

            elif isinstance(response, SecretRequest) and response.sender == taker_address:
                received_secretrequest = True

            # next_hop could send the MediatedTransfer, this is handled in a
            # previous if
            elif response.sender == next_hop:
                if isinstance(response, (RefundTransfer, TransferTimeout)):
                    return response
                else:
                    if log.isEnabledFor(logging.INFO):
                        log.info(
                            'Partner %s sent an invalid message %s',
                            pex(next_hop),
                            repr(response),
                        )

                    return None

            elif log.isEnabledFor(logging.ERROR):
                log.error(
                    'Invalid message ignoring. %s',
                    repr(response),
                )

            if mediated_transfer and received_secretrequest:
                return mediated_transfer

        return None

    def reveal_secret(self, raiden, secret, last_node, exchange_node):
        """ Reveal the `secret` to both participants.

        The secret must be revealed backwards to get the incentives right
        (first mediator would not forward the secret and get the transfer to
        itself).

        With exchanges there is an additional failure point, if a node is
        mediating both asset transfers it can intercept the transfer (as in not
        revealing the secret to others), for this reason it is not sufficient
        to just send the Secret backwards, the Secret must also be sent to the
        exchange_node.
        """
        # pylint: disable=no-self-use
        reveal_secret = RevealSecret(secret)
        raiden.sign(reveal_secret)

        # first reveal the secret to the last_node in the chain, proceed after
        # ack
        raiden.send_and_wait(last_node, reveal_secret, timeout=None)  # XXX: wait for expiration

        # the last_node has acknowledged the Secret, so we know the exchange
        # has kicked-off, reveal the secret to the exchange_node to
        # avoid interceptions but dont wait
        raiden.send_async(exchange_node, reveal_secret)
class ExchangeTask(BaseMediatedTransferTask):
    """ Counterparty task, responsible to receive a MediatedTransfer for the
    from_transfer and forward a to_transfer with the same hashlock.
    """

    def __init__(self, raiden, from_mediated_transfer, to_asset, to_amount, target):
        # pylint: disable=too-many-arguments
        super(ExchangeTask, self).__init__()

        self.raiden = raiden
        self.from_mediated_transfer = from_mediated_transfer
        self.target = target

        self.to_amount = to_amount
        self.to_asset = to_asset

    def __repr__(self):
        return '<{} {} from_asset:{} to_asset:{}>'.format(
            self.__class__.__name__,
            pex(self.raiden.address),
            pex(self.from_mediated_transfer.asset),
            pex(self.to_asset),
        )

    def _run(self):  # pylint: disable=method-hidden,too-many-locals
        fee = 0
        raiden = self.raiden

        from_mediated_transfer = self.from_mediated_transfer
        hashlock = from_mediated_transfer.lock.hashlock

        from_asset = from_mediated_transfer.asset
        to_asset = self.to_asset
        to_amount = self.to_amount

        to_assetmanager = raiden.get_manager_by_asset_address(to_asset)
        from_assetmanager = raiden.get_manager_by_asset_address(from_asset)
        from_transfermanager = from_assetmanager.transfermanager

        from_channel = from_assetmanager.get_channel_by_partner_address(
            from_mediated_transfer.sender,
        )

        from_transfermanager.register_task_for_hashlock(self, hashlock)
        from_assetmanager.register_channel_for_hashlock(from_channel, hashlock)

        # forward with a decremented expiration so there is time to learn a
        # secret revealed on-chain
        lock_expiration = from_mediated_transfer.lock.expiration - raiden.config['reveal_timeout']
        lock_timeout = lock_expiration - raiden.get_block_number()

        to_routes = to_assetmanager.get_best_routes(
            from_mediated_transfer.lock.amount,
            from_mediated_transfer.initiator,  # route back to the initiator
            lock_timeout,
        )

        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'EXCHANGE TRANSFER %s -> %s msghash:%s hashlock:%s',
                pex(from_mediated_transfer.target),
                pex(from_mediated_transfer.initiator),
                pex(from_mediated_transfer.hash),
                pex(hashlock),
            )

        secret_request = SecretRequest(
            from_mediated_transfer.identifier,
            from_mediated_transfer.lock.hashlock,
            from_mediated_transfer.lock.amount,
        )
        raiden.sign(secret_request)
        raiden.send_async(from_mediated_transfer.initiator, secret_request)

        for path, to_channel in to_routes:
            to_next_hop = path[1]

            to_mediated_transfer = to_channel.create_mediatedtransfer(
                raiden.address,  # this node is the new initiator
                from_mediated_transfer.initiator,  # the initiator is the target for the to_asset
                fee,
                to_amount,
                # fix: the identifier argument was missing — every other
                # create_mediatedtransfer call site passes
                # (initiator, target, fee, amount, identifier, expiration, hashlock)
                from_mediated_transfer.identifier,
                lock_expiration,
                hashlock,  # use the original hashlock
            )
            raiden.sign(to_mediated_transfer)

            if log.isEnabledFor(logging.DEBUG):
                log.debug(
                    'MEDIATED TRANSFER NEW PATH path:%s hashlock:%s',
                    lpex(path),
                    pex(from_mediated_transfer.lock.hashlock),
                )

            # Using assetmanager to register the interest because it outlives
            # this task, the secret handling will happen only _once_
            to_assetmanager.register_channel_for_hashlock(
                to_channel,
                hashlock,
            )
            to_channel.register_transfer(to_mediated_transfer)

            response = self.send_and_wait_valid(raiden, to_mediated_transfer)

            if log.isEnabledFor(logging.DEBUG):
                log.debug(
                    'EXCHANGE TRANSFER NEW PATH path:%s hashlock:%s',
                    lpex(path),
                    pex(hashlock),
                )

            # only refunds for `from_asset` must be considered (check send_and_wait_valid)
            if isinstance(response, RefundTransfer):
                # fix: compare against the forwarded transfer's lock amount;
                # MediatedTransfer carries the amount on its lock, there is no
                # top-level `.amount` attribute (see the sibling tasks)
                if response.lock.amount != to_mediated_transfer.lock.amount:
                    log.info(
                        'Partner %s sent an invalid refund message with an invalid amount',
                        pex(to_next_hop),
                    )
                    timeout_message = from_channel.create_timeouttransfer_for(
                        from_mediated_transfer
                    )
                    raiden.send_async(from_mediated_transfer.sender, timeout_message)
                    # fix: the task has no `transfermanager` attribute — use
                    # the local from_transfermanager
                    from_transfermanager.on_hashlock_result(hashlock, False)
                    return
                else:
                    to_channel.register_transfer(response)

            elif isinstance(response, Secret):
                # this node is receiving the from_asset and sending the
                # to_asset, meaning that it can claim the to_asset but it needs
                # a Secret message to claim the from_asset
                to_assetmanager.handle_secretmessage(response)
                from_assetmanager.handle_secretmessage(response)

                self._wait_for_unlock_or_close(
                    raiden,
                    from_assetmanager,
                    from_channel,
                    from_mediated_transfer,
                )
                # fix: unregister the task and stop; previously the loop fell
                # through and re-sent the transfer on the next route even after
                # a completed exchange (_wait_for_unlock_or_close documents
                # that on_hashlock_result must be called after it returns)
                from_transfermanager.on_hashlock_result(hashlock, True)
                return

    def send_and_wait_valid(self, raiden, mediated_transfer):
        """ Send `mediated_transfer` and wait for the matching Secret or a
        RefundTransfer from the recipient, discarding invalid messages.
        """
        response_iterator = self._send_and_wait_time(
            raiden,
            mediated_transfer.recipient,
            mediated_transfer,
            raiden.config['msg_timeout'],
        )

        for response in response_iterator:
            # fix: the iterator signals expiry with the TIMEOUT sentinel, it
            # never yields None — the old `response is None` check could not
            # match and the sentinel fell through to the attribute accesses
            if response is TIMEOUT:
                log.error(
                    'EXCHANGE TIMED OUT node:%s hashlock:%s',
                    pex(raiden.address),
                    pex(mediated_transfer.lock.hashlock),
                )

                return None

            if isinstance(response, Secret):
                if sha3(response.secret) != mediated_transfer.lock.hashlock:
                    log.error('Secret doesnt match the hashlock, ignoring.')
                    continue

                return response

            # first check that the message is from a known/valid sender/asset
            valid_target = response.target == raiden.address
            valid_sender = response.sender == mediated_transfer.recipient
            valid_asset = response.asset == mediated_transfer.asset

            if not valid_target or not valid_sender or not valid_asset:
                log.error(
                    'Invalid message [%s] supplied to the task, ignoring.',
                    repr(response),
                )
                continue

            if isinstance(response, RefundTransfer):
                return response

        return None
| tomaaron/raiden | raiden/tasks.py | Python | mit | 50,038 |
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
#
# This simple example shows how to call dlib's optimal linear assignment problem solver.
# It is an implementation of the famous Hungarian algorithm and is quite fast, operating in
# O(N^3) time.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating system
# so long as you have CMake and boost-python installed. On Ubuntu, this can be
# done easily by running the command: sudo apt-get install libboost-python-dev cmake
import dlib

# Lets imagine you need to assign N people to N jobs. Additionally, each person will make
# your company a certain amount of money at each job, but each person has different skills
# so they are better at some jobs and worse at others. You would like to find the best way
# to assign people to these jobs. In particular, you would like to maximize the amount of
# money the group makes as a whole. This is an example of an assignment problem and is
# what is solved by the dlib.max_cost_assignment() routine.

# So in this example, lets imagine we have 3 people and 3 jobs. We represent the amount of
# money each person will produce at each job with a cost matrix. Each row corresponds to a
# person and each column corresponds to a job. So for example, below we are saying that
# person 0 will make $1 at job 0, $2 at job 1, and $6 at job 2.
cost = dlib.matrix([[1, 2, 6],
                    [5, 3, 6],
                    [4, 5, 0]])

# To find out the best assignment of people to jobs we just need to call this function.
assignment = dlib.max_cost_assignment(cost)

# This prints optimal assignments: [2, 0, 1]
# which indicates that we should assign the person from the first row of the cost matrix to
# job 2, the middle row person to job 0, and the bottom row person to job 1.
# print() with a single formatted argument works under both Python 2 and 3
# (the original Python-2-only print statements were a syntax error on Python 3).
print("optimal assignments: {}".format(assignment))

# This prints optimal cost: 16.0
# which is correct since our optimal assignment is 6+5+5.
print("optimal cost: {}".format(dlib.assignment_cost(cost, assignment)))
| kaathleen/LeapGesture-library | DynamicGestures/dlib-18.5/python_examples/max_cost_assignment.py | Python | mit | 2,357 |
#
# First, let us create some utility functions for Plotting
#
def pd_centers(featuresUsed, centers):
    """Build a DataFrame of cluster centers for parallel-coordinates plotting.

    Each row is one cluster center; columns are `featuresUsed` plus an integer
    'prediction' column holding the cluster index.

    Args:
        featuresUsed: iterable of feature/column names, one per center value.
        centers: iterable of per-cluster center vectors (len == len(featuresUsed)).

    Returns:
        pandas.DataFrame with columns featuresUsed + ['prediction'].
    """
    # fix: the unused imports were removed — among them
    # `pandas.tools.plotting`, a module path that no longer exists in modern
    # pandas and made this function raise ImportError before doing any work
    import pandas as pd
    import numpy as np

    colNames = list(featuresUsed)
    colNames.append('prediction')

    # Append the cluster index to each center vector ('prediction' column)
    Z = [np.append(A, index) for index, A in enumerate(centers)]

    # Convert to pandas for plotting
    P = pd.DataFrame(Z, columns=colNames)
    P['prediction'] = P['prediction'].astype(int)
    return P
def parallel_plot(data):
    """Draw a parallel-coordinates plot of cluster centers.

    data: DataFrame as produced by pd_centers(); the 'prediction' column
        selects one colored line per cluster center.
    """
    from itertools import cycle, islice
    # Fix: pandas.tools.plotting was removed in pandas 0.20; the public
    # location of parallel_coordinates is pandas.plotting.
    from pandas.plotting import parallel_coordinates
    import matplotlib.pyplot as plt
    # Cycle through a fixed palette so any number of centers gets a color.
    my_colors = list(islice(cycle(['b', 'r', 'g', 'y', 'k']), None, len(data)))
    plt.figure(figsize=(15,8)).gca().axes.set_ylim([-2.5,+2.5])
    parallel_coordinates(data, 'prediction', color = my_colors, marker='o')
# coding: utf-8
from __future__ import absolute_import, unicode_literals
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_sso import claims
from rest_framework_sso.models import SessionToken
from rest_framework_sso.serializers import SessionTokenSerializer, AuthorizationTokenSerializer
from rest_framework_sso.settings import api_settings
import logging
logger = logging.getLogger(__name__)
# Pluggable hooks resolved from settings so projects can override how JWT
# payloads are built and how tokens are encoded/decoded.
create_session_payload = api_settings.CREATE_SESSION_PAYLOAD
create_authorization_payload = api_settings.CREATE_AUTHORIZATION_PAYLOAD
encode_jwt_token = api_settings.ENCODE_JWT_TOKEN
decode_jwt_token = api_settings.DECODE_JWT_TOKEN
class BaseAPIView(APIView):
    """Common base for the JWT-issuing API views below.

    Provides serializer plumbing (class lookup, instantiation, context)
    without any throttling or default permissions.
    """
    throttle_classes = ()
    permission_classes = ()
    serializer_class = None

    def get_serializer_context(self):
        """Return the extra context passed to every serializer instance."""
        return {"request": self.request, "view": self}

    def get_serializer_class(self):
        """Return the serializer class configured on this view.

        Subclasses normally set `serializer_class`; override this method
        instead when the serializer must vary per request (for example,
        richer output for staff users).
        """
        assert self.serializer_class is not None, (
            "'%s' should either include a `serializer_class` attribute, "
            "or override the `get_serializer_class()` method." % self.__class__.__name__
        )
        return self.serializer_class

    def get_serializer(self, *args, **kwargs):
        """Instantiate the view's serializer with the standard context.

        Used both for validating/deserializing input and for serializing
        output.
        """
        kwargs["context"] = self.get_serializer_context()
        return self.get_serializer_class()(*args, **kwargs)
class ObtainSessionTokenView(BaseAPIView):
    """Exchange user credentials for a session JSON Web Token.

    Anonymous access is allowed; the serializer performs the actual
    credential check.
    """
    permission_classes = ()
    serializer_class = SessionTokenSerializer

    def post(self, request, *args, **kwargs):
        """Validate credentials and return {"token": <JWT>}."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data["user"]
        # Reuse an active session token recorded for this user agent,
        # creating a fresh one only when none exists.
        token = (
            SessionToken.objects.active()
            .filter(user=user)
            .with_user_agent(request=request)
            .first()
        )
        if token is None:
            token = SessionToken(user=user)
        token.update_attributes(request=request)
        token.save()
        jwt_token = encode_jwt_token(
            payload=create_session_payload(session_token=token, user=user)
        )
        return Response({"token": jwt_token})
class ObtainAuthorizationTokenView(BaseAPIView):
    """
    Returns a JSON Web Token that can be used for authenticated requests.

    Requires an already-authenticated request; the issued authorization
    token is bound to the caller's active session token.
    """
    permission_classes = (IsAuthenticated,)
    serializer_class = AuthorizationTokenSerializer
    def post(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        # If the request was authenticated with a JWT that carries a session
        # id claim, the authorization token must be tied to that session.
        if hasattr(request.auth, "get") and request.auth.get(claims.SESSION_ID):
            try:
                session_token = SessionToken.objects.active().get(
                    pk=request.auth.get(claims.SESSION_ID), user=request.user
                )
            except SessionToken.DoesNotExist:
                # Session referenced by the JWT no longer exists/is inactive.
                return Response({"detail": "Invalid token."}, status=status.HTTP_401_UNAUTHORIZED)
        else:
            # Otherwise fall back to an active session matching this user
            # agent, creating a new session when none is found.
            session_token = (
                SessionToken.objects.active().filter(user=request.user).with_user_agent(request=request).first()
            )
            if session_token is None:
                session_token = SessionToken(user=request.user)
        session_token.update_attributes(request=request)
        session_token.save()
        # Serializer-validated fields (e.g. requested scopes/apps) are folded
        # into the authorization payload.
        payload = create_authorization_payload(
            session_token=session_token, user=request.user, **serializer.validated_data
        )
        jwt_token = encode_jwt_token(payload=payload)
        return Response({"token": jwt_token})
# Pre-bound view callables for direct use in URLconf entries.
obtain_session_token = ObtainSessionTokenView.as_view()
obtain_authorization_token = ObtainAuthorizationTokenView.as_view()
| namespace-ee/django-rest-framework-sso | rest_framework_sso/views.py | Python | mit | 4,556 |
# vim: set ff=unix expandtab ts=4 sw=4:
from unittest import TestCase, main
import numpy as np
from CompartmentalSystems.bins.TimeField import TimeField
class TestTimeField(TestCase):
def setUp(self):
self.ar = np.zeros(3)
self.ar[2] = 2
self.arr = np.zeros((3, 2))
self.arr[2, 1] = 2
def test_number_of_Ts_entries(self):
tf = TimeField(self.arr, 0.1)
self.assertEqual(tf.number_of_Ts_entries, 3)
if __name__ == "__main__":
    # Allow running this test module directly: python TestTimeField.py
    main()
| MPIBGC-TEE/CompartmentalSystems | tests/bins/TestTimeField.py | Python | mit | 496 |
"""
RFB protocol implementattion, client side.
Override RFBClient and RFBFactory in your application.
See vncviewer.py for an example.
Reference:
http://www.realvnc.com/docs/rfbproto.pdf
(C) 2003 cliechti@gmx.net
MIT License
"""
# flake8: noqa
import sys
import math
import zlib
import getpass
import os
from Crypto.Cipher import AES
from Crypto.Hash import MD5
from Crypto.Util.Padding import pad
from Crypto.Util.number import bytes_to_long, long_to_bytes
from struct import pack, unpack
from . import pyDes
from twisted.python import usage, log
from twisted.internet.protocol import Protocol
from twisted.internet import protocol
from twisted.application import internet, service
#~ from twisted.internet import reactor
# Python3 compatibility replacement for ord(str) as ord(byte)
if sys.version_info[0] >= 3:
    original_ord = ord
    def ord(x):
        """Python 2-style ord() that also accepts ints.

        Under Python 2 this code always called ord() on length-1 strings.
        Under Python 3, indexing a bytes object already yields an int, so
        this shim passes ints through unchanged while delegating str/bytes
        of length 1 to the builtin.
        """
        if isinstance(x, int):
            return x
        if isinstance(x, (bytes, str)):
            return original_ord(x)
        raise TypeError(f"our customized ord takes an int, a byte, or a str. Got {type(x)} : {x}")
#encoding-type
#for SetEncodings()
RAW_ENCODING = 0
COPY_RECTANGLE_ENCODING = 1
RRE_ENCODING = 2
CORRE_ENCODING = 4
HEXTILE_ENCODING = 5
ZLIB_ENCODING = 6
TIGHT_ENCODING = 7
ZLIBHEX_ENCODING = 8
ZRLE_ENCODING = 16
#0xffffff00 to 0xffffffff tight options
# Pseudo-encodings: negotiate extra capabilities (cursor shape updates,
# desktop resize events) rather than describing pixel data.
PSEUDO_CURSOR_ENCODING = -239
PSEUDO_DESKTOP_SIZE_ENCODING = -223
#keycodes (X11 keysym values, as required by the RFB KeyEvent message)
#for KeyEvent()
KEY_BackSpace = 0xff08
KEY_Tab = 0xff09
KEY_Return = 0xff0d
KEY_Escape = 0xff1b
KEY_Insert = 0xff63
KEY_Delete = 0xffff
KEY_Home = 0xff50
KEY_End = 0xff57
KEY_PageUp = 0xff55
KEY_PageDown = 0xff56
KEY_Left = 0xff51
KEY_Up = 0xff52
KEY_Right = 0xff53
KEY_Down = 0xff54
KEY_F1 = 0xffbe
KEY_F2 = 0xffbf
KEY_F3 = 0xffc0
KEY_F4 = 0xffc1
KEY_F5 = 0xffc2
KEY_F6 = 0xffc3
KEY_F7 = 0xffc4
KEY_F8 = 0xffc5
KEY_F9 = 0xffc6
KEY_F10 = 0xffc7
KEY_F11 = 0xffc8
KEY_F12 = 0xffc9
KEY_F13 = 0xFFCA
KEY_F14 = 0xFFCB
KEY_F15 = 0xFFCC
KEY_F16 = 0xFFCD
KEY_F17 = 0xFFCE
KEY_F18 = 0xFFCF
KEY_F19 = 0xFFD0
KEY_F20 = 0xFFD1
KEY_ShiftLeft = 0xffe1
KEY_ShiftRight = 0xffe2
KEY_ControlLeft = 0xffe3
KEY_ControlRight = 0xffe4
KEY_MetaLeft = 0xffe7
KEY_MetaRight = 0xffe8
KEY_AltLeft = 0xffe9
KEY_AltRight = 0xffea
KEY_Scroll_Lock = 0xFF14
KEY_Sys_Req = 0xFF15
KEY_Num_Lock = 0xFF7F
KEY_Caps_Lock = 0xFFE5
KEY_Pause = 0xFF13
KEY_Super_L = 0xFFEB
KEY_Super_R = 0xFFEC
KEY_Hyper_L = 0xFFED
KEY_Hyper_R = 0xFFEE
KEY_KP_0 = 0xFFB0
KEY_KP_1 = 0xFFB1
KEY_KP_2 = 0xFFB2
KEY_KP_3 = 0xFFB3
KEY_KP_4 = 0xFFB4
KEY_KP_5 = 0xFFB5
KEY_KP_6 = 0xFFB6
KEY_KP_7 = 0xFFB7
KEY_KP_8 = 0xFFB8
KEY_KP_9 = 0xFFB9
KEY_KP_Enter = 0xFF8D
KEY_ForwardSlash = 0x002F
KEY_BackSlash = 0x005C
KEY_SpaceBar= 0x0020
# ZRLE helpers
def _zrle_next_bit(it, pixels_in_tile):
num_pixels = 0
while True:
b = ord(next(it))
for n in range(8):
value = b >> (7 - n)
yield value & 1
num_pixels += 1
if num_pixels == pixels_in_tile:
return
def _zrle_next_dibit(it, pixels_in_tile):
num_pixels = 0
while True:
b = ord(next(it))
for n in range(0, 8, 2):
value = b >> (6 - n)
yield value & 3
num_pixels += 1
if num_pixels == pixels_in_tile:
return
def _zrle_next_nibble(it, pixels_in_tile):
num_pixels = 0
while True:
b = ord(next(it))
for n in range(0, 8, 4):
value = b >> (4 - n)
yield value & 15
num_pixels += 1
if num_pixels == pixels_in_tile:
return
class RFBClient(Protocol):
    """Twisted protocol implementing the client side of RFB (VNC).

    Incoming bytes are fed through a small state machine: `expect()`
    registers the next handler plus the byte count it needs, and
    `_handleExpected()` dispatches once enough data has buffered.
    Subclasses override the callback methods at the bottom
    (vncConnectionMade, updateRectangle, ...) to implement a viewer.
    """
    def __init__(self):
        self._packet = []        # buffered received chunks, not yet parsed
        self._packet_len = 0     # total bytes currently buffered
        self._handler = self._handleInitial  # current state-machine entry point
        self._already_expecting = 0          # re-entrancy guard for _handleExpected()
        self._version = None         # protocol version this client will speak
        self._version_server = None  # version advertised by the server
        self._zlib_stream = zlib.decompressobj(0)  # persistent stream for ZRLE data
    #------------------------------------------------------
    # states used on connection startup
    #------------------------------------------------------
    def _handleInitial(self):
        """Parse the server greeting ("RFB xxx.yyy\\n") and negotiate a version."""
        buffer = b''.join(self._packet)
        if b'\n' in buffer:
            version = 3.3
            if buffer[:3] == b'RFB':
                # NOTE(review): stripping every b'0' turns b" 003.008" into
                # b" 3.8" but would mangle versions containing meaningful
                # zeros (e.g. a hypothetical 3.10) — verify against servers.
                version_server = float(buffer[3:-1].replace(b'0', b''))
                SUPPORTED_VERSIONS = (3.3, 3.7, 3.8)
                if version_server == 3.889: # Apple Remote Desktop
                    version_server = 3.8
                if version_server in SUPPORTED_VERSIONS:
                    version = version_server
                else:
                    log.msg("Protocol version %.3f not supported"
                            % version_server)
                    version = max(filter(
                        lambda x: x <= version_server, SUPPORTED_VERSIONS))
            buffer = buffer[12:]
            log.msg("Using protocol version %.3f" % version)
            parts = str(version).split('.')
            self.transport.write(
                bytes(b"RFB %03d.%03d\n" % (int(parts[0]), int(parts[1]))))
            self._packet[:] = [buffer]
            self._packet_len = len(buffer)
            self._handler = self._handleExpected
            self._version = version
            # NOTE(review): if the greeting did not start with b'RFB',
            # version_server was never bound and the next line raises
            # NameError — confirm whether such servers exist in practice.
            self._version_server = version_server
            if version < 3.7:
                self.expect(self._handleAuth, 4)
            else:
                self.expect(self._handleNumberSecurityTypes, 1)
        else:
            # Greeting incomplete; keep buffering.
            self._packet[:] = [buffer]
            self._packet_len = len(buffer)
    def _handleNumberSecurityTypes(self, block):
        """Protocol >= 3.7: read the count of security types offered."""
        (num_types,) = unpack("!B", block)
        if num_types:
            self.expect(self._handleSecurityTypes, num_types)
        else:
            # Zero types means the server is reporting a failure reason.
            self.expect(self._handleConnFailed, 4)
    def _handleSecurityTypes(self, block):
        """Pick the strongest mutually supported security type and reply."""
        types = unpack("!%dB" % len(block), block)
        SUPPORTED_TYPES = (1, 2, 30)
        valid_types = [sec_type for sec_type in types if sec_type in SUPPORTED_TYPES]
        if valid_types:
            sec_type = max(valid_types)
            self.transport.write(pack("!B", sec_type))
            if sec_type == 1:  # None (no authentication)
                if self._version < 3.8:
                    self._doClientInitialization()
                else:
                    self.expect(self._handleVNCAuthResult, 4)
            elif sec_type == 2:  # VNC (DES challenge/response)
                self.expect(self._handleVNCAuth, 16)
            elif sec_type == 30: # Apple Remote Desktop
                self.expect(self._handleAppleAuth, 4)
        else:
            log.msg("unknown security types: %s" % repr(types))
    def _handleAuth(self, block):
        """Protocol 3.3: server dictates the security type directly."""
        (auth,) = unpack("!I", block)
        #~ print "auth:", auth
        if auth == 0:
            self.expect(self._handleConnFailed, 4)
        elif auth == 1:
            self._doClientInitialization()
            return
        elif auth == 2:
            self.expect(self._handleVNCAuth, 16)
        else:
            log.msg("unknown auth response (%d)" % auth)
    def _handleConnFailed(self, block):
        """Read the length of the server's failure-reason string."""
        (waitfor,) = unpack("!I", block)
        self.expect(self._handleConnMessage, waitfor)
    def _handleConnMessage(self, block):
        """Log the server's failure reason (connection will not proceed)."""
        log.msg("Connection refused: %r" % block)
    def _handleVNCAuth(self, block):
        """Store the 16-byte DES challenge and ask the app for a password."""
        self._challenge = block
        self.vncRequestPassword()
        self.expect(self._handleVNCAuthResult, 4)
    def _handleAppleAuth(self, block):
        """Parse the ARD Diffie-Hellman parameters header (generator, key length)."""
        authMeta = unpack("!%dB" % len(block), block)
        self.generator = authMeta[1]
        self.keyLen = authMeta[3]
        self.expect(self._handleAppleAuthKey, self.keyLen)
    def _handleAppleAuthKey(self, block):
        """Receive the Diffie-Hellman prime modulus."""
        self.modulus = block
        self.expect(self._handleAppleAuthCert, self.keyLen)
    def _handleAppleAuthCert(self, block):
        """Receive the server's DH public key, then send encrypted credentials."""
        self.serverKey = block
        self.ardRequestCredentials()
        self._encryptArd()
        self.expect(self._handleVNCAuthResult, 4)
    def _encryptArd(self):
        """Apple Remote Desktop auth: DH key agreement + AES-ECB credentials.

        Builds a 128-byte username/password struct (each field NUL-padded to
        64 bytes), derives a shared secret via Diffie-Hellman, keys AES with
        the MD5 of that secret, and sends ciphertext plus our public key.
        """
        userStruct = self.factory.username + ("\0" * (64 - len(self.factory.username))) + self.factory.password + ("\0" * (64 - len(self.factory.password)))
        s = bytes_to_long(os.urandom(512))  # our DH private key
        g = self.generator
        kl = self.keyLen
        m = bytes_to_long(self.modulus)
        sk = bytes_to_long(self.serverKey)
        key = long_to_bytes(pow(g,s,m))      # our DH public key
        shared = long_to_bytes(pow(sk,s,m))  # shared DH secret
        h = MD5.new()
        h.update(shared)
        keyDigest = h.digest()
        cipher = AES.new(keyDigest, AES.MODE_ECB)
        ciphertext = cipher.encrypt(userStruct.encode('utf-8'))
        self.transport.write(ciphertext+key)
    def ardRequestCredentials(self):
        """Prompt interactively for any missing Apple Remote Desktop credentials."""
        if self.factory.username is None:
            self.factory.username = input('Apple username: ')
        if self.factory.password is None:
            self.factory.password = getpass.getpass('Apple password:')
    def sendPassword(self, password):
        """send password"""
        pw = (password + '\0' * 8)[:8] #make sure its 8 chars long, zero padded
        des = RFBDes(pw)
        response = des.encrypt(self._challenge)
        self.transport.write(response)
    def _handleVNCAuthResult(self, block):
        """Dispatch on the server's SecurityResult word."""
        (result,) = unpack("!I", block)
        #~ print "auth:", auth
        if result == 0:     #OK
            self._doClientInitialization()
            return
        elif result == 1:   #failed
            if self._version < 3.8:
                self.vncAuthFailed("authentication failed")
                self.transport.loseConnection()
            else:
                # 3.8+ servers append a reason string.
                self.expect(self._handleAuthFailed, 4)
        elif result == 2:   #too many
            if self._version < 3.8:
                self.vncAuthFailed("too many tries to log in")
                self.transport.loseConnection()
            else:
                self.expect(self._handleAuthFailed, 4)
        else:
            log.msg("unknown auth response (%d)" % result)
    def _handleAuthFailed(self, block):
        """Read the length of the failure reason (protocol >= 3.8)."""
        (waitfor,) = unpack("!I", block)
        self.expect(self._handleAuthFailedMessage, waitfor)
    def _handleAuthFailedMessage(self, block):
        """Report the failure reason and close the connection."""
        self.vncAuthFailed(block)
        self.transport.loseConnection()
    def _doClientInitialization(self):
        """Send ClientInit (shared flag) and await ServerInit."""
        self.transport.write(pack("!B", self.factory.shared))
        self.expect(self._handleServerInit, 24)
    def _handleServerInit(self, block):
        """Parse ServerInit: framebuffer size, pixel format, name length."""
        (self.width, self.height, pixformat, namelen) = unpack("!HH16sI", block)
        (self.bpp, self.depth, self.bigendian, self.truecolor,
         self.redmax, self.greenmax, self.bluemax,
         self.redshift, self.greenshift, self.blueshift) = \
           unpack("!BBBBHHHBBBxxx", pixformat)
        self.bypp = self.bpp // 8 #calc bytes per pixel
        self.expect(self._handleServerName, namelen)
    def _handleServerName(self, block):
        """Store the desktop name and hand control to the application."""
        self.name = block
        #callback:
        self.vncConnectionMade()
        self.expect(self._handleConnection, 1)
    #------------------------------------------------------
    # Server to client messages
    #------------------------------------------------------
    def _handleConnection(self, block):
        """Main message loop: dispatch on the server-to-client message type."""
        (msgid,) = unpack("!B", block)
        if msgid == 0:      # FramebufferUpdate
            self.expect(self._handleFramebufferUpdate, 3)
        elif msgid == 2:    # Bell
            self.bell()
            self.expect(self._handleConnection, 1)
        elif msgid == 3:    # ServerCutText
            self.expect(self._handleServerCutText, 7)
        else:
            log.msg("unknown message received (id %d)" % msgid)
            self.expect(self._handleConnection, 1)
    def _handleFramebufferUpdate(self, block):
        """Read the rectangle count and begin an update transaction."""
        (self.rectangles,) = unpack("!xH", block)
        self.rectanglePos = []
        self.beginUpdate()
        self._doConnection()
    def _doConnection(self):
        """Fetch the next rectangle header, or commit the finished update."""
        if self.rectangles:
            self.expect(self._handleRectangle, 12)
        else:
            self.commitUpdate(self.rectanglePos)
            self.expect(self._handleConnection, 1)
    def _handleRectangle(self, block):
        """Parse one rectangle header and dispatch to its encoding decoder."""
        (x, y, width, height, encoding) = unpack("!HHHHi", block)
        if self.rectangles:
            self.rectangles -= 1
            self.rectanglePos.append( (x, y, width, height) )
            if encoding == COPY_RECTANGLE_ENCODING:
                self.expect(self._handleDecodeCopyrect, 4, x, y, width, height)
            elif encoding == RAW_ENCODING:
                self.expect(self._handleDecodeRAW, width*height*self.bypp, x, y, width, height)
            elif encoding == HEXTILE_ENCODING:
                self._doNextHextileSubrect(None, None, x, y, width, height, None, None)
            elif encoding == CORRE_ENCODING:
                self.expect(self._handleDecodeCORRE, 4 + self.bypp, x, y, width, height)
            elif encoding == RRE_ENCODING:
                self.expect(self._handleDecodeRRE, 4 + self.bypp, x, y, width, height)
            elif encoding == ZRLE_ENCODING:
                self.expect(self._handleDecodeZRLE, 4, x, y, width, height)
            elif encoding == PSEUDO_CURSOR_ENCODING:
                # Cursor data: pixels plus a 1-bit mask rounded up to whole bytes per row.
                length = width * height * self.bypp
                length += int(math.floor((width + 7.0) / 8)) * height
                self.expect(self._handleDecodePsuedoCursor, length, x, y, width, height)
            elif encoding == PSEUDO_DESKTOP_SIZE_ENCODING:
                self._handleDecodeDesktopSize(width, height)
            else:
                log.msg("unknown encoding received (encoding %d)" % encoding)
                self._doConnection()
        else:
            self._doConnection()
    # --- RAW Encoding
    def _handleDecodeRAW(self, block, x, y, width, height):
        """Raw encoding: the block is the pixel data itself."""
        #TODO convert pixel format?
        self.updateRectangle(x, y, width, height, block)
        self._doConnection()
    # --- CopyRect Encoding
    def _handleDecodeCopyrect(self, block, x, y, width, height):
        """CopyRect encoding: copy an on-screen area to a new position."""
        (srcx, srcy) = unpack("!HH", block)
        self.copyRectangle(srcx, srcy, x, y, width, height)
        self._doConnection()
    # --- RRE Encoding
    def _handleDecodeRRE(self, block, x, y, width, height):
        """RRE encoding: background fill followed by colored sub-rectangles."""
        (subrects,) = unpack("!I", block[:4])
        color = block[4:]
        self.fillRectangle(x, y, width, height, color)
        if subrects:
            self.expect(self._handleRRESubRectangles, (8 + self.bypp) * subrects, x, y)
        else:
            self._doConnection()
    def _handleRRESubRectangles(self, block, topx, topy):
        """Fill each RRE sub-rectangle (color + 16-bit geometry)."""
        #~ print "_handleRRESubRectangle"
        pos = 0
        end = len(block)
        sz  = self.bypp + 8
        format = "!%dsHHHH" % self.bypp
        while pos < end:
            (color, x, y, width, height) = unpack(format, block[pos:pos+sz])
            self.fillRectangle(topx + x, topy + y, width, height, color)
            pos += sz
        self._doConnection()
    # --- CoRRE Encoding
    def _handleDecodeCORRE(self, block, x, y, width, height):
        """CoRRE encoding: like RRE but with compact 8-bit sub-rect geometry."""
        (subrects,) = unpack("!I", block[:4])
        color = block[4:]
        self.fillRectangle(x, y, width, height, color)
        if subrects:
            self.expect(self._handleDecodeCORRERectangles, (4 + self.bypp)*subrects, x, y)
        else:
            self._doConnection()
    def _handleDecodeCORRERectangles(self, block, topx, topy):
        """Fill each CoRRE sub-rectangle (color + 8-bit geometry).

        NOTE(review): the loop condition `pos < sz` processes only the first
        sub-rectangle; the RRE twin above uses `pos < end` — confirm whether
        this is a long-standing bug.
        """
        #~ print "_handleDecodeCORRERectangle"
        pos = 0
        end = len(block)
        sz = self.bypp + 4
        format = "!%dsBBBB" % self.bypp
        while pos < sz:
            (color, x, y, width, height) = unpack(format, block[pos:pos+sz])
            self.fillRectangle(topx + x, topy + y, width, height, color)
            pos += sz
        self._doConnection()
    # --- Hexile Encoding
    def _doNextHextileSubrect(self, bg, color, x, y, width, height, tx, ty):
        """Advance to the next 16x16 hextile tile (left-to-right, top-to-bottom)."""
        #~ print "_doNextHextileSubrect %r" % ((color, x, y, width, height, tx, ty), )
        #coords of next tile
        #its line after line of tiles
        #finished when the last line is completely received
        #dont inc the first time
        if tx is not None:
            #calc next subrect pos
            tx += 16
            if tx >= x + width:
                tx = x
                ty += 16
        else:
            tx = x
            ty = y
        #more tiles?
        if ty >= y + height:
            self._doConnection()
        else:
            self.expect(self._handleDecodeHextile, 1, bg, color, x, y, width, height, tx, ty)
    def _handleDecodeHextile(self, block, bg, color, x, y, width, height, tx, ty):
        """Read a tile's subencoding byte and fetch the data it implies."""
        (subencoding,) = unpack("!B", block)
        #calc tile size (edge tiles may be smaller than 16x16)
        tw = th = 16
        if x + width - tx < 16:   tw = x + width - tx
        if y + height - ty < 16:  th = y + height- ty
        #decode tile
        if subencoding & 1:     #RAW
            self.expect(self._handleDecodeHextileRAW, tw*th*self.bypp, bg, color, x, y, width, height, tx, ty, tw, th)
        else:
            numbytes = 0
            if subencoding & 2: #BackgroundSpecified
                numbytes += self.bypp
            if subencoding & 4: #ForegroundSpecified
                numbytes += self.bypp
            if subencoding & 8: #AnySubrects
                numbytes += 1
            if numbytes:
                self.expect(self._handleDecodeHextileSubrect, numbytes, subencoding, bg, color, x, y, width, height, tx, ty, tw, th)
            else:
                # Nothing specified: tile is just the carried-over background.
                self.fillRectangle(tx, ty, tw, th, bg)
                self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
    def _handleDecodeHextileSubrect(self, block, subencoding, bg, color, x, y, width, height, tx, ty, tw, th):
        """Apply the tile's optional background/foreground and sub-rect count."""
        subrects = 0
        pos = 0
        if subencoding & 2:     #BackgroundSpecified
            bg = block[:self.bypp]
            pos += self.bypp
        self.fillRectangle(tx, ty, tw, th, bg)
        if subencoding & 4:     #ForegroundSpecified
            color = block[pos:pos+self.bypp]
            pos += self.bypp
        if subencoding & 8:     #AnySubrects
            #~ (subrects, ) = unpack("!B", block)
            # In python2, block : string, block[pos] : string, ord(block[pos]) : int
            # In python3, block : byte, block[pos] : int, ord(block[pos]) : error
            subrects = ord(block[pos])
        #~ print subrects
        if subrects:
            if subencoding & 16:    #SubrectsColoured
                self.expect(self._handleDecodeHextileSubrectsColoured, (self.bypp + 2)*subrects, bg, color, subrects, x, y, width, height, tx, ty, tw, th)
            else:
                self.expect(self._handleDecodeHextileSubrectsFG, 2*subrects, bg, color, subrects, x, y, width, height, tx, ty, tw, th)
        else:
            self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
    def _handleDecodeHextileRAW(self, block, bg, color, x, y, width, height, tx, ty, tw, th):
        """the tile is in raw encoding"""
        self.updateRectangle(tx, ty, tw, th, block)
        self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
    def _handleDecodeHextileSubrectsColoured(self, block, bg, color, subrects, x, y, width, height, tx, ty, tw, th):
        """subrects with their own color"""
        sz = self.bypp + 2
        pos = 0
        end = len(block)
        while pos < end:
            pos2 = pos + self.bypp
            color = block[pos:pos2]
            # Geometry is packed as two nibble pairs: x/y, then (w-1)/(h-1).
            xy = ord(block[pos2])
            wh = ord(block[pos2+1])
            sx = xy >> 4
            sy = xy & 0xf
            sw = (wh >> 4) + 1
            sh = (wh & 0xf) + 1
            self.fillRectangle(tx + sx, ty + sy, sw, sh, color)
            pos += sz
        self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
    def _handleDecodeHextileSubrectsFG(self, block, bg, color, subrects, x, y, width, height, tx, ty, tw, th):
        """all subrect with same color"""
        pos = 0
        end = len(block)
        while pos < end:
            xy = ord(block[pos])
            wh = ord(block[pos+1])
            sx = xy >> 4
            sy = xy & 0xf
            sw = (wh >> 4) + 1
            sh = (wh & 0xf) + 1
            self.fillRectangle(tx + sx, ty + sy, sw, sh, color)
            pos += 2
        self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
    # --- ZRLE Encoding
    def _handleDecodeZRLE(self, block, x, y, width, height):
        """
        Handle ZRLE encoding.
        See https://tools.ietf.org/html/rfc6143#section-7.7.6 (ZRLE)
        and https://tools.ietf.org/html/rfc6143#section-7.7.5 (TRLE)
        """
        (compressed_bytes,) = unpack("!L", block)
        self.expect(self._handleDecodeZRLEdata, compressed_bytes, x, y, width, height)
    def _handleDecodeZRLEdata(self, block, x, y, width, height):
        """Decompress one ZRLE rectangle and decode its 64x64 tiles."""
        tx = x
        ty = y
        data = self._zlib_stream.decompress(block)
        it = iter(data)
        def cpixel(i):
            # Compressed pixel: 3 bytes from the stream plus an opaque
            # alpha byte appended for the local 4-byte-per-pixel buffer.
            yield next(i)
            yield next(i)
            yield next(i)
            # Alpha channel
            yield 0xff
        while True:
            try:
                subencoding = ord(next(it))
            except StopIteration:
                break
            # calc tile size (edge tiles may be smaller than 64x64)
            tw = th = 64
            if x + width - tx < 64:
                tw = x + width - tx
            if y + height - ty < 64:
                th = y + height - ty
            pixels_in_tile = tw * th
            # decode next tile
            num_pixels = 0
            pixel_data = bytearray()
            palette_size = subencoding & 127
            if subencoding & 0x80:
                # RLE
                def do_rle(pixel):
                    # Run length is 1 + sum of bytes, where 255 means
                    # "another length byte follows".
                    run_length_next = ord(next(it))
                    run_length = run_length_next
                    while run_length_next == 255:
                        run_length_next = ord(next(it))
                        run_length += run_length_next
                    pixel_data.extend(pixel * (run_length + 1))
                    return run_length + 1
                if palette_size == 0:
                    # plain RLE
                    while num_pixels < pixels_in_tile:
                        color = bytearray(cpixel(it))
                        num_pixels += do_rle(color)
                    if num_pixels != pixels_in_tile:
                        raise ValueError("too many pixels")
                else:
                    palette = [bytearray(cpixel(it)) for p in range(palette_size)]
                    while num_pixels < pixels_in_tile:
                        palette_index = ord(next(it))
                        if palette_index & 0x80:
                            palette_index &= 0x7F
                            # run of length > 1, more bytes follow to determine run length
                            num_pixels += do_rle(palette[palette_index])
                        else:
                            # run of length 1
                            pixel_data.extend(palette[palette_index])
                            num_pixels += 1
                    if num_pixels != pixels_in_tile:
                        raise ValueError("too many pixels")
                self.updateRectangle(tx, ty, tw, th, bytes(pixel_data))
            else:
                # No RLE
                if palette_size == 0:
                    # Raw pixel data
                    pixel_data = b''.join(bytes(cpixel(it)) for _ in range(pixels_in_tile))
                    self.updateRectangle(tx, ty, tw, th, bytes(pixel_data))
                elif palette_size == 1:
                    # Fill tile with plain color
                    color = bytearray(cpixel(it))
                    self.fillRectangle(tx, ty, tw, th, bytes(color))
                else:
                    if palette_size > 16:
                        raise ValueError(
                            "Palette of size {0} is not allowed".format(palette_size))
                    palette = [bytearray(cpixel(it)) for _ in range(palette_size)]
                    # Packed palette: index width depends on palette size.
                    if palette_size == 2:
                        next_index = _zrle_next_bit(it, pixels_in_tile)
                    elif palette_size == 3 or palette_size == 4:
                        next_index = _zrle_next_dibit(it, pixels_in_tile)
                    else:
                        next_index = _zrle_next_nibble(it, pixels_in_tile)
                    for palette_index in next_index:
                        pixel_data.extend(palette[palette_index])
                    self.updateRectangle(tx, ty, tw, th, bytes(pixel_data))
            # Next tile
            tx = tx + 64
            if tx >= x + width:
                tx = x
                ty = ty + 64
        self._doConnection()
    # --- Pseudo Cursor Encoding
    def _handleDecodePsuedoCursor(self, block, x, y, width, height):
        """Cursor pseudo-encoding: pixel image followed by a 1-bit mask."""
        split = width * height * self.bypp
        image = block[:split]
        mask = block[split:]
        self.updateCursor(x, y, width, height, image, mask)
        self._doConnection()
    # --- Pseudo Desktop Size Encoding
    def _handleDecodeDesktopSize(self, width, height):
        """DesktopSize pseudo-encoding: the framebuffer was resized."""
        self.updateDesktopSize(width, height)
        self._doConnection()
    # --- other server messages
    def _handleServerCutText(self, block):
        """ServerCutText: read the length of the clipboard payload."""
        (length, ) = unpack("!xxxI", block)
        self.expect(self._handleServerCutTextValue, length)
    def _handleServerCutTextValue(self, block):
        """Deliver the server's clipboard text to the application."""
        self.copy_text(block)
        self.expect(self._handleConnection, 1)
    #------------------------------------------------------
    # incoming data redirector
    #------------------------------------------------------
    def dataReceived(self, data):
        """Buffer incoming bytes and run the current state handler."""
        #~ sys.stdout.write(repr(data) + '\n')
        #~ print len(data), ", ", len(self._packet)
        self._packet.append(data)
        self._packet_len += len(data)
        self._handler()
    def _handleExpected(self):
        """Dispatch buffered data to the expected handler, possibly repeatedly.

        The guard flag prevents recursion when a handler's expect() call
        could itself trigger dispatch on already-buffered data.
        """
        if self._packet_len >= self._expected_len:
            buffer = b''.join(self._packet)
            while len(buffer) >= self._expected_len:
                self._already_expecting = 1
                block, buffer = buffer[:self._expected_len], buffer[self._expected_len:]
                #~ log.msg("handle %r with %r\n" % (block, self._expected_handler.__name__))
                self._expected_handler(block, *self._expected_args, **self._expected_kwargs)
            self._packet[:] = [buffer]
            self._packet_len = len(buffer)
            self._already_expecting = 0
    def expect(self, handler, size, *args, **kwargs):
        """Register `handler` to be called with the next `size` bytes."""
        #~ log.msg("expect(%r, %r, %r, %r)\n" % (handler.__name__, size, args, kwargs))
        self._expected_handler = handler
        self._expected_len = size
        self._expected_args = args
        self._expected_kwargs = kwargs
        if not self._already_expecting:
            self._handleExpected()   #just in case that there is already enough data
    #------------------------------------------------------
    # client -> server messages
    #------------------------------------------------------
    def setPixelFormat(self, bpp=32, depth=24, bigendian=0, truecolor=1, redmax=255, greenmax=255, bluemax=255, redshift=0, greenshift=8, blueshift=16):
        """Send SetPixelFormat and remember the format for local decoding."""
        pixformat = pack("!BBBBHHHBBBxxx", bpp, depth, bigendian, truecolor, redmax, greenmax, bluemax, redshift, greenshift, blueshift)
        self.transport.write(pack("!Bxxx16s", 0, pixformat))
        #remember these settings
        self.bpp, self.depth, self.bigendian, self.truecolor = bpp, depth, bigendian, truecolor
        self.redmax, self.greenmax, self.bluemax = redmax, greenmax, bluemax
        self.redshift, self.greenshift, self.blueshift = redshift, greenshift, blueshift
        self.bypp = self.bpp // 8        #calc bytes per pixel
        #~ print self.bypp
    def setEncodings(self, list_of_encodings):
        """Send SetEncodings with the encodings the client can decode."""
        self.transport.write(pack("!BxH", 2, len(list_of_encodings)))
        for encoding in list_of_encodings:
            self.transport.write(pack("!i", encoding))
    def framebufferUpdateRequest(self, x=0, y=0, width=None, height=None, incremental=0):
        """Request a (by default full-screen, non-incremental) screen update."""
        if width  is None: width  = self.width - x
        if height is None: height = self.height - y
        self.transport.write(pack("!BBHHHH", 3, incremental, x, y, width, height))
    def keyEvent(self, key, down=1):
        """For most ordinary keys, the "keysym" is the same as the corresponding ASCII value.
        Other common keys are shown in the KEY_ constants."""
        self.transport.write(pack("!BBxxI", 4, down, key))
    def pointerEvent(self, x, y, buttonmask=0):
        """Indicates either pointer movement or a pointer button press or release. The pointer is
        now at (x-position, y-position), and the current state of buttons 1 to 8 are represented
        by bits 0 to 7 of button-mask respectively, 0 meaning up, 1 meaning down (pressed).
        """
        self.transport.write(pack("!BBHH", 5, buttonmask, x, y))
    def clientCutText(self, message):
        """The client has new ASCII text in its cut buffer.
        (aka clipboard)
        """
        self.transport.write(pack("!BxxxI", 6, len(message)) + message)
    #------------------------------------------------------
    # callbacks
    # override these in your application
    #------------------------------------------------------
    def vncConnectionMade(self):
        """connection is initialized and ready.
           typically, the pixel format is set here."""
    def vncRequestPassword(self):
        """a password is needed to log on, use sendPassword() to
           send one."""
        if self.factory.password is None:
            log.msg("need a password")
            self.transport.loseConnection()
            return
        self.sendPassword(self.factory.password)
    def vncAuthFailed(self, reason):
        """called when the authentication failed.
           the connection is closed."""
        log.msg("Cannot connect %s" % reason)
    def beginUpdate(self):
        """called before a series of updateRectangle(),
           copyRectangle() or fillRectangle()."""
    def commitUpdate(self, rectangles=None):
        """called after a series of updateRectangle(), copyRectangle()
           or fillRectangle() are finished.
           typically, here is the place to request the next screen
           update with FramebufferUpdateRequest(incremental=1).
           argument is a list of tuples (x,y,w,h) with the updated
           rectangles."""
    def updateRectangle(self, x, y, width, height, data):
        """new bitmap data. data is a string in the pixel format set
           up earlier."""
    def copyRectangle(self, srcx, srcy, x, y, width, height):
        """used for copyrect encoding. copy the given rectangle
           (src, srxy, width, height) to the target coords (x,y)"""
    def fillRectangle(self, x, y, width, height, color):
        """fill the area with the color. the color is a string in
           the pixel format set up earlier"""
        #fallback variant, use update rectangle
        #override with specialized function for better performance
        self.updateRectangle(x, y, width, height, color*width*height)
    def updateCursor(self, x, y, width, height, image, mask):
        """ New cursor, focuses at (x, y)
        """
    def updateDesktopSize(self, width, height):
        """ New desktop size of width*height. """
    def bell(self):
        """bell"""
    def copy_text(self, text):
        """The server has new ASCII text in its cut buffer.
        (aka clipboard)"""
class RFBFactory(protocol.ClientFactory):
    """Twisted factory producing RFB client connections.

    Applications normally override `protocol` with their own RFBClient
    subclass.
    """

    # the class of the protocol to build
    protocol = RFBClient

    def __init__(self, password=None, shared=0):
        # password: VNC password used during authentication (None = none given)
        # shared: non-zero requests a shared session in ClientInit
        self.password = password
        self.shared = shared
class RFBDes(pyDes.des):
    def setKey(self, key):
        """RFB protocol for authentication requires client to encrypt
        challenge sent by server with password using DES method. However,
        bits in each byte of the password are put in reverse order before
        using it as encryption key."""
        mirrored = []
        for ch in key:
            # Reverse the bit order of this byte by reversing its
            # zero-padded 8-bit binary representation.
            flipped = int('{:08b}'.format(ord(ch))[::-1], 2)
            mirrored.append(chr(flipped))
        super(RFBDes, self).setKey(mirrored)
# --- test code only, see vncviewer.py
if __name__ == '__main__':
    class RFBTest(RFBClient):
        """dummy client"""
        def vncConnectionMade(self):
            print("Screen format: depth=%d bytes_per_pixel=%r" % (self.depth, self.bpp))
            print("Desktop name: %r" % self.name)
            # Fix: RFBClient's methods are setEncodings() and
            # framebufferUpdateRequest() (lower-case first letter); the
            # previous SetEncodings/FramebufferUpdateRequest calls raised
            # AttributeError as soon as the connection was established.
            self.setEncodings([RAW_ENCODING])
            self.framebufferUpdateRequest()
        def updateRectangle(self, x, y, width, height, data):
            print("%s " * 5 % (x, y, width, height, repr(data[:20])))
    class RFBTestFactory(protocol.ClientFactory):
        """test factory"""
        protocol = RFBTest
        def clientConnectionLost(self, connector, reason):
            print(reason)
            from twisted.internet import reactor
            reactor.stop()
            #~ connector.connect()
        def clientConnectionFailed(self, connector, reason):
            print("connection failed:", reason)
            from twisted.internet import reactor
            reactor.stop()
    class Options(usage.Options):
        """command line options"""
        optParameters = [
            ['display',     'd', '0',               'VNC display'],
            ['host',        'h', 'localhost',       'remote hostname'],
            ['outfile',     'o', None,              'Logfile [default: sys.stdout]'],
        ]
    o = Options()
    try:
        o.parseOptions()
    except usage.UsageError as errortext:
        print("%s: %s" % (sys.argv[0], errortext))
        print("%s: Try --help for usage details." % (sys.argv[0]))
        raise SystemExit(1)
    logFile = sys.stdout
    if o.opts['outfile']:
        logFile = o.opts['outfile']
    log.startLogging(logFile)
    host = o.opts['host']
    # VNC display N listens on TCP port 5900+N.
    port = int(o.opts['display']) + 5900
    application = service.Application("rfb test")   # create Application
    # connect to this host and port, and reconnect if we get disconnected
    vncClient = internet.TCPClient(host, port, RFBFactory())   # create the service
    vncClient.setServiceParent(application)
    # this file should be run as 'twistd -y rfb.py' but it didn't work -
    # could't import crippled_des.py, so using this hack.
    # now with crippled_des.py replaced with pyDes this can be no more actual
    from twisted.internet import reactor
    vncClient.startService()
    reactor.run()
| sibson/vncdotool | vncdotool/rfb.py | Python | mit | 35,587 |
"""Solve the Project Euler problems using functional Python.
https://projecteuler.net/archives
"""
from importlib import import_module
from os import listdir
from os.path import abspath, dirname
from re import match
# Problem numbers for which a solution module ``pNNN.py`` exists next to
# this file.
SOLVED = {
    int(found.group(1))
    for found in (
        match(r"^p(\d{3})\.py$", entry)
        for entry in listdir(abspath(dirname(__file__)))
    )
    if found
}
def compute(problem: int):
    """Compute the answer to problem `problem`."""
    assert problem in SOLVED, "Problem currently unsolved."
    # Solution modules are named pNNN and expose a compute() entry point.
    return import_module("euler.p{:03d}".format(problem)).compute()
| 2Cubed/ProjectEuler | euler/__init__.py | Python | mit | 584 |
# coding: utf8
# Greek Numeral Converter
#
# https://en.wikipedia.org/wiki/Greek_numerals

# Mark appended to composite numerals.
keraia = 'ʹ'
# Multiply by 10000
myriads = 'Μ'

# Additive values and their Greek letter (or digraph) representations.
numbers_to_letters = {
    1: 'Α',
    2: 'Β',
    3: 'Γ',
    4: 'Δ',
    5: 'Ε',
    6: 'ΣΤ',
    7: 'Ζ',
    8: 'Η',
    9: 'Θ',
    10: 'Ι',
    20: 'Κ',
    30: 'Λ',
    40: 'Μ',
    50: 'Ν',
    60: 'Ξ',
    70: 'Ο',
    80: 'Π',
    90: 'Ϟ',
    100: 'Ρ',
    200: 'Σ',
    300: 'Τ',
    400: 'Υ',
    500: 'Φ',
    600: 'Χ',
    700: 'Ψ',
    800: 'Ω',
    900: 'Ϡ',
    1000: '͵Α',
    2000: '͵Β',
    3000: '͵Γ',
    4000: '͵Δ',
    5000: '͵Ε',
    6000: '͵Ϛ',
    7000: '͵Z',
    8000: '͵H',
    9000: '͵Θ',
}


def to_greek_numeral(num):
    """Convert a non-negative integer to its Greek numeral string.

    Values present directly in the table are returned without the keraia;
    composite values are built greedily (largest value first) and get the
    trailing keraia mark.
    """
    if num == 0:
        return ''
    if num in numbers_to_letters:
        return numbers_to_letters[num]
    parts = []
    # FIX: sort the table values explicitly in descending order instead of
    # relying on dict insertion order (`list(keys()); reverse()`), which is
    # only guaranteed to be ascending on CPython 3.7+ with this literal.
    for value in sorted(numbers_to_letters, reverse=True):
        if num - value >= 0:
            parts.append(numbers_to_letters[value])
            num = num - value
    return ''.join(parts) + keraia
def date_string_to_greek_number(datestring):
    """Convert a '/'-separated numeric date string to Greek numerals."""
    converted = [to_greek_numeral(int(piece)) for piece in datestring.split('/')]
    return '/'.join(converted)
if __name__ == '__main__':
    import unittest

    class GreekNumericsTest(unittest.TestCase):
        def test_numbers(self):
            # Single-letter values are returned without the keraia suffix.
            for i in range(1, 10):
                self.assertEqual(numbers_to_letters[i], to_greek_numeral(i))
            # Composite values carry the trailing keraia.
            tests = (
                (1983, '͵ΑϠΠΓʹ'),
                (2017, '͵ΒΙΖʹ'),
                (13000, '͵Θ͵Δʹ'),
                (666, 'ΧΞΣΤʹ')
            )
            for n, expected in tests:
                self.assertEqual(expected, to_greek_numeral(n))

    unittest.main()
| pgk/kountanis.com | _code_examples/greek_numerals.py | Python | mit | 1,783 |
# Copyright (c) 2020 Manfred Moitzi
# License: MIT License
from pathlib import Path
from time import perf_counter
import math
from ezdxf.addons import MengerSponge
from ezdxf.addons import r12writer
from ezdxf.render.forms import sphere, circle, translate
DIR = Path("~/Desktop/Outbox").expanduser()
def menger_sponge(filename, level=1, kind=0):
    """Build a Menger sponge mesh and write it to `filename` as DXF R12."""
    t0 = perf_counter()
    sponge = MengerSponge(level=level, kind=kind).mesh()
    t1 = perf_counter()
    print(f"Build menger sponge <{kind}> in {t1 - t0:.5f}s.")

    with r12writer(filename) as r12:
        r12.add_polyface(sponge.vertices, sponge.faces, color=1)
    # FIX: actually interpolate the output path (the f-string had no
    # placeholder).
    print(f'saved as "{filename}".')
def polymesh(filename, size=(10, 10)):
    """Write an m x n sine-surface POLYLINE mesh to `filename` (DXF R12)."""
    m, n = size  # rows, cols
    dx = math.pi / m * 2
    dy = math.pi / n * 2
    vertices = []
    for x in range(m):  # rows second
        z1 = math.sin(dx * x)
        for y in range(n):  # cols first
            z2 = math.sin(dy * y)
            z = z1 * z2
            vertices.append((x, y, z))

    with r12writer(filename) as r12:
        r12.add_polymesh(vertices, size=size, color=1)
    # FIX: actually interpolate the output path (the f-string had no
    # placeholder).
    print(f'saved as "{filename}".')
def polyface_sphere(filename):
    """Write a quad-faced sphere as a POLYFACE to `filename` (DXF R12)."""
    mesh = sphere(16, 8, quads=True)
    with r12writer(filename) as r12:
        r12.add_polyface(mesh.vertices, mesh.faces, color=1)
    # FIX: actually interpolate the output path (the f-string had no
    # placeholder).
    print(f'saved as "{filename}".')
def polylines(filename):
    """Write sample open/closed/bulged 2D polylines to `filename` (DXF R12)."""
    with r12writer(filename) as r12:
        r12.add_polyline_2d(circle(8), color=1, closed=False)
        r12.add_polyline_2d(
            translate(circle(8), vec=(3, 0)), color=3, closed=True
        )
        # Vertex format "xybse": x, y, bulge, start-width, end-width.
        r12.add_polyline_2d(
            [(0, 4), (4, 4, 1), (8, 4, 0, 0.2, 0.000001), (12, 4)],
            format="xybse",
            start_width=0.1,
            end_width=0.1,
            color=5,
        )
    # FIX: actually interpolate the output path (the f-string had no
    # placeholder).
    print(f'saved as "{filename}".')
if __name__ == "__main__":
    # Generate all four sample DXF files into the output directory.
    menger_sponge(DIR / "menger_sponge_r12.dxf", level=2)
    polymesh(DIR / "polymesh.dxf", size=(20, 10))
    polyface_sphere(DIR / "sphere.dxf")
    polylines(DIR / "polylines.dxf")
| mozman/ezdxf | examples/addons/r12writer.py | Python | mit | 2,030 |
"""Group models."""
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group as AuthGroup, Permission
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.db.models.signals import m2m_changed
from django.utils.safestring import mark_safe
from taggit.managers import TaggableManager
from taggit.models import Tag
import autocomplete_light
from open_connect.media.models import Image, ShortenedURL
from open_connect.connectmessages.models import Thread, Message
from open_connect.connect_core.utils.location import get_coordinates, STATES
from open_connect.connect_core.utils.models import TimestampModel
from open_connect.groups.tasks import remove_user_from_group
# Register Tag with autocomplete-light so group tag fields get autocompletion.
autocomplete_light.register(Tag)

# (stored value, human-readable label) choices for Group.status.
GROUP_STATUSES = (
    ('active', 'Active'),
    ('deleted', 'Deleted')
)
class Category(TimestampModel):
    """Group Category"""
    slug = models.SlugField(unique=True)
    name = models.CharField(max_length=127)
    # Hex colour string (e.g. '#000000') used when displaying the category.
    color = models.CharField(
        verbose_name='Category Color', max_length=7, default='#000000')

    class Meta(object):
        """Meta options for Category model"""
        verbose_name = 'Category'
        verbose_name_plural = 'Categories'

    def __unicode__(self):
        """Unicode Representation of Category"""
        return self.name
class GroupManager(models.Manager):
    """Manager for Group model."""

    def get_queryset(self):
        """
        Ensures that all queries for groups also query the auth group
        model. Soft-deleted groups are excluded by default.
        """
        return super(GroupManager, self).get_queryset().select_related(
            'group', 'category').exclude(status='deleted')

    def with_deleted(self):
        """Includes deleted groups."""
        return super(GroupManager, self).get_queryset().select_related(
            'group', 'category')

    # pylint: disable=no-self-use
    def create(self, **kwargs):
        """Create a new group.

        Accepts a `name` keyword and creates the backing auth Group with
        that name when an explicit `group` is not supplied.
        """
        name = kwargs.pop('name', None)
        if 'group' not in kwargs and name:
            kwargs['group'] = AuthGroup.objects.create(name=name)
        return super(GroupManager, self).create(**kwargs)

    def published(self, **kwargs):
        """Get published groups."""
        return self.get_queryset().filter(published=True, **kwargs)

    def search(self, search=None, location=None):
        """Groups search by free text and/or location string."""
        groups = Group.objects.published().select_related('image', 'group')
        if search:
            # Match against name, category, description, or tag slug.
            groups = groups.filter(
                Q(group__name__icontains=search)
                | Q(category__name__icontains=search)
                | Q(description__icontains=search)
                | Q(tags__slug__icontains=search)
            ).distinct()
        if location:
            groups = self.location_search(location, queryset=groups)
        return groups

    def location_search(self, location, queryset=None):
        """Groups search by location."""
        coords = get_coordinates(location)

        # If no coordinates are provided, return an empty queryset
        if not coords:
            return Group.objects.none()

        if queryset is None:
            queryset = Group.objects.published()

        # Pass the job of finding distance to the database using this query
        # (spherical-law-of-cosines distance; the 69.09 factor converts
        # degrees to miles — presumably, confirm against the schema's units).
        sql = (
            'SELECT '
            '(degrees(acos( '
            'sin(radians(latitude)) '
            '* sin(radians(%s)) '
            '+ cos(radians(latitude)) '
            '* cos(radians(%s)) '
            '* cos(radians(longitude - %s) ) '
            ') ) * 69.09)'
        )

        result = queryset.extra(
            select={'distance': sql},
            select_params=(coords[0], coords[0], coords[1]),
            # We use the same SQL again to do filtering by distance and
            # radius. We cannot use the param in the `SELECT` because
            # of a postgres limitation
            where=['(' + sql + ') <= "groups_group"."radius"'],
            params=(coords[0], coords[0], coords[1]),
            order_by=['-featured', 'distance', 'group__name']
        ).distinct()
        return result
class Group(TimestampModel):
    """Group model.

    Wraps django.contrib.auth's Group (one-to-one) with publication,
    moderation, location, and ownership metadata.
    """
    group = models.OneToOneField(AuthGroup)
    private = models.BooleanField(
        default=False,
        help_text='Membership to private groups is moderated.'
    )
    published = models.BooleanField(
        default=True,
        verbose_name=u'Publish this group',
        help_text='Published groups can be seen by all users.'
                  ' Unpublished groups can only be seen if'
                  ' you have the link.'
    )
    moderated = models.BooleanField(
        default=False,
        verbose_name=u'Moderate this group',
        help_text='Posts by users must be moderated by an admin.'
    )
    featured = models.BooleanField(
        default=False,
        verbose_name=u'This is an official group',
        help_text='Official groups are managed by staff and '
                  'appear first in search results.',
        db_index=True
    )
    member_list_published = models.BooleanField(
        default=True,
        help_text='Group member list is public'
    )
    category = models.ForeignKey(
        'groups.Category', verbose_name=u'Category', default=1)
    display_location = models.CharField(blank=True, max_length=100)
    # Location triple; all three must be set for a local (non-national)
    # group — see save() and clean().
    latitude = models.FloatField(blank=True, null=True)
    longitude = models.FloatField(blank=True, null=True)
    radius = models.IntegerField(blank=True, null=True)
    is_national = models.BooleanField(default=True, db_index=True)
    # owners get permissions using a receiver below: group_owners_changed
    owners = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        blank=True,
        related_name='owned_groups_set')
    whitelist_users = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        blank=True,
        related_name='whitelist_set'
    )
    description = models.TextField(blank=True)
    tags = TaggableManager(blank=True)
    image = models.ForeignKey(Image, blank=True, null=True)
    state = models.CharField(
        max_length=3,
        choices=[(s, s) for s in STATES],
        blank=True,
        db_index=True
    )
    tos_accepted_at = models.DateTimeField(blank=True, null=True)
    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
    status = models.CharField(
        choices=GROUP_STATUSES,
        default='active',
        db_index=True,
        max_length=50
    )

    objects = GroupManager()

    class Meta(object):
        # pylint: disable=no-init,too-few-public-methods
        """Group meta options."""
        ordering = ['-featured', '-is_national', 'group__name']
        permissions = (
            ('can_edit_any_group', 'Can edit any group.'),
        )

    def __unicode__(self):
        """Convert group to a unicode string."""
        return u'%s' % self.group.name

    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        """Override save to auto-set is_national.

        A group is national unless longitude, latitude, and radius are
        all present.
        """
        if not all([self.longitude, self.latitude, self.radius]):
            self.is_national = True
        else:
            self.is_national = False
        return super(Group, self).save(
            force_insert, force_update, using, update_fields)

    def delete(self, using=None):
        """Don't actually delete.

        Soft-deletes by flagging the status and queueing asynchronous
        removal of every member via a celery task.
        """
        self.status = 'deleted'
        self.save()
        for user in self.group.user_set.all().iterator():
            remove_user_from_group.delay(user=user, group=self)

    def get_absolute_url(self):
        """Get the full local URL of an object"""
        return reverse('group_details', args=(self.pk,))

    @property
    def full_url(self):
        """The URL (including the origin) of the group detail page"""
        return settings.ORIGIN + self.get_absolute_url()

    def clean(self):
        """Custom group validation: location fields are all-or-nothing."""
        required_together = [
            self.latitude, self.longitude, self.radius]
        if any(required_together) and not all(required_together):
            raise ValidationError(
                "If a location is specified, name, latitude,"
                " longitude, and radius are required."
            )

    def get_members(self):
        """Return a queryset of all users in the group."""
        return self.group.user_set.all()

    def get_members_avatar_prioritized(self):
        """Return a queryset of group members prioritizing those with avatars"""
        # Selecting null as an extra column and sorting on that column
        # to preserve sorting when switching between MySQL and PostgreSQL.
        return self.get_members().extra(
            select={'image_null': 'image_id is null'}
        ).select_related('image').order_by('image_null')

    def public_threads_by_user(self, user):
        """All approved threads sent to group that the user is allowed to see"""
        return Thread.public.by_user(user).filter(group=self)

    def public_threads(self):
        """All the threads sent to this group."""
        return Thread.public.by_group(group=self)

    @property
    def unmoderated_messages(self):
        """
        Return all unmoderated (status='pending') messages in this group.
        """
        return Message.objects.filter(
            thread__group=self, status='pending')

    @property
    def total_unmoderated_messages(self):
        """
        Returns the total number of unmoderated messages
        """
        return self.unmoderated_messages.count()

    def images(self, user):
        """Returns popular images related to this group."""
        # We need to defer the exif field with distinct or postgres punches
        # you in the face. http://bit.ly/1k7HBs8
        return Image.popular.with_user(
            user=user
        ).filter(
            message__thread__group=self
        )

    def links(self):
        """Returns popular links related to this group."""
        return ShortenedURL.popular.filter(
            message__thread__group=self,
            message__status='approved')
def group_owners_changed(**kwargs):
    """
    Handle changes in group ownership.

    This could be broken out into 2 signal receivers, but that would involve
    2 duplicate queries to the User table to get a list of changed owners
    """
    # If this is a change in owners, grab the list of owners
    if kwargs['action'] in ['post_add', 'post_remove']:
        users = get_user_model().objects.filter(pk__in=kwargs['pk_set'])

        # Clear the user's 'owned_groups' cache
        for user in users:
            cache.delete(user.cache_key + 'owned_groups')

        # Make sure group owners can direct message all other users
        if kwargs['action'] == 'post_add':
            direct_message_permission = Permission.objects.get(
                codename='can_initiate_direct_messages',
                content_type__app_label='accounts')
            for user in users:
                user.user_permissions.add(direct_message_permission)


# Fire the receiver whenever Group.owners m2m membership changes.
m2m_changed.connect(group_owners_changed, Group.owners.through)
class GroupRequestManager(models.Manager):
    """Manager for GroupRequest."""

    def unapproved(self):
        """Get unapproved requests.

        A request is pending until a moderator is recorded on it.
        """
        return super(
            GroupRequestManager, self
        ).get_queryset().filter(moderated_by__isnull=True)
class GroupRequest(TimestampModel):
    """GroupRequest model: a user's request to join a (private) group."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    group = models.ForeignKey(Group)
    # Moderation audit trail: who decided, when, and the outcome
    # (None = still pending).
    moderated_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        blank=True,
        null=True,
        related_name='approved_by'
    )
    moderated_at = models.DateTimeField(blank=True, null=True)
    approved = models.NullBooleanField(blank=True)

    objects = GroupRequestManager()

    def __unicode__(self):
        """Convert GroupRequest to a unicode string (an HTML link)."""
        return mark_safe(
            u'<a href="{url}">{name} ({email} / {state}, {zip_code})'
            u' requested to join {group}.</a>'.format(
                url=self.user.get_absolute_url(),
                email=self.user.email,
                state=self.user.state,
                zip_code=self.user.zip_code,
                name=self.user.get_real_name(),
                group=self.group
            )
        )
| lpatmo/actionify_the_news | open_connect/groups/models.py | Python | mit | 12,456 |
import tensorflow as tf
'''
Model for sequence classification and localization with weighted loss
'''
class DeepLocalizationWeightedLossVariableLengthDeeper:
    """CNN + GRU model for joint digit-sequence classification and
    localization, trained with a weighted sum of both losses."""

    def get_name(self):
        """Return the identifier used to name this model's runs."""
        return "deep_localization_weighted_loss_variable_length_6"

    def input_placeholders(self):
        """Create placeholders for images, labels, positions, dropout keep
        probability, and the training flag."""
        inputs_placeholder = tf.placeholder(tf.float32, shape=[None, 128, 256], name="inputs")
        labels_placeholder = tf.placeholder(tf.float32, shape=[None, 5, 11], name="labels")
        positions_placeholder = tf.placeholder(tf.float32, shape=[None, 4], name="positions")
        keep_prob_placeholder = tf.placeholder(tf.float32)
        is_training_placeholder = tf.placeholder(tf.bool)
        return inputs_placeholder, labels_placeholder, positions_placeholder, keep_prob_placeholder, is_training_placeholder

    def inference(self, input, keep_prob, is_training):
        """Build the forward graph.

        A six-layer conv/pool stack feeds two heads: a GRU unrolled for the
        5 sequence positions (digit logits) and a fully-connected stack for
        the 4 position outputs.
        """
        with tf.name_scope("inference"):
            input = tf.reshape(input, [-1, 128, 256, 1])

            conv1 = self._convolutional(input, [10, 10, 1, 8])
            relu1 = self._relu(conv1)
            max_pool1 = self._max_pooling(relu1, [1, 2, 2, 1], [1, 2, 2, 1])

            conv2 = self._convolutional(max_pool1, [8, 8, 8, 14])
            relu2 = self._relu(conv2)
            max_pool2 = self._max_pooling(relu2, [1, 2, 2, 1], [1, 2, 2, 1])

            conv3 = self._convolutional(max_pool2, [6, 6, 14, 20])
            relu3 = self._relu(conv3)
            max_pool3 = self._max_pooling(relu3, [1, 2, 2, 1], [1, 2, 2, 1])

            conv4 = self._convolutional(max_pool3, [4, 4, 20, 24])
            relu4 = self._relu(conv4)
            max_pool4 = self._max_pooling(relu4, [1, 2, 2, 1], [1, 2, 2, 1])

            conv5 = self._convolutional(max_pool4, [2, 2, 24, 32])
            relu5 = self._relu(conv5)
            max_pool5 = self._max_pooling(relu5, [1, 2, 2, 1], [1, 2, 2, 1])

            conv6 = self._convolutional(max_pool5, [2, 2, 32, 128])
            relu6 = self._relu(conv6)
            max_pool6 = self._max_pooling(relu6, [1, 2, 2, 1], [1, 2, 2, 1])

            reshaped = tf.reshape(max_pool6, [-1, 1024])

            # Digit head: one GRU step (with shared weights) per sequence slot.
            logits = []
            gru = tf.contrib.rnn.GRUCell(576)
            state = gru.zero_state(tf.shape(reshaped)[0], tf.float32)
            with tf.variable_scope("RNN"):
                for i in range(5):
                    if i > 0: tf.get_variable_scope().reuse_variables()
                    output, state = gru(reshaped, state)
                    number_logits = self._fully_connected(output, 576, 11)
                    logits.append(number_logits)

            # Position head: FC stack with dropout between layers.
            fc_position1 = self._fully_connected(reshaped, 1024, 768)
            dropout_position_1 = tf.nn.dropout(fc_position1, keep_prob)
            relu_position1 = self._relu(dropout_position_1)
            fc_position2 = self._fully_connected(relu_position1, 768, 512)
            dropout_position_2 = tf.nn.dropout(fc_position2, keep_prob)
            relu_position2 = self._relu(dropout_position_2)
            fc_position3 = self._fully_connected(relu_position2, 512, 256)
            dropout_position_3 = tf.nn.dropout(fc_position3, keep_prob)
            relu_position3 = self._relu(dropout_position_3)
            fc_position4 = self._fully_connected(relu_position3, 256, 64)
            dropout_position_4 = tf.nn.dropout(fc_position4, keep_prob)
            relu_position4 = self._relu(dropout_position_4)
            fc_position5 = self._fully_connected(relu_position4, 64, 32)
            dropout_position_5 = tf.nn.dropout(fc_position5, keep_prob)
            relu_position5 = self._relu(dropout_position_5)
            predicted_positions = self._fully_connected(relu_position5, 32, 4)

            return tf.stack(logits, axis=1), predicted_positions

    def loss(self, logits, labels, predicted_positions, positions):
        """Build the weighted classification + localization loss."""
        with tf.name_scope("loss"):
            labels = tf.to_int64(labels)
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits, name="cross_entropy")
            logits_loss = tf.reduce_mean(cross_entropy, name="cross_entropy_mean")
            square_error = tf.square(positions - predicted_positions, name="square_error")
            position_loss = tf.reduce_mean(square_error, name="square_error_mean")
            total_loss = 1000 * logits_loss + position_loss
            tf.summary.scalar("logits_loss", logits_loss)
            tf.summary.scalar("positions_loss", position_loss)
            # BUG FIX: the "total_loss" summary previously logged the
            # unweighted sum (logits_loss + position_loss) while the value
            # actually returned/optimized is the weighted total above.
            tf.summary.scalar("total_loss", total_loss)
            return {"logits_loss": logits_loss, "positions_loss": position_loss,
                    "total_loss": total_loss}

    def training(self, loss, learning_rate):
        """Create the Adam minimization op for `loss`."""
        with tf.name_scope("training"):
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            train_operation = optimizer.minimize(loss)
            return train_operation

    def evaluation(self, logits, labels, predicted_positions, positions):
        """Build evaluation ops: fully-correct sequence count, per-character
        accuracy numerators/denominators, and position MSE."""
        with tf.name_scope("evaluation"):
            labels = tf.to_int64(labels)
            labels = tf.argmax(labels, 2)
            logits = tf.argmax(logits, 2)
            difference = tf.subtract(labels, logits, name="sub")
            character_errors = tf.count_nonzero(difference, axis=1, name="count_nonzero")
            total_wrong_characters = tf.reduce_sum(character_errors)
            total_characters = tf.to_int64(tf.size(labels))
            total_correct_characters = total_characters - total_wrong_characters
            # A sequence is correct only when it has zero character errors.
            corrects = tf.less_equal(character_errors, 0, name="is_zero")
            position_error = tf.losses.mean_squared_error(positions, predicted_positions)
            return self.tf_count(corrects,
                                 True), corrects, logits, position_error, predicted_positions, total_correct_characters, total_characters

    def tf_count(self, t, val):
        """Count how many elements of tensor `t` equal `val`."""
        elements_equal_to_value = tf.equal(t, val)
        as_ints = tf.cast(elements_equal_to_value, tf.int32)
        count = tf.reduce_sum(as_ints)
        return count

    def _fully_connected(self, input, size_in, size_out, name="fc"):
        """Affine layer: input @ W + b (no activation)."""
        with tf.name_scope(name):
            w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1), name="W")
            b = tf.Variable(tf.constant(0.1, shape=[size_out]), name="b")
            act = tf.matmul(input, w) + b
            return act

    def _convolutional(self, input, dimensions, name="conv"):
        """SAME-padded stride-1 2D convolution with bias."""
        with tf.name_scope(name):
            w = tf.Variable(tf.truncated_normal(dimensions, stddev=0.1), name="W")
            b = tf.Variable(tf.constant(0.1, shape=[dimensions[3]]), name="b")
            return tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding='SAME') + b

    def _max_pooling(self, input, ksize, strides, name="max_pooling"):
        """SAME-padded max pooling."""
        with tf.name_scope(name):
            return tf.nn.max_pool(input, ksize, strides, padding="SAME")

    def _relu(self, input, name="relu"):
        """ReLU activation."""
        with tf.name_scope(name):
            return tf.nn.relu(input)
| thePetrMarek/SequenceOfDigitsRecognition | sequences_of_variable_length/deep_localization_weighted_loss_variable_length_deeper.py | Python | mit | 7,054 |
"""
basic set of `jut run` tests
"""
import json
import unittest
from tests.util import jut
# A syntactically invalid Juttle program and the exact error message that
# `jut run` is expected to report for it.
BAD_PROGRAM = 'foo'
BAD_PROGRAM_ERROR = 'Error line 1, column 1 of main: Error: no such sub: foo'
class JutRunTests(unittest.TestCase):
    """End-to-end tests for `jut run`: error reporting and output formats."""

    def test_jut_run_syntatically_incorrect_program_reports_error_with_format_json(self):
        """
        verify an invalid program reports the failure correctly when using json
        output format
        """
        process = jut('run', BAD_PROGRAM, '-f', 'json')
        process.expect_status(255)
        process.expect_error(BAD_PROGRAM_ERROR)

    def test_jut_run_syntatically_incorrect_program_reports_error_with_format_text(self):
        """
        verify an invalid program reports the failure correctly when using text
        output format
        """
        process = jut('run', BAD_PROGRAM, '-f', 'text')
        process.expect_status(255)
        process.expect_error(BAD_PROGRAM_ERROR)

    def test_jut_run_syntatically_incorrect_program_reports_error_with_format_csv(self):
        """
        verify an invalid program reports the failure correctly when using csv
        output format
        """
        # BUG FIX: this test claimed to cover csv output but passed
        # '-f json', duplicating the json test above.
        process = jut('run', BAD_PROGRAM, '-f', 'csv')
        process.expect_status(255)
        process.expect_error(BAD_PROGRAM_ERROR)

    def test_jut_run_emit_to_json(self):
        """
        use jut to run the juttle program:

            emit -from :2014-01-01T00:00:00.000Z: -limit 5

        and verify the output is in the expected JSON format
        """
        process = jut('run',
                      'emit -from :2014-01-01T00:00:00.000Z: -limit 5')
        process.expect_status(0)
        points = json.loads(process.read_output())
        process.expect_eof()
        self.assertEqual(points,
                         [
                             {'time': '2014-01-01T00:00:00.000Z'},
                             {'time': '2014-01-01T00:00:01.000Z'},
                             {'time': '2014-01-01T00:00:02.000Z'},
                             {'time': '2014-01-01T00:00:03.000Z'},
                             {'time': '2014-01-01T00:00:04.000Z'}
                         ])

    def test_jut_run_emit_to_text(self):
        """
        use jut to run the juttle program:

            emit -from :2014-01-01T00:00:00.000Z: -limit 5

        and verify the output is in the expected text format
        """
        process = jut('run',
                      '--format', 'text',
                      'emit -from :2014-01-01T00:00:00.000Z: -limit 5')
        process.expect_status(0)
        stdout = process.read_output()
        process.expect_eof()
        self.assertEqual(stdout, '2014-01-01T00:00:00.000Z\n'
                                 '2014-01-01T00:00:01.000Z\n'
                                 '2014-01-01T00:00:02.000Z\n'
                                 '2014-01-01T00:00:03.000Z\n'
                                 '2014-01-01T00:00:04.000Z\n')

    def test_jut_run_emit_to_csv(self):
        """
        use jut to run the juttle program:

            emit -from :2014-01-01T00:00:00.000Z: -limit 5

        and verify the output is in the expected csv format
        """
        process = jut('run',
                      '--format', 'csv',
                      'emit -from :2014-01-01T00:00:00.000Z: -limit 5')
        process.expect_status(0)
        stdout = process.read_output()
        process.expect_eof()
        self.assertEqual(stdout, '#time\n'
                                 '2014-01-01T00:00:00.000Z\n'
                                 '2014-01-01T00:00:01.000Z\n'
                                 '2014-01-01T00:00:02.000Z\n'
                                 '2014-01-01T00:00:03.000Z\n'
                                 '2014-01-01T00:00:04.000Z\n')
| jut-io/jut-python-tools | tests/jut_run_tests.py | Python | mit | 3,794 |
#! -*- coding: utf-8 -*-
"""
Web Scraper Project
Scrape data from a regularly updated website livingsocial.com and
save to a database (postgres).
Scrapy spider part - it actually performs scraping.
"""
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.loader import XPathItemLoader
from scrapy.contrib.loader.processor import Join, MapCompose
from scraper_app.items import LivingSocialDeal
class LivingSocialSpider(BaseSpider):
    """
    Spider for regularly updated livingsocial.com site, San Francisco page
    """
    name = "livingsocial"
    allowed_domains = ["livingsocial.com"]
    start_urls = ["https://www.livingsocial.com/cities/15-san-francisco"]

    # XPath that selects one <li> node per deal on the listing page.
    deals_list_xpath = '//li[@dealid]'

    # Item field name -> XPath (relative to a deal node) used to fill it.
    item_fields = {
        'title': './/span[@itemscope]/meta[@itemprop="name"]/@content',
        'link': './/a/@href',
        'location': './/a/div[@class="deal-details"]/p[@class="location"]/text()',
        'original_price': './/a/div[@class="deal-prices"]/div[@class="deal-strikethrough-price"]/div[@class="strikethrough-wrapper"]/text()',
        'price': './/a/div[@class="deal-prices"]/div[@class="deal-price"]/text()',
        'end_date': './/span[@itemscope]/meta[@itemprop="availabilityEnds"]/@content'
    }

    def parse(self, response):
        """
        Default callback used by Scrapy to process downloaded responses

        Testing contracts:
        @url http://www.livingsocial.com/cities/15-san-francisco
        @returns items 1
        @scrapes title link
        """
        selector = HtmlXPathSelector(response)

        # iterate over deals
        for deal in selector.xpath(self.deals_list_xpath):
            loader = XPathItemLoader(LivingSocialDeal(), selector=deal)

            # define processors: strip whitespace on input, join parts on output
            loader.default_input_processor = MapCompose(unicode.strip)
            loader.default_output_processor = Join()

            # iterate over fields and add xpaths to the loader
            for field, xpath in self.item_fields.iteritems():
                loader.add_xpath(field, xpath)
            yield loader.load_item()
| enilsen16/python | scrape/living_social/scraper_app/spiders/livingsocial_spider.py | Python | mit | 2,134 |
from bs4 import BeautifulSoup
from models.course import Course
import requests
# Default form fields for the timetable search endpoint. TERMYEAR,
# subj_code, CRSE_NUMBER, and crn are overwritten per query; open_only
# restricts results to sections with open seats.
default_postdata = {
    'CAMPUS': '0',
    'TERMYEAR': '201709',
    'CORE_CODE': 'AR%',
    'subj_code': '',
    'CRSE_NUMBER': '',
    'crn': '',
    'open_only': 'on',
    'BTN_PRESSED': 'FIND class sections',
}

# Virginia Tech Banner timetable search endpoint.
url = 'https://banweb.banner.vt.edu/ssb/prod/HZSKVTSC.P_ProcRequest'
def _get_open_courses(data):
    """POST the search form `data` and parse the result table into Courses."""
    response = requests.post(url, data=data)
    soup = BeautifulSoup(response.content, 'html5lib')
    rows = soup.select('table.dataentrytable tbody tr')

    # The first row holds the column labels; a header-only (or empty)
    # table means the search produced no matches.
    open_courses = []
    for row in rows[1:]:
        texts = [cell.get_text() for cell in row.select('td')]
        crn = texts[0].strip()
        label = texts[1].strip()
        title = texts[2].strip()
        professor = texts[6].strip()
        open_courses.append(Course(crn, label, title, professor))
    return open_courses
def get_open_courses_by_course(subj, num, semester):
    """ Get the open courses that match the course subject and number passed in

    :param subj: The subject abbreviation
    :param num: The course number
    :return: Returns a list of the open courses that are matched
    """
    form = dict(default_postdata)
    form['subj_code'] = subj.strip().upper()
    form['CRSE_NUMBER'] = num.strip()
    form['TERMYEAR'] = semester
    return _get_open_courses(form)
def get_open_courses_by_crn(crn, semester):
    """ Get the open course that matches the crn passed in

    :param crn: The course request number of the course section
    :return: Returns a list of the open courses that are matched
    """
    form = dict(default_postdata)
    form['crn'] = crn.strip()
    form['TERMYEAR'] = semester
    return _get_open_courses(form)
| amhokies/Timetable-Stalker | course_search.py | Python | mit | 2,048 |
# -*- coding: utf-8 -*-
#
# spikeplot - plot_cluster.py
#
# Philipp Meier <pmeier82 at googlemail dot com>
# 2011-09-29
#
"""scatter plot for clustering data"""
__docformat__ = 'restructuredtext'
__all__ = ['cluster']
##---IMPORTS
from .common import COLOURS, save_figure, check_plotting_handle, mpl, plt
##---FUNCTION
def cluster(data, data_dim=(0, 1), plot_handle=None, plot_mean=True,
            colours=None, title=None, xlabel=None, ylabel=None, filename=None,
            show=True):
    """plot a set of clusters with different colors each

    :Parameters:
        data : object
            Preferably a dictionary with ndarray entries.
        data_dim : tuple
            A 2-tuple giving the dimension (entries per datapoint/columns) to
            use for the scatter plot of the cluster.
        plot_handle : figure or axis
            A reference to a figure or axis, or None if one has to be created.
        plot_mean : bool or float
            If False, do nothing. If True or positive integer,
            plot the cluster
            means with a strong cross, if positive float, additionally plot a
            unit circle of that radius (makes sense for prewhitened pca data),
            thus interpreting the value as the std of the cluster.
            Default=True
        colours : list
            List of colors in any matplotlib conform colour representation
            Default=None
        title : str
            A title for the plot. No title if None or ''.
        xlabel : str
            A label for the x-axis. No label if None or ''.
        ylabel : str
            A label for the y-axis. No label if None or ''.
        filename : str
            If given and a valid path on the local system, save the figure.
        show : bool
            If True, show the figure.
    :Returns:
        matplotlib.figure
            Reference to the figure plotted on
    """
    # colour list; fall back to the module palette when none is given
    if colours is None:
        col_lst = COLOURS
    else:
        col_lst = colours

    # setup Figure if necessary
    fig, ax = check_plotting_handle(plot_handle)
    # normalise a single dataset to the dict-of-clusters form
    if not isinstance(data, dict):
        data = {'0':data}

    # plot single cluster members, cycling through the colour list
    col_idx = 0
    for k in sorted(data.keys()):
        ax.plot(
            data[k][:, data_dim[0]],
            data[k][:, data_dim[1]],
            marker='.',
            lw=0,
            c=col_lst[col_idx % len(col_lst)])
        col_idx += 1

    # plot cluster means
    if plot_mean is not False:
        col_idx = 0
        for k in sorted(data.keys()):
            my_mean = data[k][:, data_dim].mean(axis=0)
            ax.plot(
                [my_mean[0]],
                [my_mean[1]],
                lw=0,
                marker='x',
                mfc=col_lst[col_idx % len(col_lst)],
                ms=10,
                mew=1,
                mec='k')

            # plot density estimates (only when plot_mean is a radius,
            # i.e. not the plain boolean True)
            if plot_mean is not True:
                ax.add_artist(
                    mpl.patches.Ellipse(
                        xy=my_mean,
                        width=plot_mean * 2,
                        height=plot_mean * 2,
                        facecolor='none',
                        edgecolor=col_lst[col_idx % len(col_lst)]))
            col_idx += 1

    # fancy stuff
    if title is not None:
        ax.set_title(title)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)

    # produce plots
    if filename is not None:
        save_figure(fig, filename, '')
    if show is True:
        plt.show()
    return fig
##---MAIN
if __name__ == '__main__':
    # Library module: nothing to run directly.
    pass
| pmeier82/SpikePlot | spikeplot/plot_cluster.py | Python | mit | 3,737 |
"""Ask Admin URL."""
from django.conf.urls.defaults import url, patterns
# Routes the widget's single question-submission view.
urlpatterns = patterns('',
    url(r'^my_question/$',
        'apps.widgets.AskedQuestions.views.send_question', name="ask_que_question"),
)
| vijayanandau/KnowledgeShare | makahiki/apps/widgets/AskedQuestions/urls.py | Python | mit | 216 |
try:
    from setuptools import setup, find_packages
except ImportError:
    # NOTE(review): this fallback only imports ``setup``; the call below
    # still references ``find_packages``, so a pure-distutils environment
    # would raise NameError. Kept as-is to preserve behaviour.
    from distutils.core import setup

setup(name='antk',
      # FIX: version must be a string (setuptools / PEP 440), not a float.
      version='0.3',
      description='Automated Neural-graph Toolkit: A Tensorflow wrapper for '
                  'common deep learning tasks and rapid development of innovative'
                  'models. Developed at Hutch Research, Western Washington University.'
                  'Support for multiple input and output neural network graphs. '
                  'Model visualizations and extensively documented interface. '
                  'Explore tensorflow functionality and deep learning fundamentals.',
      url='http://aarontuor.xyz',
      author='Aaron Tuor',
      author_email='tuora@students.wwu.edu',
      license='none',
      packages=find_packages(),
      zip_safe=False,
      install_requires=['scipy', 'numpy', 'tensorflow'],
      classifiers=[
          'Programming Language :: Python',
          'Operating System :: Unix',
          'Intended Audience :: Science/Research',
          'Topic :: Scientific/Engineering',
          'Topic :: Scientific/Engineering :: Artificial Intelligence',
          'Topic :: Scientific/Engineering :: Mathematics',
          'Intended Audience :: Developers',
          'Intended Audience :: Education',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: Software Development :: Libraries'],
      keywords=[
          'Deep Learning',
          'Neural Networks',
          'TensorFlow',
          'Machine Learning',
          'Western Washington University',
          'Recommender Systems'])
| aarontuor/antk | setup.py | Python | mit | 1,606 |
from __future__ import print_function
from __future__ import unicode_literals
import io
import os.path
import pipes
import sys
from pre_commit import output
from pre_commit.util import make_executable
from pre_commit.util import mkdirp
from pre_commit.util import resource_filename
# This is used to identify the hook file we install
PRIOR_HASHES = (
'4d9958c90bc262f47553e2c073f14cfe',
'd8ee923c46731b42cd95cc869add4062',
'49fd668cb42069aa1b6048464be5d395',
'79f09a650522a87b0da915d0d983b2de',
'e358c9dae00eac5d06b38dfdb1e33a8c',
)
CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'
def is_our_script(filename):
    """Return True if `filename` is a hook script installed by pre-commit.

    Recognized by the presence of the current or any prior template hash.
    """
    if not os.path.exists(filename):
        return False
    # Use a context manager so the handle is closed deterministically
    # (the original left it open until garbage collection).
    with io.open(filename) as f:
        contents = f.read()
    return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
def install(
        runner, overwrite=False, hooks=False, hook_type='pre-commit',
        skip_on_missing_conf=False,
):
    """Install the pre-commit hooks.

    :param runner: project runner; supplies hook paths and the config file.
    :param overwrite: delete any pre-existing (legacy) hook instead of
        chaining to it in migration mode.
    :param hooks: additionally install all hook environments now.
    :param hook_type: 'pre-commit', 'pre-push' or 'commit-msg'.
    :param skip_on_missing_conf: substituted into the hook template as a
        shell boolean literal.
    :return: 0 on success.
    """
    hook_path = runner.get_hook_path(hook_type)
    legacy_path = hook_path + '.legacy'
    mkdirp(os.path.dirname(hook_path))
    # If we have an existing hook, move it to pre-commit.legacy
    if os.path.lexists(hook_path) and not is_our_script(hook_path):
        os.rename(hook_path, legacy_path)
    # If we specify overwrite, we simply delete the legacy file
    if overwrite and os.path.exists(legacy_path):
        os.remove(legacy_path)
    elif os.path.exists(legacy_path):
        output.write_line(
            'Running in migration mode with existing hooks at {}\n'
            'Use -f to use only pre-commit.'.format(
                legacy_path,
            ),
        )
    with io.open(hook_path, 'w') as pre_commit_file_obj:
        # Pick the hook-type-specific template fragment.
        if hook_type == 'pre-push':
            with io.open(resource_filename('pre-push-tmpl')) as f:
                hook_specific_contents = f.read()
        elif hook_type == 'commit-msg':
            with io.open(resource_filename('commit-msg-tmpl')) as f:
                hook_specific_contents = f.read()
        elif hook_type == 'pre-commit':
            hook_specific_contents = ''
        else:
            raise AssertionError('Unknown hook type: {}'.format(hook_type))
        # The template is shell; render the flag as 'true'/'false' literals.
        skip_on_missing_conf = 'true' if skip_on_missing_conf else 'false'
        contents = io.open(resource_filename('hook-tmpl')).read().format(
            sys_executable=pipes.quote(sys.executable),
            hook_type=hook_type,
            hook_specific=hook_specific_contents,
            config_file=runner.config_file,
            skip_on_missing_conf=skip_on_missing_conf,
        )
        pre_commit_file_obj.write(contents)
    make_executable(hook_path)
    output.write_line('pre-commit installed at {}'.format(hook_path))
    # If they requested we install all of the hooks, do so.
    if hooks:
        install_hooks(runner)
    return 0
def install_hooks(runner):
    """Ensure every configured repository has its hook environment installed."""
    for repo in runner.repositories:
        repo.require_installed()
def uninstall(runner, hook_type='pre-commit'):
    """Uninstall the pre-commit hooks."""
    hook_path = runner.get_hook_path(hook_type)
    legacy_path = hook_path + '.legacy'
    # Nothing to do unless an installed hook of ours is present.
    if not (os.path.exists(hook_path) and is_our_script(hook_path)):
        return 0
    os.remove(hook_path)
    output.write_line('{} uninstalled'.format(hook_type))
    # Restore any hook we displaced at install time.
    if os.path.exists(legacy_path):
        os.rename(legacy_path, hook_path)
        output.write_line('Restored previous hooks to {}'.format(hook_path))
    return 0
| Lucas-C/pre-commit | pre_commit/commands/install_uninstall.py | Python | mit | 3,549 |
#!/usr/bin/python
#
# Manage JSON database of Micro-BESM opcodes.
#
import sys, json, codecs
# Check parameters.
# NOTE: Python 2 script (print statements); exactly one argument expected.
if len(sys.argv) != 2:
    print "Usage:"
    print "  opcode [option] file.json"
    print "Options:"
    print "  TODO"
    sys.exit(1)
opcode = [] # List of all opcodes
#
# Process the input file.
#
def main(filename):
    # Round-trip: load opcodes from `filename`, then rewrite them sorted and
    # pretty-printed to output.json.
    read_data(filename)
    write_results("output.json")
#
# Load opcode[] from JSON file.
#
def read_data(filename):
    # Populate the module-level opcode[] list from a JSON file; exits the
    # process on any failure.
    global opcode
    try:
        file = open(filename)
        opcode = json.load(file)
        file.close()
    except:
        # NOTE(review): bare except hides the actual cause (missing file vs.
        # malformed JSON); consider catching IOError/ValueError separately.
        print "Fatal error: Cannot load file '" + filename + "'"
        sys.exit(1)
    print "Load file '"+filename+"':",
    print "%d opcodes" % len(opcode)
    #print "Opcodes:", opcode
#
# Write the data to another JSON file.
#
def write_results(filename):
    # Dump opcode[] as pretty-printed, key-sorted JSON. ensure_ascii=False
    # keeps any non-ASCII text readable in the output; codecs.open handles
    # the UTF-8 encoding under Python 2.
    file = codecs.open(filename, 'w', encoding="utf-8")
    json.dump(opcode, file, indent=4, sort_keys=True, ensure_ascii=False)
    file.close()
    print "Write file %s: %d opcodes" % (filename, len(opcode))
if __name__ == "__main__":
main(sys.argv[1])
| besm6/micro-besm | doc/opcodes/opcode.py | Python | mit | 1,130 |
from django.contrib import admin
from django.contrib.contenttypes import generic
from models import Attribute, BaseModel
from django.utils.translation import ugettext_lazy as _
class MetaInline(generic.GenericTabularInline):
    # Inline tabular editor for generic Attribute rows attached to any model
    # via Django's contenttypes framework; no extra blank rows by default.
    model = Attribute
    extra = 0
class BaseAdmin(admin.ModelAdmin):
    """
    def get_readonly_fields(self, request, obj=None):
        fs = super(BaseAdmin, self).get_readonly_fields(request, obj)
        fs += ('created_by', 'last_updated_by',)
        return fs
    def get_fieldsets(self, request, obj=None):
        fs = super(BaseAdmin, self).get_fieldsets(request, obj)
        fs[0][1]['fields'].remove('created_by')
        fs[0][1]['fields'].remove('last_updated_by')
        fs.extend([(_('Other informations'), {'fields':['created_by','last_updated_by'], 'classes':['collapse']})])
        return fs
    def changelist_view(self, request, extra_context=None):
        if request.user.has_perm('%s.can_view_deleted' % self.model._meta.app_label):
            if not "deleted_flag" in self.list_filter:
                self.list_filter += ("deleted_flag",)
        return super(BaseAdmin, self).changelist_view(request, extra_context)
    def queryset(self, request):
        return super(BaseAdmin, self).queryset(request).exclude(deleted_flag=True)
    """
    # NOTE(review): the triple-quoted block above is disabled code preserved
    # as the class docstring; it sketches audit-field and soft-delete admin
    # behavior that is not currently active.
    def save_model(self, request, obj, form, change):
        # Stamp the audit fields: created_by only on first save,
        # last_updated_by on every save.
        if not change:
            obj.created_by = request.user
        obj.last_updated_by = request.user
        obj.save()
    def save_formset(self, request, form, formset, change):
        # Same audit stamping for inline instances that derive from BaseModel.
        # NOTE(review): formset.save_m2m() is not called after commit=False;
        # confirm no inline here carries m2m data.
        instances = formset.save(commit=False)
        for instance in instances:
            if isinstance(instance, BaseModel): #Check if it is the correct type of inline
                if not instance.created_by_id:
                    instance.created_by = request.user
                instance.last_updated_by = request.user
                instance.save()
| Mercy-Nekesa/sokoapp | sokoapp/utils/admin.py | Python | mit | 1,954 |
from __future__ import absolute_import, unicode_literals
from .base import *
# Production hardening: no debug output in responses or templates.
DEBUG = False
TEMPLATE_DEBUG = False
# Pre-compress static assets at deploy time rather than per-request.
COMPRESS_OFFLINE = True
# All static/media/compressed files are served from a single S3 bucket.
AWS_STORAGE_BUCKET_NAME = get_env_variable("AWS_STORAGE_BUCKET_NAME")
AWS_S3_CUSTOM_DOMAIN = '{}.s3.amazonaws.com'.format(AWS_STORAGE_BUCKET_NAME)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
COMPRESS_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
COMPRESS_URL = 'https://{}.s3.amazonaws.com/'.format(AWS_STORAGE_BUCKET_NAME)
STATIC_URL = 'https://{}.s3.amazonaws.com/'.format(AWS_STORAGE_BUCKET_NAME)
MEDIA_URL = 'https://{}.s3.amazonaws.com/'.format(AWS_STORAGE_BUCKET_NAME)
# Sentry error reporting (via raven).
RAVEN_CONFIG = {
    'dsn': get_env_variable('RAVEN_DSN'),
}
INSTALLED_APPS = INSTALLED_APPS + (
    'raven.contrib.django.raven_compat',
)
FAVICON_PATH = STATIC_URL + 'img/favicon.png'
# PostgreSQL with persistent connections (10 minutes).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': get_env_variable('DB_NAME'),
        'USER': get_env_variable('DB_USER'),
        'PASSWORD': get_env_variable('DB_PASSWORD'),
        'HOST': get_env_variable('DB_HOST'),
        'PORT': get_env_variable('DB_PORT'),
        'CONN_MAX_AGE': 600,
    }
}
# Optional machine-local overrides; absence is not an error.
try:
    from .local import *
except ImportError:
    pass
| OpenCanada/lindinitiative | lindinitiative/settings/production.py | Python | mit | 1,319 |
from syhelpers.log import print_error, print_table_terminal
class ModuleBase:
    """
    Base class for all modules (handler, transport, platform, ...)

    Provides a shared option store (`self.options`) plus helpers for tab
    completion, assignment, validation and display of those options. Each
    option maps an upper-case name to a dict with keys 'Value', 'Required',
    'Description' and optionally 'Options' (list of allowed upper-case
    values).
    """

    def __init__(self):
        """
        Initialize Module, should be overwritten and options should be added
        """
        self.options = {}

    def completeoption(self, name):
        """
        lists autocomplete for option names starting with name
        :param name: start of the option name
        :return: list of possible autocompletes
        """
        prefix = str(name).upper()
        return [o for o in self.options.keys() if o.startswith(prefix)]

    def completeoptionvalue(self, name, value):
        """
        lists autocomplete for option values starting with value of the option name
        :param name: name of the option
        :param value: start of the option value
        :return: list of possible autocompletes
        """
        key = str(name).upper()
        if key not in self.options:
            return []
        if "Options" not in self.options[key]:
            return []
        prefix = str(value).upper()
        return [v for v in self.options[key]['Options'] if v.startswith(prefix)]

    def setoption(self, name, value):
        """
        Sets option <name> to value <value> if possible.
        Can be overwritten and expanded by modules.

        :return: True if an option of that name exists (even if the value was
            rejected), False otherwise.
        """
        # name and value must be set and must be string
        if not name or not isinstance(name, str):
            print_error("Option name not understood")
            return False
        if not value or not isinstance(value, str):
            print_error("Option value not understood")
            return False
        key = name.upper()
        # no option of that name here; no error now, module should catch that
        if key not in self.options:
            return False
        values = self.options[key]
        if 'Options' in values and values['Options']:
            # fixed-choice option: reject values outside the allowed list
            if value.upper() not in values['Options']:
                print_error(key + " must be one of " + (", ".join(values['Options'])))
                return True  # ok, strange, but True only means we found it, even if setting failed
            # and if so, set the value to upper case
            value = value.upper()
        # finally set the value
        self.options[key]['Value'] = value
        return True

    def validate_options(self):
        """
        Validate all currently set module options.
        Can be overwritten and expanded by modules.
        """
        valid = True
        # check for each option
        for option, values in self.options.items():
            # Required options must have a non-empty value.
            # BUGFIX: parenthesized the condition; previously
            # `a and not b or b == ''` also flagged non-required options
            # whose value was the empty string.
            if values['Required'] and (not values['Value'] or values['Value'] == ''):
                print_error(str(option)+" must be set")
                valid = False
            # make sure all options with listed alternatives are correct
            if 'Options' in values and values['Options'] and not(values['Value'] in values['Options']):
                print_error(str(option)+" must be one of "+(", ".join(values['Options'])))
                valid = False
        return valid

    def show_options(self):
        """
        print information of options for this module
        :return: None
        """
        headers = ["Name", "Value", "Required", "Description"]
        data = []
        for name in self.options.keys():
            value = str(self.options[name]["Value"]) if self.options[name]["Value"] else ""
            required = str(self.options[name]["Required"])
            description = str(self.options[name]["Description"])
            if "Options" in self.options[name]:
                description += " (Options: " + ", ".join(self.options[name]["Options"]) + ")"
            data.append([str(name), value, required, description])
        if data:
            print_table_terminal(data, headers=headers)
        else:
            print("NO OPTIONS FOUND")
import re
def pythonize_camelcase_name(name):
    """
    GetProperty -> get_property
    """
    converted = re.sub(r'([A-Z])', lambda m: '_' + m.group(0).lower(), name)
    # A leading capital produces a spurious leading underscore; drop it.
    return converted[1:] if converted.startswith('_') else converted
| fredreichbier/babbisch-ooc | babbisch_ooc/wraplib/utils.py | Python | mit | 279 |
# hashlib exists on Python 2.5+; fall back to the legacy md5 module otherwise.
try:
    from hashlib import md5
except ImportError:
    from md5 import md5
class ratingMiddleware(object):
    # Abstract middleware: attaches a per-request `rating_token` that the
    # rating app uses to deduplicate votes from the same client.
    def process_request(self, request):
        request.rating_token = self.generate_token(request)
    def generate_token(self, request):
        # Subclasses must return a stable identifier for the requester.
        raise NotImplementedError
class ratingIpMiddleware(ratingMiddleware):
    # Token = raw client IP: one vote per address.
    def generate_token(self, request):
        return request.META['REMOTE_ADDR']
class ratingIpUseragentMiddleware(ratingMiddleware):
    # Token = md5(client IP + user agent): distinct browsers on the same
    # address rate independently.
    def generate_token(self, request):
        s = ''.join((request.META['REMOTE_ADDR'], request.META['HTTP_USER_AGENT']))
        # BUGFIX: md5() requires bytes on Python 3; encode explicitly
        # (a no-op for ASCII input on Python 2).
        return md5(s.encode('utf-8')).hexdigest()
| contactr2m/remote_repo | src/rating/middleware.py | Python | mit | 625 |
"""
Graph isomorphism functions.
"""
import networkx as nx
from networkx.exception import NetworkXError
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Christopher Ellison cellison@cse.ucdavis.edu)'])
# Copyright (C) 2004-2019 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['could_be_isomorphic',
'fast_could_be_isomorphic',
'faster_could_be_isomorphic',
'is_isomorphic']
def could_be_isomorphic(G1, G2):
    """Returns False if graphs are definitely not isomorphic.
    True does NOT guarantee isomorphism.

    Parameters
    ----------
    G1, G2 : graphs
       The two graphs G1 and G2 must be the same type.

    Notes
    -----
    Checks for matching degree, triangle, and number of cliques sequences.
    """
    # Cheap global invariant first: node counts must agree.
    if G1.order() != G2.order():
        return False

    def _profile(G):
        # Sorted per-node [degree, triangle-count, clique-count] fingerprints.
        triangles = nx.triangles(G)
        cliques = nx.number_of_cliques(G)
        return sorted([deg, triangles[node], cliques[node]]
                      for node, deg in G.degree())

    # Isomorphic graphs must have identical sorted local profiles.
    return _profile(G1) == _profile(G2)


graph_could_be_isomorphic = could_be_isomorphic
def fast_could_be_isomorphic(G1, G2):
    """Returns False if graphs are definitely not isomorphic.
    True does NOT guarantee isomorphism.

    Parameters
    ----------
    G1, G2 : graphs
       The two graphs G1 and G2 must be the same type.

    Notes
    -----
    Checks for matching degree and triangle sequences.
    """
    # Node counts must match before any per-node comparison is worthwhile.
    if G1.order() != G2.order():
        return False

    def _profile(G):
        # Sorted per-node [degree, triangle-count] pairs.
        triangles = nx.triangles(G)
        return sorted([deg, triangles[node]] for node, deg in G.degree())

    return _profile(G1) == _profile(G2)


fast_graph_could_be_isomorphic = fast_could_be_isomorphic
def faster_could_be_isomorphic(G1, G2):
    """Returns False if graphs are definitely not isomorphic.
    True does NOT guarantee isomorphism.

    Parameters
    ----------
    G1, G2 : graphs
       The two graphs G1 and G2 must be the same type.

    Notes
    -----
    Checks for matching degree sequences.
    """
    # Node counts must match.
    if G1.order() != G2.order():
        return False
    # Isomorphic graphs share the same sorted degree sequence.
    degrees1 = sorted(d for _, d in G1.degree())
    degrees2 = sorted(d for _, d in G2.degree())
    return degrees1 == degrees2


faster_graph_could_be_isomorphic = faster_could_be_isomorphic
def is_isomorphic(G1, G2, node_match=None, edge_match=None):
    """Returns True if the graphs G1 and G2 are isomorphic and False otherwise.

    Parameters
    ----------
    G1, G2: graphs
        The two graphs G1 and G2 must be the same type (both directed or
        both undirected).

    node_match : callable
        Called as ``node_match(G1.nodes[n1], G2.nodes[n2])`` with the node
        attribute dictionaries; must return True when the nodes should be
        considered equal. If None, node attributes are ignored.

    edge_match : callable
        Called as ``edge_match(G1[u1][v1], G2[u2][v2])`` with the edge
        attribute dictionaries; must return True when the edges should be
        considered equal. If None, edge attributes are ignored.

    Raises
    ------
    NetworkXError
        If one graph is directed and the other is not.

    Notes
    -----
    Uses the vf2 algorithm [1]_. Helper matchers such as
    ``iso.numerical_edge_match`` and ``iso.categorical_node_match`` build
    suitable ``node_match``/``edge_match`` callables.

    Examples
    --------
    >>> import networkx.algorithms.isomorphism as iso
    >>> G1 = nx.DiGraph()
    >>> G2 = nx.DiGraph()
    >>> nx.add_path(G1, [1,2,3,4], weight=1)
    >>> nx.add_path(G2, [10,20,30,40], weight=2)
    >>> em = iso.numerical_edge_match('weight', 1)
    >>> nx.is_isomorphic(G1, G2)  # no weights considered
    True
    >>> nx.is_isomorphic(G1, G2, edge_match=em)  # match weights
    False

    See Also
    --------
    numerical_node_match, numerical_edge_match, numerical_multiedge_match
    categorical_node_match, categorical_edge_match, categorical_multiedge_match

    References
    ----------
    .. [1] L. P. Cordella, P. Foggia, C. Sansone, M. Vento,
       "An Improved Algorithm for Matching Large Graphs",
       3rd IAPR-TC15 Workshop on  Graph-based Representations in
       Pattern Recognition, Cuen, pp. 149-159, 2001.
       http://amalfi.dis.unina.it/graph/db/papers/vf-algorithm.pdf
    """
    directedness = (G1.is_directed(), G2.is_directed())
    if directedness == (True, True):
        matcher_cls = nx.algorithms.isomorphism.DiGraphMatcher
    elif directedness == (False, False):
        matcher_cls = nx.algorithms.isomorphism.GraphMatcher
    else:
        # Mixed directed/undirected comparison is undefined.
        raise NetworkXError("Graphs G1 and G2 are not of the same type.")
    matcher = matcher_cls(G1, G2, node_match=node_match, edge_match=edge_match)
    return matcher.is_isomorphic()
| sserrot/champion_relationships | venv/Lib/site-packages/networkx/algorithms/isomorphism/isomorph.py | Python | mit | 6,757 |
from django.db import models
from django.contrib.auth.models import User
class Profile(models.Model):
    # One-to-one extension of Django's auth User.
    # NOTE(review): Django >= 2.0 requires an explicit on_delete argument.
    user = models.OneToOneField(User)
    # Optional free-form profile text.
    description = models.TextField(blank=True, null=True)
| axiome-oss/dive-into-django-i18n | your_project/your_package/models.py | Python | mit | 199 |
from django.contrib import admin
from polls.models import Choice, Poll
class ChoiceInline(admin.TabularInline):
    # Edit Choice rows inline on the Poll page, with 3 blank rows for adding.
    model = Choice
    extra = 3
class PollAdmin(admin.ModelAdmin):
    # Question first; publication date in a collapsible section.
    fieldsets = [
        (None, {'fields': ['question']}),
        ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
    ]
    inlines = [ChoiceInline]
    # Change-list: columns, sidebar filter, search box and date drill-down.
    list_display = ('question','pub_date')
    list_filter = ['pub_date']
    search_fields = ['question']
    date_hierarchy = 'pub_date'
admin.site.register(Poll, PollAdmin)
| damiencalloway/djtut | mysite/polls/admin.py | Python | mit | 570 |
# !/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Vincent<vincent8280@outlook.com>
# http://wax8280.github.io
# Created on 18-2-12 上午2:34
# !/usr/bin/env python
import datetime
import os
import re
import time
from copy import deepcopy
from queue import Queue, PriorityQueue
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from web2kindle import MAIN_CONFIG, CONFIG_ZHIHU_DAILY
from web2kindle.libs.content_formating import format_zhihu_content
from web2kindle.libs.crawler import Crawler, RetryDownload, Task
from web2kindle.libs.db import ArticleDB
from web2kindle.libs.html2kindle import HTML2Kindle
from web2kindle.libs.send_email import SendEmail2Kindle
from web2kindle.libs.utils import write, md5string, load_config, check_config, get_next_datetime_string, \
compare_datetime_string, get_datetime_string, make_crawler_meta
from web2kindle.libs.log import Log
__all__ = ["main"]
DESC = {
'script_args': {'script_name': 'zhihu_daily',
'script_introduction': '获取知乎日报',
'i': False,
'start': True,
'img': True,
'gif': True,
'email': True, },
'script_config': {
'script_name': 'zhihu_daily',
'configs': [{
'config_name': 'SAVE_PATH',
'config_introduction': "保存路径名",
'default': '',
'requried': False
},
{
'config_name': 'HEADER',
'config_introduction': "请求头部",
'default': '',
'requried': False
},
]
},
'kw': "[{name: 'window', default: 50, select: null}],"
}
SCRIPT_CONFIG = load_config(CONFIG_ZHIHU_DAILY)
LOG = Log("zhihu_daily")
DEFAULT_HEADERS = {
'User-Agent': 'DailyApi/4 (Linux; Android 4.4.2; SM-T525 Build/samsung/picassoltezs/picassolte/KOT49H/zh_CN) '
'Google-HTTP-Java-Client/1.22.0 (gzip) Google-HTTP-Java-Client/1.22.0 (gzip)'
}
ARTICLE_ID_SET = set()
TODAY_URL = 'http://news-at.zhihu.com/api/4/stories/latest'
# http://http://news-at.zhihu.com/api/4/stories/before/20180212
YESTERDAY_URL = 'http://news-at.zhihu.com/api/4/stories/before/{}'
IS_TODAY_URL = True
META = make_crawler_meta(SCRIPT_CONFIG.get('HEADER', {}),
['referer', 'connection', 'accept-encoding', 'If-None-Match', 'host', 'X-CSRF-Token'])
HTML_PARSER_NAME = 'lxml'
def main(start=None, end=None, img=True, gif=False, email=False, **kw):
    """Entry point: fold the common flags into **kw and dispatch.

    `start` is a date string like '20120101'; None means today's issue only.
    """
    kw['img'] = img
    kw['gif'] = gif
    kw['email'] = email
    zhihu_daily_main(start, end, kw)
def zhihu_daily_main(start, end, kw):
    """Crawl Zhihu Daily into a local article DB, build a Kindle book and
    optionally e-mail it.

    :param start: 'YYYYMMDD' history start, or None for today's issue only.
    :param end: 'YYYYMMDD' history end; defaults to today when start is given.
    :param kw: flags from main(): 'img', 'gif', 'email', optional 'window'.
    """
    LOG.logd("META:{}".format(META))
    LOG.logd("SCRIPT_CONFIG:{}".format(SCRIPT_CONFIG))
    check_config(MAIN_CONFIG, SCRIPT_CONFIG, 'SAVE_PATH', LOG)
    # Crawler plumbing: input queue, output queue, result queue.
    iq = PriorityQueue()
    oq = PriorityQueue()
    result_q = Queue()
    crawler = Crawler(iq, oq, result_q, MAIN_CONFIG.get('PARSER_WORKER', 1), MAIN_CONFIG.get('DOWNLOADER_WORKER', 1),
                      MAIN_CONFIG.get('RESULTER_WORKER', 1))
    new_header = deepcopy(DEFAULT_HEADERS)
    # Module-level flag steers parser_list between today/history pagination.
    global IS_TODAY_URL
    if start is None:
        # Today-only mode: single "latest" endpoint.
        IS_TODAY_URL = True
        save_path = os.path.join(SCRIPT_CONFIG['SAVE_PATH'], 'zhihu_daily_' + get_datetime_string('%Y%m%d'))
        book_name = '知乎日报_' + get_datetime_string('%Y%m%d')
    else:
        # History mode: walk from `start` forward to `end` (default: today).
        if end is None:
            end = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d')
        save_path = os.path.join(SCRIPT_CONFIG['SAVE_PATH'], 'zhihu_daily_{}_{}'.format(start, end))
        book_name = '知乎日报_{}_{}'.format(start, end)
        IS_TODAY_URL = False
    url = TODAY_URL if IS_TODAY_URL else YESTERDAY_URL.format(start)
    # Seed task: highest priority, generous retries for the list endpoint.
    task = Task.make_task({
        'url': url,
        'method': 'GET',
        'meta': {'headers': new_header, 'verify': False},
        'parser': parser_list,
        'priority': 0,
        'save': {'cursor': start,
                 'save_path': save_path,
                 'start': start,
                 'end': end,
                 'kw': kw},
        'retry': 99,
        'retry_delay': 10
    })
    iq.put(task)
    # Init DB
    with ArticleDB(save_path, VERSION=0) as db:
        db.insert_meta_data(['BOOK_NAME', book_name])
        # Preload known article ids so already-crawled stories are skipped.
        _ = db.select_all_article_id()
        if _:
            for each in _:
                ARTICLE_ID_SET.add(each[0])
    # Blocks until the crawl drains.
    crawler.start()
    items = []
    with ArticleDB(save_path, VERSION=0) as db:
        items.extend(db.select_article())
        db.increase_version()
        db.reset()
    if items:
        new = True
        # Render the collected articles into a Kindle book (mobi).
        with HTML2Kindle(items, save_path, book_name, MAIN_CONFIG.get('KINDLEGEN_PATH')) as html2kindle:
            html2kindle.make_metadata(window=kw.get('window', 50))
            html2kindle.make_book_multi(save_path)
    else:
        LOG.log_it('无新项目', 'INFO')
        new = False
    # Only mail when something new was produced and mailing was requested.
    if new and kw.get('email'):
        with SendEmail2Kindle() as s:
            s.send_all_mobi(os.path.join(save_path))
def parser_list(task):
    """Parse a story-list API response.

    Emits one content task per new story and, in history mode, a follow-up
    task for the next day's list. Returns (None, new_tasks).
    """
    response = task['response']
    new_tasks = []
    to_next = True
    if not response:
        raise RetryDownload
    try:
        data = response.json()['stories']
    except Exception as e:
        LOG.log_it('解析JSON出错(如一直出现,而且浏览器能正常访问网站,可能是网站代码升级,请通知开发者。)ERRINFO:{}'
                   .format(str(e)), 'WARN')
        raise RetryDownload
    for item in data:
        # Skip stories already stored in the database, and stop paging
        # further once a known story is encountered.
        url = 'http://news-at.zhihu.com/api/4/story/' + str(item['id'])
        if md5string(url) in ARTICLE_ID_SET:
            to_next = False
            continue
        new_task = Task.make_task({
            'url': url,
            'method': 'GET',
            'meta': task['meta'],
            'parser': parser_content,
            'resulter': resulter_content,
            'priority': 5,
            'save': task['save'],
            'title': item['title'],
        })
        new_tasks.append(new_task)
    # Next page (history mode only).
    if not IS_TODAY_URL and to_next:
        next_datetime = get_next_datetime_string(task['save']['cursor'], '%Y%m%d', 1)
        # The cursor advances one day per page and always reaches `end`,
        # which terminates the pagination.
        if compare_datetime_string(task['save']['end'], next_datetime, '%Y%m%d') and len(data) != 0:
            next_page_task = deepcopy(task)
            next_page_task.update(
                {'url': re.sub('before/\d+', 'before/{}'.format(next_datetime),
                               next_page_task['url'])})
            next_page_task['save'].update({'cursor': next_datetime})
            new_tasks.append(next_page_task)
    return None, new_tasks
def parser_content(task):
    """Parse one story page into a DB row; spawn image-download tasks.

    Returns (task-with-parsed_data, image_tasks).
    """
    title = task['title']
    new_tasks = []
    response = task['response']
    if not response:
        raise RetryDownload
    try:
        content = response.json()['body']
    except Exception as e:
        LOG.log_it('解析JSON出错(如一直出现,而且浏览器能正常访问网站,可能是网站代码升级,请通知开发者。)ERRINFO:{}'
                   .format(str(e)), 'WARN')
        raise RetryDownload
    bs = BeautifulSoup(content, HTML_PARSER_NAME)
    content = str(bs.select('div.content')[0])
    author_name = bs.select('.author')[0].string if bs.select('.author') else ''
    # The daily API exposes no vote count or creation time; store blanks.
    voteup_count = ''
    created_time = ''
    article_url = task['url']
    # Rewrites img links to local paths and returns the originals to fetch.
    download_img_list, content = format_zhihu_content(content, task, HTML_PARSER_NAME)
    # Row layout matches ArticleDB.insert_article; last field is a
    # high-resolution timestamp used for ordering.
    item = [md5string(article_url), title, content, created_time, voteup_count, author_name,
            int(time.time() * 100000)]
    if task['save']['kw'].get('img', True):
        img_header = deepcopy(DEFAULT_HEADERS)
        # Some CDNs require a matching Referer to serve images.
        img_header.update({'Referer': response.url})
        for img_url in download_img_list:
            new_tasks.append(Task.make_task({
                'url': img_url,
                'method': 'GET',
                'meta': {'headers': img_header, 'verify': False},
                'parser': parser_downloader_img,
                'resulter': resulter_downloader_img,
                'save': task['save'],
                'priority': 10,
            }))
    task.update({"parsed_data": item})
    return task, new_tasks
def resulter_content(task):
    # Persist the row produced by parser_content into the article DB.
    LOG.log_it("正在将任务 {} 插入数据库".format(task['tid']), 'INFO')
    with ArticleDB(task['save']['save_path']) as article_db:
        article_db.insert_article(task['parsed_data'])
def parser_downloader_img(task):
    """Images need no parsing; hand the task straight to the resulter."""
    return task, None
"""
在convert_link函数里面md5(url),然后转换成本地链接
在resulter_downloader_img函数里面,将下载回来的公式,根据md5(url)保存为文件名
"""
def resulter_downloader_img(task):
    # Zhihu equation renders (www.zhihu.com/equation) carry no usable path
    # component, so they are saved as md5(url).svg; any other image keeps
    # the filename from its URL path. Both go under <save_path>/static.
    if 'www.zhihu.com/equation' not in task['url']:
        write(os.path.join(task['save']['save_path'], 'static'), urlparse(task['response'].url).path[1:],
              task['response'].content, mode='wb')
    else:
        write(os.path.join(task['save']['save_path'], 'static'), md5string(task['url']) + '.svg',
              task['response'].content,
              mode='wb')
| wax8280/web2kindle | web2kindle/script/zhihu_daily.py | Python | mit | 9,297 |
#print("OK, imported signal handling")
from registration.signals import user_registered
import os
from django_maps2 import settings
#This callback is listening for user to register. Creates directories for user images.
def postRegistration(sender, user, request, **kwargs):
    """Create the media directories for a newly registered user.

    Layout: <USER_MEDIA_ROOT>/<user.pk>/profile_pictures
    """
    media_root = settings.USER_MEDIA_ROOT
    user_dir = os.path.join(media_root, str(user.pk))
    # Create each level independently: the original only made
    # profile_pictures when the user directory itself was missing, so a
    # half-created tree was never repaired. os.path.join also avoids the
    # manual "/" concatenation.
    for path in (user_dir, os.path.join(user_dir, "profile_pictures")):
        if not os.path.exists(path):
            os.makedirs(path)
#print("hello, got user registration signal")
user_registered.connect(postRegistration)
| agilman/django_maps2 | maps/signals.py | Python | mit | 601 |