repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, ⌀ = null)
|---|---|---|---|---|
ansible/ansible
|
refs/heads/devel
|
lib/ansible/plugins/netconf/__init__.py
|
15
|
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import abstractmethod
from functools import wraps
from ansible.errors import AnsibleError
from ansible.plugins import AnsiblePlugin
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import missing_required_lib
try:
from ncclient.operations import RPCError
from ncclient.xml_ import to_xml, to_ele, NCElement
HAS_NCCLIENT = True
NCCLIENT_IMP_ERR = None
# paramiko and gssapi are incompatible and raise AttributeError not ImportError
# When running in FIPS mode, cryptography raises InternalError
# https://bugzilla.redhat.com/show_bug.cgi?id=1778939
except Exception as err:
HAS_NCCLIENT = False
NCCLIENT_IMP_ERR = err
try:
from lxml.etree import Element, SubElement, tostring, fromstring
except ImportError:
from xml.etree.ElementTree import Element, SubElement, tostring, fromstring
def ensure_ncclient(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
if not HAS_NCCLIENT:
raise AnsibleError("%s: %s" % (missing_required_lib('ncclient'), to_native(NCCLIENT_IMP_ERR)))
return func(self, *args, **kwargs)
return wrapped
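# Illustrative sketch (not part of the original module): a concrete plugin can
# guard any ncclient-backed method with the decorator above, so a missing
# optional dependency surfaces as a clear AnsibleError instead of a NameError.
# The class name "ExampleNetconf" and its method are hypothetical.
#
#   class ExampleNetconf(NetconfBase):
#       @ensure_ncclient
#       def get_server_capabilities(self):
#           # only reached when ncclient was imported successfully
#           return list(self.m.server_capabilities)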
class NetconfBase(AnsiblePlugin):
"""
A base class for implementing Netconf connections
.. note:: Unlike most of Ansible, nearly all strings in
:class:`NetconfBase` plugins are byte strings. This is because of
how close to the underlying platform these plugins operate. Remember
to mark literal strings as byte string (``b"string"``) and to use
:func:`~ansible.module_utils._text.to_bytes` and
:func:`~ansible.module_utils._text.to_text` to avoid unexpected
problems.
List of supported RPCs:
:get: Retrieves running configuration and device state information
:get_config: Retrieves the specified configuration from the device
:edit_config: Loads the specified commands into the remote device
:commit: Load configuration from candidate to running
:discard_changes: Discard changes to candidate datastore
:validate: Validate the contents of the specified configuration.
:lock: Allows the client to lock the configuration system of a device.
:unlock: Release a configuration lock, previously obtained with the lock operation.
:copy_config: Create or replace an entire configuration datastore with the contents of another complete
configuration datastore.
:get_schema: Retrieves the required schema from the device
:get_capabilities: Retrieves device information and supported rpc methods
For JUNOS:
:execute_rpc: RPC to be executed on the remote device
:load_configuration: Loads the given configuration on the device
Note: rpc support depends on the capabilities of the remote device.
:returns: Returns output received from the remote device as a byte string
Note: the 'result' or 'error' from the response should be converted to an
ElementTree object using 'fromstring' to parse the output as an XML document.
'get_capabilities()' returns 'result' as a JSON string.
Usage:
from ansible.module_utils.connection import Connection
conn = Connection()
data = conn.execute_rpc(rpc)
reply = fromstring(data)
data = conn.get_capabilities()
json.loads(data)
conn.load_configuration(config=['set system ntp server 1.1.1.1'], action='set', format='text')
"""
__rpc__ = ['rpc', 'get_config', 'get', 'edit_config', 'validate', 'copy_config', 'dispatch', 'lock', 'unlock',
'discard_changes', 'commit', 'get_schema', 'delete_config', 'get_device_operations']
def __init__(self, connection):
super(NetconfBase, self).__init__()
self._connection = connection
@property
def m(self):
return self._connection.manager
def rpc(self, name):
"""
RPC to be executed on the remote device
:param name: Name of rpc in string format
:return: Received rpc response from remote host
"""
try:
obj = to_ele(name)
resp = self.m.rpc(obj)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
except RPCError as exc:
msg = exc.xml
raise Exception(to_xml(msg))
def get_config(self, source=None, filter=None):
"""
Retrieve all or part of a specified configuration
(by default entire configuration is retrieved).
:param source: Name of the configuration datastore being queried, defaults to running datastore
:param filter: This argument specifies the portion of the configuration data to retrieve
:return: Returns xml string containing the RPC response received from remote host
"""
if isinstance(filter, list):
filter = tuple(filter)
if not source:
source = 'running'
resp = self.m.get_config(source=source, filter=filter)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def get(self, filter=None, with_defaults=None):
"""
Retrieve device configuration and state information.
:param filter: This argument specifies the portion of the state data to retrieve
(by default entire state data is retrieved)
:param with_defaults: defines an explicit method of retrieving default values
from the configuration
:return: Returns xml string containing the RPC response received from remote host
"""
if isinstance(filter, list):
filter = tuple(filter)
resp = self.m.get(filter=filter, with_defaults=with_defaults)
response = resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
return response
def edit_config(self, config=None, format='xml', target='candidate', default_operation=None, test_option=None, error_option=None):
"""
Loads all or part of the specified *config* to the *target* configuration datastore.
:param config: Is the configuration, which must be rooted in the `config` element.
It can be specified either as a string or an :class:`~xml.etree.ElementTree.Element`.
:param format: The format of the configuration, e.g. xml, text
:param target: Is the name of the configuration datastore being edited
:param default_operation: If specified must be one of { `"merge"`, `"replace"`, or `"none"` }
:param test_option: If specified must be one of { `"test_then_set"`, `"set"` }
:param error_option: If specified must be one of { `"stop-on-error"`, `"continue-on-error"`, `"rollback-on-error"` }
The `"rollback-on-error"` *error_option* depends on the `:rollback-on-error` capability.
:return: Returns xml string containing the RPC response received from remote host
"""
if config is None:
raise ValueError('config value must be provided')
resp = self.m.edit_config(config, format=format, target=target, default_operation=default_operation, test_option=test_option,
error_option=error_option)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def validate(self, source='candidate'):
"""
Validate the contents of the specified configuration.
:param source: Is the name of the configuration datastore being validated or `config` element
containing the configuration subtree to be validated
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.validate(source=source)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def copy_config(self, source, target):
"""
Create or replace an entire configuration datastore with the contents of another complete configuration datastore.
:param source: Is the name of the configuration datastore to use as the source of the copy operation or `config`
element containing the configuration subtree to copy
:param target: Is the name of the configuration datastore to use as the destination of the copy operation
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.copy_config(source, target)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def dispatch(self, rpc_command=None, source=None, filter=None):
"""
Execute rpc on the remote device, e.g. dispatch('clear-arp-table')
:param rpc_command: specifies rpc command to be dispatched either in plain text or in xml element format (depending on command)
:param source: name of the configuration datastore being queried
:param filter: specifies the portion of the configuration to retrieve (by default entire configuration is retrieved)
:return: Returns xml string containing the RPC response received from remote host
"""
if rpc_command is None:
raise ValueError('rpc_command value must be provided')
resp = self.m.dispatch(fromstring(rpc_command), source=source, filter=filter)
if isinstance(resp, NCElement):
# In case xml reply is transformed or namespace is removed in
# ncclient device specific handler return modified xml response
result = resp.data_xml
elif hasattr(resp, 'data_ele') and resp.data_ele:
# if data node is present in xml response return the xml string
# with data node as root
result = resp.data_xml
else:
# return raw xml string received from host with rpc-reply as the root node
result = resp.xml
return result
def lock(self, target="candidate"):
"""
Allows the client to lock the configuration system of a device.
:param target: is the name of the configuration datastore to lock,
defaults to candidate datastore
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.lock(target=target)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def unlock(self, target="candidate"):
"""
Release a configuration lock, previously obtained with the lock operation.
:param target: is the name of the configuration datastore to unlock,
defaults to candidate datastore
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.unlock(target=target)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def discard_changes(self):
"""
Revert the candidate configuration to the currently running configuration.
Any uncommitted changes are discarded.
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.discard_changes()
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def commit(self, confirmed=False, timeout=None, persist=None):
"""
Commit the candidate configuration as the device's new current configuration.
Depends on the `:candidate` capability.
A confirmed commit (i.e. if *confirmed* is `True`) is reverted if there is no
followup commit within the *timeout* interval. If no timeout is specified the
confirm timeout defaults to 600 seconds (10 minutes).
A confirming commit may have the *confirmed* parameter but this is not required.
Depends on the `:confirmed-commit` capability.
:param confirmed: whether this is a confirmed commit
:param timeout: specifies the confirm timeout in seconds
:param persist: make the confirmed commit survive a session termination,
and set a token on the ongoing confirmed commit
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.commit(confirmed=confirmed, timeout=timeout, persist=persist)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def get_schema(self, identifier=None, version=None, format=None):
"""
Retrieve a named schema, with optional revision and type.
:param identifier: name of the schema to be retrieved
:param version: version of schema to get
:param format: format of the schema to be retrieved, yang is the default
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.get_schema(identifier, version=version, format=format)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def delete_config(self, target):
"""
Delete a configuration datastore
:param target: specifies the name or URL of configuration datastore to delete
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.delete_config(target)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def locked(self, target):
return self.m.locked(target)
@abstractmethod
def get_capabilities(self):
"""
Retrieves device information and the rpc methods
supported by the device platform, and returns the
result as a string
:return: Netconf session capabilities
"""
pass
@staticmethod
def guess_network_os(obj):
"""
Identifies the operating system of the network device.
:param obj: ncclient manager connection instance
:return: The name of the network operating system.
"""
pass
def get_base_rpc(self):
"""
Returns the list of base rpc methods supported by the remote device
:return: List of supported RPCs
"""
return self.__rpc__
def put_file(self, source, destination):
"""
Copies file to remote host
:param source: Source location of file
:param destination: Destination file path
:return: Returns xml string containing the RPC response received from remote host
"""
pass
def fetch_file(self, source, destination):
"""
Fetch file from remote host
:param source: Source location of file
:param destination: Destination file path
:return: Returns xml string containing the RPC response received from remote host
"""
pass
def get_device_operations(self, server_capabilities):
"""
Retrieve remote host capability from Netconf server hello message.
:param server_capabilities: Server capabilities received during Netconf session initialization
:return: Remote host capabilities in dictionary format
"""
operations = {}
capabilities = '\n'.join(server_capabilities)
operations['supports_commit'] = ':candidate' in capabilities
operations['supports_defaults'] = ':with-defaults' in capabilities
operations['supports_confirm_commit'] = ':confirmed-commit' in capabilities
operations['supports_startup'] = ':startup' in capabilities
operations['supports_xpath'] = ':xpath' in capabilities
operations['supports_writable_running'] = ':writable-running' in capabilities
operations['supports_validate'] = ':validate' in capabilities
operations['lock_datastore'] = []
if operations['supports_writable_running']:
operations['lock_datastore'].append('running')
if operations['supports_commit']:
operations['lock_datastore'].append('candidate')
if operations['supports_startup']:
operations['lock_datastore'].append('startup')
operations['supports_lock'] = bool(operations['lock_datastore'])
return operations
# TODO Restore .xml, when ncclient supports it for all platforms
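# Illustrative sketch (not part of the original module): feeding a NETCONF
# <hello> capability list to get_device_operations(). The capability URIs below
# are standard IETF identifiers; the variable names are hypothetical.
#
#   server_capabilities = [
#       'urn:ietf:params:netconf:capability:candidate:1.0',
#       'urn:ietf:params:netconf:capability:validate:1.1',
#       'urn:ietf:params:netconf:capability:writable-running:1.0',
#   ]
#   ops = netconf_plugin.get_device_operations(server_capabilities)
#   # ops['supports_commit'] is True, ops['lock_datastore'] == ['running', 'candidate']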
|
peterlauri/django
|
refs/heads/master
|
tests/timezones/admin.py
|
146
|
from django.contrib import admin
from .models import Event, Timestamp
class EventAdmin(admin.ModelAdmin):
list_display = ('dt',)
class TimestampAdmin(admin.ModelAdmin):
readonly_fields = ('created', 'updated')
site = admin.AdminSite(name='admin_tz')
site.register(Event, EventAdmin)
site.register(Timestamp, TimestampAdmin)
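# Illustrative note (not part of the original test fixture): a custom AdminSite
# such as 'admin_tz' above is exposed by wiring its URLs into a URLconf, for
# example (hypothetical urls.py):
#
#   from django.conf.urls import url
#   from .admin import site
#
#   urlpatterns = [url(r'^admin_tz/', site.urls)]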
|
gangadharkadam/office_erp
|
refs/heads/develop
|
erpnext/setup/page/setup_wizard/install_fixtures.py
|
35
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def install(country=None):
records = [
# address template
{'doctype':"Address Template", "country": country},
# item group
{'doctype': 'Item Group', 'item_group_name': _('All Item Groups'),
'is_group': 'Yes', 'parent_item_group': ''},
{'doctype': 'Item Group', 'item_group_name': _('Products'),
'is_group': 'No', 'parent_item_group': _('All Item Groups'), "show_in_website": 1 },
{'doctype': 'Item Group', 'item_group_name': _('Raw Material'),
'is_group': 'No', 'parent_item_group': _('All Item Groups') },
{'doctype': 'Item Group', 'item_group_name': _('Services'),
'is_group': 'No', 'parent_item_group': _('All Item Groups') },
{'doctype': 'Item Group', 'item_group_name': _('Sub Assemblies'),
'is_group': 'No', 'parent_item_group': _('All Item Groups') },
{'doctype': 'Item Group', 'item_group_name': _('Consumable'),
'is_group': 'No', 'parent_item_group': _('All Item Groups') },
# deduction type
{'doctype': 'Deduction Type', 'name': _('Income Tax'), 'description': _('Income Tax'), 'deduction_name': _('Income Tax')},
# earning type
{'doctype': 'Earning Type', 'name': _('Basic'), 'description': _('Basic'), 'earning_name': _('Basic'), 'taxable': 'Yes'},
# expense claim type
{'doctype': 'Expense Claim Type', 'name': _('Calls'), 'expense_type': _('Calls')},
{'doctype': 'Expense Claim Type', 'name': _('Food'), 'expense_type': _('Food')},
{'doctype': 'Expense Claim Type', 'name': _('Medical'), 'expense_type': _('Medical')},
{'doctype': 'Expense Claim Type', 'name': _('Others'), 'expense_type': _('Others')},
{'doctype': 'Expense Claim Type', 'name': _('Travel'), 'expense_type': _('Travel')},
# leave type
{'doctype': 'Leave Type', 'leave_type_name': _('Casual Leave'), 'name': _('Casual Leave'), 'is_encash': 1, 'is_carry_forward': 1, 'max_days_allowed': '3', },
{'doctype': 'Leave Type', 'leave_type_name': _('Compensatory Off'), 'name': _('Compensatory Off'), 'is_encash': 0, 'is_carry_forward': 0, },
{'doctype': 'Leave Type', 'leave_type_name': _('Sick Leave'), 'name': _('Sick Leave'), 'is_encash': 0, 'is_carry_forward': 0, },
{'doctype': 'Leave Type', 'leave_type_name': _('Privilege Leave'), 'name': _('Privilege Leave'), 'is_encash': 0, 'is_carry_forward': 0, },
{'doctype': 'Leave Type', 'leave_type_name': _('Leave Without Pay'), 'name': _('Leave Without Pay'), 'is_encash': 0, 'is_carry_forward': 0, 'is_lwp':1},
# Employment Type
{'doctype': 'Employment Type', 'employee_type_name': _('Full-time')},
{'doctype': 'Employment Type', 'employee_type_name': _('Part-time')},
{'doctype': 'Employment Type', 'employee_type_name': _('Probation')},
{'doctype': 'Employment Type', 'employee_type_name': _('Contract')},
{'doctype': 'Employment Type', 'employee_type_name': _('Commission')},
{'doctype': 'Employment Type', 'employee_type_name': _('Piecework')},
{'doctype': 'Employment Type', 'employee_type_name': _('Intern')},
{'doctype': 'Employment Type', 'employee_type_name': _('Apprentice')},
# Department
{'doctype': 'Department', 'department_name': _('Accounts')},
{'doctype': 'Department', 'department_name': _('Marketing')},
{'doctype': 'Department', 'department_name': _('Sales')},
{'doctype': 'Department', 'department_name': _('Purchase')},
{'doctype': 'Department', 'department_name': _('Operations')},
{'doctype': 'Department', 'department_name': _('Production')},
{'doctype': 'Department', 'department_name': _('Dispatch')},
{'doctype': 'Department', 'department_name': _('Customer Service')},
{'doctype': 'Department', 'department_name': _('Human Resources')},
{'doctype': 'Department', 'department_name': _('Management')},
{'doctype': 'Department', 'department_name': _('Quality Management')},
{'doctype': 'Department', 'department_name': _('Research & Development')},
{'doctype': 'Department', 'department_name': _('Legal')},
# Designation
{'doctype': 'Designation', 'designation_name': _('CEO')},
{'doctype': 'Designation', 'designation_name': _('Manager')},
{'doctype': 'Designation', 'designation_name': _('Analyst')},
{'doctype': 'Designation', 'designation_name': _('Engineer')},
{'doctype': 'Designation', 'designation_name': _('Accountant')},
{'doctype': 'Designation', 'designation_name': _('Secretary')},
{'doctype': 'Designation', 'designation_name': _('Associate')},
{'doctype': 'Designation', 'designation_name': _('Administrative Officer')},
{'doctype': 'Designation', 'designation_name': _('Business Development Manager')},
{'doctype': 'Designation', 'designation_name': _('HR Manager')},
{'doctype': 'Designation', 'designation_name': _('Project Manager')},
{'doctype': 'Designation', 'designation_name': _('Head of Marketing and Sales')},
{'doctype': 'Designation', 'designation_name': _('Software Developer')},
{'doctype': 'Designation', 'designation_name': _('Designer')},
{'doctype': 'Designation', 'designation_name': _('Assistant')},
{'doctype': 'Designation', 'designation_name': _('Researcher')},
# territory
{'doctype': 'Territory', 'territory_name': _('All Territories'), 'is_group': 'Yes', 'name': _('All Territories'), 'parent_territory': ''},
# customer group
{'doctype': 'Customer Group', 'customer_group_name': _('All Customer Groups'), 'is_group': 'Yes', 'name': _('All Customer Groups'), 'parent_customer_group': ''},
{'doctype': 'Customer Group', 'customer_group_name': _('Individual'), 'is_group': 'No', 'parent_customer_group': _('All Customer Groups')},
{'doctype': 'Customer Group', 'customer_group_name': _('Commercial'), 'is_group': 'No', 'parent_customer_group': _('All Customer Groups')},
{'doctype': 'Customer Group', 'customer_group_name': _('Non Profit'), 'is_group': 'No', 'parent_customer_group': _('All Customer Groups')},
{'doctype': 'Customer Group', 'customer_group_name': _('Government'), 'is_group': 'No', 'parent_customer_group': _('All Customer Groups')},
# supplier type
{'doctype': 'Supplier Type', 'supplier_type': _('Services')},
{'doctype': 'Supplier Type', 'supplier_type': _('Local')},
{'doctype': 'Supplier Type', 'supplier_type': _('Raw Material')},
{'doctype': 'Supplier Type', 'supplier_type': _('Electrical')},
{'doctype': 'Supplier Type', 'supplier_type': _('Hardware')},
{'doctype': 'Supplier Type', 'supplier_type': _('Pharmaceutical')},
{'doctype': 'Supplier Type', 'supplier_type': _('Distributor')},
# Sales Person
{'doctype': 'Sales Person', 'sales_person_name': _('Sales Team'), 'is_group': "Yes", "parent_sales_person": ""},
# UOM
{'uom_name': _('Unit'), 'doctype': 'UOM', 'name': _('Unit'), "must_be_whole_number": 1},
{'uom_name': _('Box'), 'doctype': 'UOM', 'name': _('Box'), "must_be_whole_number": 1},
{'uom_name': _('Kg'), 'doctype': 'UOM', 'name': _('Kg')},
{'uom_name': _('Nos'), 'doctype': 'UOM', 'name': _('Nos'), "must_be_whole_number": 1},
{'uom_name': _('Pair'), 'doctype': 'UOM', 'name': _('Pair'), "must_be_whole_number": 1},
{'uom_name': _('Set'), 'doctype': 'UOM', 'name': _('Set'), "must_be_whole_number": 1},
{'uom_name': _('Hour'), 'doctype': 'UOM', 'name': _('Hour')},
{'uom_name': _('Minute'), 'doctype': 'UOM', 'name': _('Minute')},
# Mode of Payment
{'doctype': 'Mode of Payment', 'mode_of_payment': 'Check' if country=="United States" else _('Cheque')},
{'doctype': 'Mode of Payment', 'mode_of_payment': _('Cash')},
{'doctype': 'Mode of Payment', 'mode_of_payment': _('Credit Card')},
{'doctype': 'Mode of Payment', 'mode_of_payment': _('Wire Transfer')},
{'doctype': 'Mode of Payment', 'mode_of_payment': _('Bank Draft')},
# Activity Type
{'doctype': 'Activity Type', 'activity_type': _('Planning')},
{'doctype': 'Activity Type', 'activity_type': _('Research')},
{'doctype': 'Activity Type', 'activity_type': _('Proposal Writing')},
{'doctype': 'Activity Type', 'activity_type': _('Execution')},
{'doctype': 'Activity Type', 'activity_type': _('Communication')},
# Industry Type
{'doctype': 'Industry Type', 'industry': _('Accounting')},
{'doctype': 'Industry Type', 'industry': _('Advertising')},
{'doctype': 'Industry Type', 'industry': _('Aerospace')},
{'doctype': 'Industry Type', 'industry': _('Agriculture')},
{'doctype': 'Industry Type', 'industry': _('Airline')},
{'doctype': 'Industry Type', 'industry': _('Apparel & Accessories')},
{'doctype': 'Industry Type', 'industry': _('Automotive')},
{'doctype': 'Industry Type', 'industry': _('Banking')},
{'doctype': 'Industry Type', 'industry': _('Biotechnology')},
{'doctype': 'Industry Type', 'industry': _('Broadcasting')},
{'doctype': 'Industry Type', 'industry': _('Brokerage')},
{'doctype': 'Industry Type', 'industry': _('Chemical')},
{'doctype': 'Industry Type', 'industry': _('Computer')},
{'doctype': 'Industry Type', 'industry': _('Consulting')},
{'doctype': 'Industry Type', 'industry': _('Consumer Products')},
{'doctype': 'Industry Type', 'industry': _('Cosmetics')},
{'doctype': 'Industry Type', 'industry': _('Defense')},
{'doctype': 'Industry Type', 'industry': _('Department Stores')},
{'doctype': 'Industry Type', 'industry': _('Education')},
{'doctype': 'Industry Type', 'industry': _('Electronics')},
{'doctype': 'Industry Type', 'industry': _('Energy')},
{'doctype': 'Industry Type', 'industry': _('Entertainment & Leisure')},
{'doctype': 'Industry Type', 'industry': _('Executive Search')},
{'doctype': 'Industry Type', 'industry': _('Financial Services')},
{'doctype': 'Industry Type', 'industry': _('Food, Beverage & Tobacco')},
{'doctype': 'Industry Type', 'industry': _('Grocery')},
{'doctype': 'Industry Type', 'industry': _('Health Care')},
{'doctype': 'Industry Type', 'industry': _('Internet Publishing')},
{'doctype': 'Industry Type', 'industry': _('Investment Banking')},
{'doctype': 'Industry Type', 'industry': _('Legal')},
{'doctype': 'Industry Type', 'industry': _('Manufacturing')},
{'doctype': 'Industry Type', 'industry': _('Motion Picture & Video')},
{'doctype': 'Industry Type', 'industry': _('Music')},
{'doctype': 'Industry Type', 'industry': _('Newspaper Publishers')},
{'doctype': 'Industry Type', 'industry': _('Online Auctions')},
{'doctype': 'Industry Type', 'industry': _('Pension Funds')},
{'doctype': 'Industry Type', 'industry': _('Pharmaceuticals')},
{'doctype': 'Industry Type', 'industry': _('Private Equity')},
{'doctype': 'Industry Type', 'industry': _('Publishing')},
{'doctype': 'Industry Type', 'industry': _('Real Estate')},
{'doctype': 'Industry Type', 'industry': _('Retail & Wholesale')},
{'doctype': 'Industry Type', 'industry': _('Securities & Commodity Exchanges')},
{'doctype': 'Industry Type', 'industry': _('Service')},
{'doctype': 'Industry Type', 'industry': _('Soap & Detergent')},
{'doctype': 'Industry Type', 'industry': _('Software')},
{'doctype': 'Industry Type', 'industry': _('Sports')},
{'doctype': 'Industry Type', 'industry': _('Technology')},
{'doctype': 'Industry Type', 'industry': _('Telecommunications')},
{'doctype': 'Industry Type', 'industry': _('Television')},
{'doctype': 'Industry Type', 'industry': _('Transportation')},
{'doctype': 'Industry Type', 'industry': _('Venture Capital')}
]
from frappe.modules import scrub
for r in records:
doc = frappe.new_doc(r.get("doctype"))
doc.update(r)
# ignore mandatory for root
parent_link_field = ("parent_" + scrub(doc.doctype))
if doc.meta.get_field(parent_link_field) and not doc.get(parent_link_field):
doc.ignore_mandatory = True
doc.insert()
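# Illustrative sketch (not part of the original module): the fixture installer
# above is typically invoked once from the setup wizard with the company's
# country, for example:
#
#   from erpnext.setup.page.setup_wizard.install_fixtures import install
#   install(country="United States")  # seeds "Check" instead of "Cheque", etc.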
|
samsu/neutron
|
refs/heads/master
|
tests/unit/services/metering/test_metering_plugin.py
|
6
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.api.v2 import attributes as attr
from neutron.common import constants as n_constants
from neutron.common import topics
from neutron import context
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db
from neutron.db.metering import metering_rpc
from neutron.extensions import l3 as ext_l3
from neutron.extensions import metering as ext_metering
from neutron import manager
from neutron.openstack.common import timeutils
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests.unit.db.metering import test_db_metering
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_l3_plugin
_uuid = uuidutils.generate_uuid
METERING_SERVICE_PLUGIN_KLASS = (
"neutron.services.metering."
"metering_plugin.MeteringPlugin"
)
class MeteringTestExtensionManager(object):
def get_resources(self):
attr.RESOURCE_ATTRIBUTE_MAP.update(ext_metering.RESOURCE_ATTRIBUTE_MAP)
attr.RESOURCE_ATTRIBUTE_MAP.update(ext_l3.RESOURCE_ATTRIBUTE_MAP)
l3_res = ext_l3.L3.get_resources()
metering_res = ext_metering.Metering.get_resources()
return l3_res + metering_res
def get_actions(self):
return []
def get_request_extensions(self):
return []
class TestMeteringPlugin(test_db_plugin.NeutronDbPluginV2TestCase,
test_l3_plugin.L3NatTestCaseMixin,
test_db_metering.MeteringPluginDbTestCaseMixin):
resource_prefix_map = dict(
(k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING])
for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys()
)
def setUp(self):
plugin = 'neutron.tests.unit.test_l3_plugin.TestL3NatIntPlugin'
service_plugins = {'metering_plugin_name':
METERING_SERVICE_PLUGIN_KLASS}
ext_mgr = MeteringTestExtensionManager()
super(TestMeteringPlugin, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
uuid = 'neutron.openstack.common.uuidutils.generate_uuid'
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
fanout = ('neutron.common.rpc.RpcProxy.fanout_cast')
self.fanout_patch = mock.patch(fanout)
self.mock_fanout = self.fanout_patch.start()
self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
self.ctx = context.Context('', self.tenant_id, is_admin=True)
self.context_patch = mock.patch('neutron.context.Context',
return_value=self.ctx)
self.mock_context = self.context_patch.start()
self.topic = 'metering_agent'
def test_add_metering_label_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected = {'args': {'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'add_metering_label'}
tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206'
self.mock_uuid.return_value = second_uuid
with self.router(name='router2', tenant_id=tenant_id_2,
set_context=True):
self.mock_uuid.return_value = self.uuid
with self.router(name='router1', tenant_id=self.tenant_id,
set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
self.mock_fanout.assert_called_with(self.ctx, expected)
def test_add_metering_label_shared_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected = {'args': {'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid},
{'rules': [],
'id': second_uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'add_metering_label'}
tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206'
with self.router(name='router1', tenant_id=self.tenant_id,
set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
self.mock_uuid.return_value = second_uuid
with self.metering_label(tenant_id=tenant_id_2, shared=True,
set_context=True):
self.mock_fanout.assert_called_with(self.ctx, expected)
def test_remove_metering_label_rpc_call(self):
expected = {'args':
{'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'add_metering_label'}
with self.router(tenant_id=self.tenant_id, set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
self.mock_fanout.assert_called_with(self.ctx, expected)
expected['method'] = 'remove_metering_label'
self.mock_fanout.assert_called_with(self.ctx, expected)
def test_remove_one_metering_label_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected_add = {'args':
{'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid},
{'rules': [],
'id': second_uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'add_metering_label'}
expected_remove = {'args':
{'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'remove_metering_label'}
with self.router(tenant_id=self.tenant_id, set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
self.mock_uuid.return_value = second_uuid
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
self.mock_fanout.assert_called_with(self.ctx, expected_add)
self.mock_fanout.assert_called_with(self.ctx, expected_remove)
def test_update_metering_label_rules_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected_add = {'args':
{'routers': [
{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [
{'remote_ip_prefix': '10.0.0.0/24',
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': self.uuid},
{'remote_ip_prefix': '10.0.0.0/24',
'direction': 'egress',
'metering_label_id': self.uuid,
'excluded': False,
'id': second_uuid}],
'id': self.uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'update_metering_label_rules'}
expected_del = {'args':
{'routers': [
{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [
{'remote_ip_prefix': '10.0.0.0/24',
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': self.uuid}],
'id': self.uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'update_metering_label_rules'}
with self.router(tenant_id=self.tenant_id, set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True) as label:
l = label['metering_label']
with self.metering_label_rule(l['id']):
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(l['id'], direction='egress'):
self.mock_fanout.assert_called_with(self.ctx,
expected_add)
self.mock_fanout.assert_called_with(self.ctx,
expected_del)
def test_delete_metering_label_does_not_clear_router_tenant_id(self):
tenant_id = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
with self.metering_label(tenant_id=tenant_id,
do_delete=False) as metering_label:
with self.router(tenant_id=tenant_id, set_context=True) as r:
router = self._show('routers', r['router']['id'])
self.assertEqual(tenant_id, router['router']['tenant_id'])
metering_label_id = metering_label['metering_label']['id']
self._delete('metering-labels', metering_label_id, 204)
router = self._show('routers', r['router']['id'])
self.assertEqual(tenant_id, router['router']['tenant_id'])
class TestMeteringPluginL3AgentScheduler(
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
test_db_plugin.NeutronDbPluginV2TestCase,
test_l3_plugin.L3NatTestCaseMixin,
test_db_metering.MeteringPluginDbTestCaseMixin):
resource_prefix_map = dict(
(k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING])
for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys()
)
def setUp(self, plugin_str=None, service_plugins=None, scheduler=None):
if not plugin_str:
plugin_str = ('neutron.tests.unit.test_l3_plugin.'
'TestL3NatIntAgentSchedulingPlugin')
if not service_plugins:
service_plugins = {'metering_plugin_name':
METERING_SERVICE_PLUGIN_KLASS}
if not scheduler:
scheduler = plugin_str
ext_mgr = MeteringTestExtensionManager()
super(TestMeteringPluginL3AgentScheduler,
self).setUp(plugin=plugin_str, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
uuid = 'neutron.openstack.common.uuidutils.generate_uuid'
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
cast = 'neutron.common.rpc.RpcProxy.cast'
self.cast_patch = mock.patch(cast)
self.mock_cast = self.cast_patch.start()
self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
self.ctx = context.Context('', self.tenant_id, is_admin=True)
self.context_patch = mock.patch('neutron.context.Context',
return_value=self.ctx)
self.mock_context = self.context_patch.start()
self.l3routers_patch = mock.patch(scheduler +
'.get_l3_agents_hosting_routers')
self.l3routers_mock = self.l3routers_patch.start()
self.topic = 'metering_agent'
def test_add_metering_label_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected1 = {'args': {'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'add_metering_label'}
expected2 = {'args': {'routers': [{'status': 'ACTIVE',
'name': 'router2',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid}],
'id': second_uuid}]},
'namespace': None,
'method': 'add_metering_label'}
# bind each router to a specific agent
agent1 = agents_db.Agent(host='agent1')
agent2 = agents_db.Agent(host='agent2')
agents = {self.uuid: agent1,
second_uuid: agent2}
def side_effect(context, routers, admin_state_up, active):
return [agents[routers[0]]]
self.l3routers_mock.side_effect = side_effect
with self.router(name='router1', tenant_id=self.tenant_id,
set_context=True):
self.mock_uuid.return_value = second_uuid
with self.router(name='router2', tenant_id=self.tenant_id,
set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
topic1 = "%s.%s" % (self.topic, 'agent1')
topic2 = "%s.%s" % (self.topic, 'agent2')
# check if there is a call per agent
expected = [mock.call(self.ctx, expected1, topic=topic1),
mock.call(self.ctx, expected2, topic=topic2)]
self.mock_cast.assert_has_calls(expected, any_order=True)
class TestMeteringPluginL3AgentSchedulerServicePlugin(
TestMeteringPluginL3AgentScheduler):
"""Unit tests for the case where separate service plugin
implements L3 routing.
"""
def setUp(self):
l3_plugin = ('neutron.tests.unit.test_l3_plugin.'
'TestL3NatAgentSchedulingServicePlugin')
service_plugins = {'metering_plugin_name':
METERING_SERVICE_PLUGIN_KLASS,
'l3_plugin_name': l3_plugin}
plugin_str = ('neutron.tests.unit.test_l3_plugin.'
'TestNoL3NatPlugin')
super(TestMeteringPluginL3AgentSchedulerServicePlugin, self).setUp(
plugin_str=plugin_str, service_plugins=service_plugins,
scheduler=l3_plugin)
class TestMeteringPluginRpcFromL3Agent(
test_db_plugin.NeutronDbPluginV2TestCase,
test_l3_plugin.L3NatTestCaseMixin,
test_db_metering.MeteringPluginDbTestCaseMixin):
resource_prefix_map = dict(
(k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING])
for k in ext_metering.RESOURCE_ATTRIBUTE_MAP
)
def setUp(self):
service_plugins = {'metering_plugin_name':
METERING_SERVICE_PLUGIN_KLASS}
plugin = ('neutron.tests.unit.test_l3_plugin.'
'TestL3NatIntAgentSchedulingPlugin')
ext_mgr = MeteringTestExtensionManager()
super(TestMeteringPluginRpcFromL3Agent,
self).setUp(plugin=plugin, service_plugins=service_plugins,
ext_mgr=ext_mgr)
self.meter_plugin = manager.NeutronManager.get_service_plugins().get(
constants.METERING)
self.tenant_id = 'admin_tenant_id'
self.tenant_id_1 = 'tenant_id_1'
self.tenant_id_2 = 'tenant_id_2'
self.adminContext = context.get_admin_context()
self._register_l3_agent('agent1')
def _register_l3_agent(self, host):
agent = {
'binary': 'neutron-l3-agent',
'host': host,
'topic': topics.L3_AGENT,
'configurations': {},
'agent_type': n_constants.AGENT_TYPE_L3,
'start_flag': True
}
callback = agents_db.AgentExtRpcCallback()
callback.report_state(self.adminContext,
agent_state={'agent_state': agent},
time=timeutils.strtime())
def test_get_sync_data_metering(self):
with self.subnet() as subnet:
s = subnet['subnet']
self._set_net_external(s['network_id'])
with self.router(name='router1', subnet=subnet) as router:
r = router['router']
self._add_external_gateway_to_router(r['id'], s['network_id'])
with self.metering_label(tenant_id=r['tenant_id']):
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(self.adminContext,
host='agent1')
self.assertEqual('router1', data[0]['name'])
self._register_l3_agent('agent2')
data = callbacks.get_sync_data_metering(self.adminContext,
host='agent2')
self.assertFalse(data)
self._remove_external_gateway_from_router(
r['id'], s['network_id'])
def test_get_sync_data_metering_shared(self):
with self.router(name='router1', tenant_id=self.tenant_id_1):
with self.router(name='router2', tenant_id=self.tenant_id_2):
with self.metering_label(tenant_id=self.tenant_id,
shared=True):
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(self.adminContext)
routers = [router['name'] for router in data]
self.assertIn('router1', routers)
self.assertIn('router2', routers)
def test_get_sync_data_metering_not_shared(self):
with self.router(name='router1', tenant_id=self.tenant_id_1):
with self.router(name='router2', tenant_id=self.tenant_id_2):
with self.metering_label(tenant_id=self.tenant_id):
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(self.adminContext)
routers = [router['name'] for router in data]
self.assertEqual([], routers)
|
stonegithubs/odoo
|
refs/heads/8.0
|
addons/account_check_writing/wizard/account_check_batch_printing.py
|
339
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import fields, osv
class account_check_write(osv.osv_memory):
_name = 'account.check.write'
_description = 'Print Check in Batch'
_columns = {
'check_number': fields.integer('Next Check Number', required=True, help="The number of the next check to be printed."),
}
def _get_next_number(self, cr, uid, context=None):
dummy, sequence_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_check_writing', 'sequence_check_number')
return self.pool.get('ir.sequence').read(cr, uid, [sequence_id], ['number_next'])[0]['number_next']
_defaults = {
'check_number': _get_next_number,
}
def print_check_write(self, cr, uid, ids, context=None):
if context is None:
context = {}
voucher_obj = self.pool.get('account.voucher')
ir_sequence_obj = self.pool.get('ir.sequence')
#update the sequence to number the checks from the value entered in the wizard
dummy, sequence_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_check_writing', 'sequence_check_number')
increment = ir_sequence_obj.read(cr, uid, [sequence_id], ['number_increment'])[0]['number_increment']
new_value = self.browse(cr, uid, ids[0], context=context).check_number
ir_sequence_obj.write(cr, uid, sequence_id, {'number_next': new_value})
#validate the checks so that they get a number
voucher_ids = context.get('active_ids', [])
for check in voucher_obj.browse(cr, uid, voucher_ids, context=context):
new_value += increment
if check.number:
raise osv.except_osv(_('Error!'),_("One of the printed checks already has a number."))
voucher_obj.proforma_voucher(cr, uid, voucher_ids, context=context)
#update the sequence again (because the assignment using next_val was made during the same
#transaction as the first sequence update)
ir_sequence_obj.write(cr, uid, sequence_id, {'number_next': new_value})
#print the checks
data = {
'id': voucher_ids and voucher_ids[0],
'ids': voucher_ids,
}
return self.pool['report'].get_action(
cr, uid, [], 'account_check_writing.report_check', data=data, context=context
)
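# Illustrative sketch (not part of the original module): opening the wizard on a
# selection of account.voucher checks and printing them. The variable names are
# hypothetical; 'active_ids' in the context carries the checks to print.
#
#   wizard_obj = self.pool.get('account.check.write')
#   ctx = dict(context or {}, active_ids=voucher_ids)
#   wizard_id = wizard_obj.create(cr, uid, {'check_number': 501}, context=ctx)
#   report_action = wizard_obj.print_check_write(cr, uid, [wizard_id], context=ctx)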
|
salguarnieri/intellij-community
|
refs/heads/master
|
python/testData/formatter/spaceWithinDeclarationParentheses.py
|
54
|
def foo(x, y):
pass
|
denys-duchier/Scolar
|
refs/heads/master
|
sco_page_etud.py
|
1
|
# -*- mode: python -*-
# -*- coding: iso8859-15 -*-
##############################################################################
#
# Gestion scolarite IUT
#
# Copyright (c) 2001 - 2013 Emmanuel Viennet. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Emmanuel Viennet emmanuel.viennet@viennet.net
#
##############################################################################
"""ScoDoc ficheEtud
Fiche description d'un étudiant et de son parcours
"""
from sco_utils import *
from notesdb import *
import scolars
import sco_photos
import sco_groups
from scolars import format_telephone, format_pays, make_etud_args
from sco_formsemestre_status import makeMenu
from sco_bulletins import etud_descr_situation_semestre
import sco_parcours_dut
from sco_formsemestre_validation import formsemestre_recap_parcours_table
import sco_archives_etud
def _menuScolarite(context, authuser, sem, etudid):
"""HTML pour menu "scolarite" pour un etudiant dans un semestre.
Le contenu du menu depend des droits de l'utilisateur et de l'état de l'étudiant.
"""
locked = (sem['etat'] != '1')
if locked:
lockicon = context.icons.lock32_img.tag(title="verrouillé", border='0')
return lockicon # no menu
if not authuser.has_permission(ScoEtudInscrit,context) and not authuser.has_permission(ScoEtudChangeGroups,context):
return '' # no menu
ins = sem['ins']
args = { 'etudid' : etudid,
'formsemestre_id' : ins['formsemestre_id'] }
if ins['etat'] != 'D':
dem_title = 'Démission'
dem_url = 'formDem?etudid=%(etudid)s&formsemestre_id=%(formsemestre_id)s' % args
else:
dem_title = 'Annuler la démission'
dem_url = 'doCancelDem?etudid=%(etudid)s&formsemestre_id=%(formsemestre_id)s' % args
# Note: only a registered student (state 'I') can be declared as having dropped out ("défaillant").
if ins['etat'] != 'DEF':
def_title = 'Déclarer défaillance'
def_url = 'formDef?etudid=%(etudid)s&formsemestre_id=%(formsemestre_id)s' % args
elif ins['etat'] == 'DEF':
def_title = 'Annuler la défaillance'
def_url = 'doCancelDef?etudid=%(etudid)s&formsemestre_id=%(formsemestre_id)s' % args
def_enabled = (ins['etat'] != 'D') and authuser.has_permission(ScoEtudInscrit,context) and not locked
items = [
# { 'title' : 'Changer de groupe',
# 'url' : 'formChangeGroup?etudid=%s&formsemestre_id=%s' % (etudid,ins['formsemestre_id']),
# 'enabled' : authuser.has_permission(ScoEtudChangeGroups,context) and not locked,
# },
{ 'title' : dem_title,
'url' : dem_url,
'enabled' : authuser.has_permission(ScoEtudInscrit,context) and not locked
},
{ 'title' : "Validation du semestre (jury)",
'url' : "Notes/formsemestre_validation_etud_form?etudid=%(etudid)s&formsemestre_id=%(formsemestre_id)s" % args,
'enabled' : authuser.has_permission(ScoEtudInscrit,context) and not locked
},
{ 'title' : def_title,
'url' : def_url,
'enabled' : def_enabled
},
{ 'title' : "Inscrire à un module optionnel (ou au sport)",
'url' : "Notes/formsemestre_inscription_option?formsemestre_id=%(formsemestre_id)s&etudid=%(etudid)s" % args,
'enabled' : authuser.has_permission(ScoEtudInscrit,context) and not locked
},
{ 'title' : "Désinscrire (en cas d'erreur)",
'url' : "Notes/formsemestre_desinscription?formsemestre_id=%(formsemestre_id)s&etudid=%(etudid)s" % args,
'enabled' : authuser.has_permission(ScoEtudInscrit,context) and not locked
},
{ 'title' : "Inscrire à un autre semestre",
'url' : "Notes/formsemestre_inscription_with_modules_form?etudid=%(etudid)s" % args,
'enabled' : authuser.has_permission(ScoEtudInscrit,context)
},
]
return makeMenu( "Scolarité", items, cssclass="direction_etud", elem='span' )
def ficheEtud(context, etudid=None, REQUEST=None):
"fiche d'informations sur un etudiant"
authuser = REQUEST.AUTHENTICATED_USER
cnx = context.GetDBConnexion()
args = make_etud_args(etudid=etudid,REQUEST=REQUEST)
etuds = scolars.etudident_list(cnx, args)
if not etuds:
raise ScoValueError('Etudiant inexistant !')
etud = etuds[0]
etudid = etud['etudid']
context.fillEtudsInfo([etud])
#
info = etud
info['ScoURL'] = context.ScoURL()
info['authuser'] = authuser
info['info_naissance'] = info['date_naissance']
if info['lieu_naissance']:
info['info_naissance'] += ' à ' + info['lieu_naissance']
info['etudfoto'] = sco_photos.etud_photo_html(context, etud, REQUEST=REQUEST)
if ((not info['domicile']) and (not info['codepostaldomicile'])
and (not info['villedomicile'])):
info['domicile'] ='<em>inconnue</em>'
if info['paysdomicile']:
pays = format_pays(info['paysdomicile'])
if pays:
info['paysdomicile'] = '(%s)' % pays
else:
info['paysdomicile'] = ''
if info['telephone'] or info['telephonemobile']:
info['telephones'] = '<br/>%s %s' % (info['telephonestr'],
info['telephonemobilestr'])
else:
info['telephones'] = ''
# fields depending on user permissions
if authuser.has_permission(ScoEtudChangeAdr,context):
info['modifadresse'] = '<a class="stdlink" href="formChangeCoordonnees?etudid=%s">modifier adresse</a>' % etudid
else:
info['modifadresse'] = ''
# Groups:
sco_groups.etud_add_group_infos(context, info, info['cursem'])
# Student's course history
if info['sems']:
info['last_formsemestre_id'] = info['sems'][0]['formsemestre_id']
else:
info['last_formsemestre_id'] = ''
sem_info={}
for sem in info['sems']:
if sem['ins']['etat'] != 'I':
descr, junk = etud_descr_situation_semestre(context.Notes, etudid, sem['formsemestre_id'], info['ne'], show_date_inscr=False)
grlink = '<span class="fontred">%s</span>' % descr['situation']
else:
group = sco_groups.get_etud_main_group(context, etudid, sem)
if group['partition_name']:
gr_name = group['group_name']
else:
gr_name = 'tous'
grlink = '<a class="discretelink" href="group_list?group_id=%s" title="Liste du groupe">groupe %s</a>' % (group['group_id'], gr_name)
# info added to the semester entry in the course history (group, menu)
menu = _menuScolarite(context, authuser, sem, etudid)
if menu:
sem_info[sem['formsemestre_id']] = '<table><tr><td>'+grlink + '</td><td>' + menu + '</td></tr></table>'
else:
sem_info[sem['formsemestre_id']] = grlink
if info['sems']:
Se = sco_parcours_dut.SituationEtudParcours(context.Notes, etud, info['last_formsemestre_id'])
info['liste_inscriptions'] = formsemestre_recap_parcours_table(
context.Notes, Se, etudid, with_links=False, sem_info=sem_info, with_all_columns=False,
a_url='Notes/')
else:
# not registered
l = ['<p><b>Etudiant%s non inscrit%s'%(info['ne'],info['ne'])]
if authuser.has_permission(ScoEtudInscrit,context):
l.append('<a href="%s/Notes/formsemestre_inscription_with_modules_form?etudid=%s">inscrire</a></li>'%(context.ScoURL(),etudid))
l.append('</b></p>')
info['liste_inscriptions'] = '\n'.join(l)
# List of annotations
alist = []
annos = scolars.etud_annotations_list(cnx, args={ 'etudid' : etudid })
i = 0
for a in annos:
if i % 2: # XXX should be redone with CSS
a['bgcolor']="#EDEDED"
else:
a['bgcolor'] = "#DEDEDE"
i += 1
if not context.canSuppressAnnotation(a['id'], REQUEST):
a['dellink'] = ''
else:
a['dellink'] = '<td bgcolor="%s" class="annodel"><a href="doSuppressAnnotation?etudid=%s&annotation_id=%s">%s</a></td>' % (a['bgcolor'], etudid, a['id'], context.icons.delete_img.tag(border="0", alt="suppress", title="Supprimer cette annotation"))
alist.append('<tr><td bgcolor="%(bgcolor)s">Le %(date)s par <b>%(author)s</b> (%(zope_authenticated_user)s) :<br/>%(comment)s</td>%(dellink)s</tr>' % a )
info['liste_annotations'] = '\n'.join(alist)
# admission record
has_adm_notes = info['math'] or info['physique'] or info['anglais'] or info['francais']
has_bac_info = info['bac'] or info['specialite'] or info['annee_bac'] or info['rapporteur'] or info['commentaire']
if has_bac_info or has_adm_notes:
if has_adm_notes:
adm_tmpl = """<!-- Donnees admission -->
<div class="ficheadmission">
<div class="fichetitre">Informations admission</div>
<table>
<tr><th>Bac</th><th>Année</th><th>Math</th><th>Physique</th><th>Anglais</th><th>Français</th></tr>
<tr>
<td>%(bac)s (%(specialite)s)</td>
<td>%(annee_bac)s </td>
<td>%(math)s</td><td>%(physique)s</td><td>%(anglais)s</td><td>%(francais)s</td>
</tr>
</table>
<div>%(ilycee)s <em>%(rap)s</em></div>
</div>
"""
else:
adm_tmpl = """<!-- Donnees admission (pas de notes) -->
<div class="ficheadmission">
<div class="fichetitre">Informations admission</div>
<div>Bac %(bac)s (%(specialite)s) obtenu en %(annee_bac)s </div>
<div>%(ilycee)s <em>%(rap)s</em></div>
</div>
"""
else:
adm_tmpl = '' # no "admission info" box
info['adm_data'] = adm_tmpl % info
# Archived files:
info['fichiers_archive_htm'] = '<div class="ficheadmission"><div class="fichetitre">Fichiers associés</div>' + sco_archives_etud.etud_list_archives_html(context, REQUEST, etudid) + '</div>'
# Student outcome ("devenir"):
has_debouche = info['debouche']
if has_debouche:
info['debouche_html'] = """<div class="fichedebouche"><span class="debouche_tit">Devenir:</span><span>%s</span></div>""" % info['debouche']
else:
info['debouche_html'] = '' # no "outcome" box
#
if info['liste_annotations']:
info['tit_anno'] = '<div class="fichetitre">Annotations</div>'
else:
info['tit_anno'] = ''
# Registrations
if info['sems']:
rcl = """(<a href="%(ScoURL)s/Notes/formsemestre_validation_etud_form?check=1&etudid=%(etudid)s&formsemestre_id=%(last_formsemestre_id)s&desturl=ficheEtud?etudid=%(etudid)s">récapitulatif parcours</a>)""" % info
else:
rcl = ''
info['inscriptions_mkup'] = """<div class="ficheinscriptions" id="ficheinscriptions">
<div class="fichetitre">Parcours</div>%s
</div>""" % info['liste_inscriptions']
#
if info['groupes'].strip():
info['groupes_row'] = '<tr><td class="fichetitre2">Groupe :</td><td>%(groupes)s</td></tr>'%info
else:
info['groupes_row'] = ''
info['menus_etud'] = menus_etud(context,REQUEST)
tmpl = """<div class="menus_etud">%(menus_etud)s</div>
<div class="ficheEtud" id="ficheEtud"><table>
<tr><td>
<h2>%(nomprenom)s (%(inscription)s)</h2>
<span>%(emaillink)s</span>
</td><td class="photocell">
<a href="etud_photo_orig_page?etudid=%(etudid)s">%(etudfoto)s</a>
</td></tr></table>
<div class="fichesituation">
<div class="fichetablesitu">
<table>
<tr><td class="fichetitre2">Situation :</td><td>%(situation)s</td></tr>
%(groupes_row)s
<tr><td class="fichetitre2">Né%(ne)s le :</td><td>%(info_naissance)s</td></tr>
</table>
<!-- Adresse -->
<div class="ficheadresse" id="ficheadresse">
<table><tr>
<td class="fichetitre2">Adresse :</td><td> %(domicile)s %(codepostaldomicile)s %(villedomicile)s %(paysdomicile)s
%(modifadresse)s
%(telephones)s
</td></tr></table>
</div>
</div>
</div>
%(inscriptions_mkup)s
%(adm_data)s
%(fichiers_archive_htm)s
%(debouche_html)s
<div class="ficheannotations">
%(tit_anno)s
<table width="95%%">%(liste_annotations)s</table>
<form action="doAddAnnotation" method="GET" class="noprint">
<input type="hidden" name="etudid" value="%(etudid)s">
<b>Ajouter une annotation sur %(nomprenom)s: </b>
<table><tr>
<tr><td><textarea name="comment" rows="4" cols="50" value=""></textarea>
<br/><font size=-1><i>Balises HTML autorisées: b, a, i, br, p. Ces annotations sont lisibles par tous les enseignants et le secrétariat.</i></font>
</td></tr>
<tr><td>Auteur : <input type="text" name="author" width=12 value="%(authuser)s">
<input type="submit" value="Ajouter annotation"></td></tr>
</table>
</form>
</div>
<div class="code_nip">code NIP: %(code_nip)s</div>
</div>
"""
header = context.sco_header(
REQUEST,
page_title='Fiche étudiant %(prenom)s %(nom)s'%info,
javascripts=['jQuery/jquery.js', 'js/recap_parcours.js'])
return header + tmpl % info + context.sco_footer(REQUEST)
def menus_etud(context, REQUEST=None):
"""Menu etudiant (operations sur l'etudiant)
"""
if not REQUEST.form.has_key('etudid'):
return ''
authuser = REQUEST.AUTHENTICATED_USER
etud = context.getEtudInfo(filled=1, REQUEST=REQUEST)[0]
menuEtud = [
{ 'title' : '%(sexe)s %(prenom)s %(nom)s' % etud,
'url' : 'ficheEtud?etudid=%(etudid)s' % etud,
'enabled' : True,
'helpmsg' : 'Fiche étudiant'
},
{ 'title' : 'Changer la photo',
'url' : 'formChangePhoto?etudid=%(etudid)s' % etud,
'enabled' : authuser.has_permission(ScoEtudChangeAdr,context),
},
{ 'title' : 'Changer les données identité/admission',
'url' : 'etudident_edit_form?etudid=%(etudid)s' % etud,
'enabled' : authuser.has_permission(ScoEtudInscrit,context),
},
{ 'title' : 'Supprimer cet étudiant...',
'url' : 'etudident_delete?etudid=%(etudid)s' % etud,
'enabled' : authuser.has_permission(ScoEtudInscrit,context),
},
{ 'title' : 'Voir le journal...',
'url' : 'showEtudLog?etudid=%(etudid)s' % etud,
'enabled' : True,
},
]
return makeMenu( 'Etudiant', menuEtud, base_url=context.absolute_url() + '/')
def etud_info_html(context, etudid, REQUEST=None, debug=False):
"""An HTML div with basic information and links about this etud.
Used for popup information windows.
"""
etud = context.getEtudInfo(filled=1, REQUEST=REQUEST)[0]
photo_html = sco_photos.etud_photo_html(context, etud, title='fiche de '+etud['nom'], REQUEST=REQUEST)
etud['photo_html'] = photo_html
H = """<div class="etud_info_div">
<div class="eid_left">
<span class="eid_nom">%(nomprenom)s</span>
</div>
<span class="eid_right">
%(photo_html)s
</span>
</div>""" % etud
if debug:
return context.standard_html_header(context) + H + context.standard_html_footer(context)
else:
return H
|
BigDataforYou/movie_recommendation_workshop_1
|
refs/heads/master
|
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/flask/__init__.py
|
42
|
# -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.11.1'
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as public interface.
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from jinja2 import Markup, escape
from .app import Flask, Request, Response
from .config import Config
from .helpers import url_for, flash, send_file, send_from_directory, \
get_flashed_messages, get_template_attribute, make_response, safe_join, \
stream_with_context
from .globals import current_app, g, request, session, _request_ctx_stack, \
_app_ctx_stack
from .ctx import has_request_context, has_app_context, \
after_this_request, copy_current_request_context
from .blueprints import Blueprint
from .templating import render_template, render_template_string
# the signals
from .signals import signals_available, template_rendered, request_started, \
request_finished, got_request_exception, request_tearing_down, \
appcontext_tearing_down, appcontext_pushed, \
appcontext_popped, message_flashed, before_render_template
# We're not exposing the actual json module but a convenient wrapper around
# it.
from . import json
# This was the only thing that flask used to export at one point and it had
# a more generic name.
jsonify = json.jsonify
# backwards compat, goes away in 1.0
from .sessions import SecureCookieSession as Session
json_available = True
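# --- Illustrative note (editorial, not part of the original module) ---
# A minimal sketch of how the names re-exported above are typically used,
# assuming a hypothetical application module; kept as comments so that
# importing flask stays free of side effects:
#
#     from flask import Flask, request, jsonify
#
#     app = Flask(__name__)
#
#     @app.route("/echo", methods=["POST"])
#     def echo():
#         # request and jsonify come straight from the names exported here
#         return jsonify(request.get_json())
#
#     if __name__ == "__main__":
#         app.run()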
|
neilpelow/wmap-django
|
refs/heads/master
|
venv/lib/python3.5/site-packages/django_extensions/management/mysql.py
|
8
|
# -*- coding: utf-8 -*-
from six.moves import configparser
def parse_mysql_cnf(dbinfo):
"""
Attempt to parse mysql database config file for connection settings.
Ideally we would hook into django's code to do this, but read_default_file is handled by the mysql C libs
so we have to emulate the behaviour
Settings that are missing will return ''
returns (user, password, database_name, database_host, database_port)
"""
read_default_file = dbinfo.get('OPTIONS', {}).get('read_default_file')
if read_default_file:
config = configparser.RawConfigParser({
'user': '',
'password': '',
'database': '',
'host': '',
'port': '',
'socket': '',
})
import os
config.read(os.path.expanduser(read_default_file))
try:
user = config.get('client', 'user')
password = config.get('client', 'password')
database_name = config.get('client', 'database')
database_host = config.get('client', 'host')
database_port = config.get('client', 'port')
socket = config.get('client', 'socket')
if database_host == 'localhost' and socket:
# mysql actually uses a socket if host is localhost
database_host = socket
return user, password, database_name, database_host, database_port
except configparser.NoSectionError:
pass
return '', '', '', '', ''
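# --- Illustrative usage sketch (editorial, not part of django_extensions) ---
# Feed parse_mysql_cnf() the same structure Django keeps in
# DATABASES['default'], pointing OPTIONS['read_default_file'] at a my.cnf
# with a [client] section. A missing file or missing section simply yields
# five empty strings, so this guarded demo is safe to run directly.
if __name__ == "__main__":
    example_dbinfo = {
        'ENGINE': 'django.db.backends.mysql',
        'OPTIONS': {'read_default_file': '~/.my.cnf'},  # hypothetical path
    }
    print(parse_mysql_cnf(example_dbinfo))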
|
spryle/james.spry-leverton.com
|
refs/heads/master
|
setup.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import re
from collections import namedtuple
from pip.req import parse_requirements
from setuptools import setup, find_packages
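# Editorial note (assumption about older pip behavior): parse_requirements()
# in the pip releases this setup.py targets expected the option object built
# by pip's own CLI. The namedtuple passed below stands in for it, supplying
# just the two attributes the parser reads (skip_requirements_regex and
# default_vcs) so requirements.txt can be parsed outside a pip invocation.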
options = requirements = list(parse_requirements(
os.path.join(os.path.dirname(__file__), 'requirements.txt'),
options=namedtuple('options', [
'skip_requirements_regex',
'default_vcs'
])(None, 'git')
))
install_requires = [
r.name for r in requirements if not r.editable
]
dependency_links = filter(None, [r.url for r in requirements])
version = re.search(
r"__version__\s*=\s*'(.*)'",
open('www/__init__.py').read(),
re.M
).group(1)
setup(
name='james.spry-leverton.com',
version=version,
description='James Spry-Levertons website',
author='James Spry-Leverton',
author_email='james@spry-leverton.com',
url='http://james.spry-leverton.com',
download_url='git+https://github.com/spryle/james.spry-leverton.com',
include_package_data=True,
packages=find_packages(),
setup_requires=['pip'],
install_requires=install_requires,
dependency_links=dependency_links,
)
|
clessg/linux
|
refs/heads/master
|
scripts/gdb/linux/lists.py
|
630
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# list tools
#
# Copyright (c) Thiebaud Weksteen, 2015
#
# Authors:
# Thiebaud Weksteen <thiebaud@weksteen.fr>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
list_head = utils.CachedType("struct list_head")
def list_check(head):
nb = 0
if (head.type == list_head.get_type().pointer()):
head = head.dereference()
elif (head.type != list_head.get_type()):
raise gdb.GdbError('argument must be of type (struct list_head [*])')
c = head
try:
gdb.write("Starting with: {}\n".format(c))
except gdb.MemoryError:
gdb.write('head is not accessible\n')
return
while True:
p = c['prev'].dereference()
n = c['next'].dereference()
try:
if p['next'] != c.address:
gdb.write('prev.next != current: '
'current@{current_addr}={current} '
'prev@{p_addr}={p}\n'.format(
current_addr=c.address,
current=c,
p_addr=p.address,
p=p,
))
return
except gdb.MemoryError:
gdb.write('prev is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
try:
if n['prev'] != c.address:
gdb.write('next.prev != current: '
'current@{current_addr}={current} '
'next@{n_addr}={n}\n'.format(
current_addr=c.address,
current=c,
n_addr=n.address,
n=n,
))
return
except gdb.MemoryError:
gdb.write('next is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
c = n
nb += 1
if c == head:
gdb.write("list is consistent: {} node(s)\n".format(nb))
return
class LxListChk(gdb.Command):
"""Verify a list consistency"""
def __init__(self):
super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if len(argv) != 1:
raise gdb.GdbError("lx-list-check takes one argument")
list_check(gdb.parse_and_eval(argv[0]))
LxListChk()
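# --- Usage sketch (editorial, not part of the original script) ---
# Once this file is sourced by the kernel's gdb helper scripts, the command
# registered above is invoked from the gdb prompt with a single expression
# of type struct list_head (the expression and node count are illustrative):
#
#     (gdb) lx-list-check init_task.tasks
#     list is consistent: 3 node(s)
#
# list_check() walks the prev/next pointers in both directions and reports
# the first inconsistency or inaccessible node it finds.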
|
marc-sensenich/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortios/fortios_config.py
|
9
|
#!/usr/bin/python
#
# Ansible module to manage configuration on fortios devices
# (c) 2016, Benjamin Jolivot <bjolivot@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: fortios_config
version_added: "2.3"
author: "Benjamin Jolivot (@bjolivot)"
short_description: Manage config on Fortinet FortiOS firewall devices
description:
- This module provides management of FortiOS Devices configuration.
extends_documentation_fragment: fortios
options:
src:
description:
- The I(src) argument provides a path to the configuration template
to load into the remote device.
filter:
description:
- Only for partial backup; you can restrict the scope by giving the expected configuration path (e.g. firewall address).
default: ""
notes:
- This module requires pyFG python library
"""
EXAMPLES = """
- name: Backup current config
fortios_config:
host: 192.168.0.254
username: admin
password: password
backup: yes
- name: Backup only address objects
fortios_config:
host: 192.168.0.254
username: admin
password: password
backup: yes
backup_path: /tmp/forti_backup/
filter: "firewall address"
- name: Update configuration from file
fortios_config:
host: 192.168.0.254
username: admin
password: password
src: new_configuration.conf.j2
"""
RETURN = """
running_config:
description: full config string
returned: always
type: str
change_string:
description: The commands really executed by the module
returned: only if config changed
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.fortios.fortios import fortios_argument_spec, fortios_required_if
from ansible.module_utils.network.fortios.fortios import backup
# check for pyFG lib
try:
from pyFG import FortiOS, FortiConfig
from pyFG.fortios import logger
from pyFG.exceptions import CommandExecutionException, FailedCommit, ForcedCommit
HAS_PYFG = True
except Exception:
HAS_PYFG = False
# some blocks don't support update, so remove them
NOT_UPDATABLE_CONFIG_OBJECTS = [
"vpn certificate local",
]
def main():
argument_spec = dict(
src=dict(type='str', default=None),
filter=dict(type='str', default=""),
)
argument_spec.update(fortios_argument_spec)
required_if = fortios_required_if
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=required_if,
)
result = dict(changed=False)
# fail if pyFG not present
if not HAS_PYFG:
module.fail_json(msg='Could not import the python library pyFG required by this module')
# define device
f = FortiOS(module.params['host'],
username=module.params['username'],
password=module.params['password'],
timeout=module.params['timeout'],
vdom=module.params['vdom'])
# connect
try:
f.open()
except Exception:
module.fail_json(msg='Error connecting to device')
# get config
try:
f.load_config(path=module.params['filter'])
result['running_config'] = f.running_config.to_text()
except Exception:
module.fail_json(msg='Error reading running config')
# backup config
if module.params['backup']:
backup(module, f.running_config.to_text())
# update config
if module.params['src'] is not None:
# store config in str
try:
conf_str = module.params['src']
f.load_config(in_candidate=True, config_text=conf_str)
except Exception:
module.fail_json(msg="Can't open configuration file, or configuration invalid")
# get updates lines
change_string = f.compare_config()
# remove not updatable parts
c = FortiConfig()
c.parse_config_output(change_string)
for o in NOT_UPDATABLE_CONFIG_OBJECTS:
c.del_block(o)
change_string = c.to_text()
if change_string != "":
result['change_string'] = change_string
result['changed'] = True
# Commit if not check mode
if module.check_mode is False and change_string != "":
try:
f.commit(change_string)
except CommandExecutionException as e:
module.fail_json(msg="Unable to execute command, check your args, the error was {0}".format(e.message))
except FailedCommit as e:
module.fail_json(msg="Unable to commit, check your args, the error was {0}".format(e.message))
except ForcedCommit as e:
module.fail_json(msg="Failed to force commit, check your args, the error was {0}".format(e.message))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
kmonsoor/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/test/test_stringtransport.py
|
56
|
# Copyright (c) 2009-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.test.proto_helpers}.
"""
from zope.interface.verify import verifyObject
from twisted.internet.interfaces import (ITransport, IPushProducer, IConsumer,
IReactorTCP, IReactorSSL, IReactorUNIX, IAddress, IListeningPort,
IConnector)
from twisted.internet.address import IPv4Address
from twisted.trial.unittest import TestCase
from twisted.test.proto_helpers import (StringTransport, MemoryReactor,
RaisingMemoryReactor)
from twisted.internet.protocol import ClientFactory, Factory
class StringTransportTests(TestCase):
"""
Tests for L{twisted.test.proto_helpers.StringTransport}.
"""
def setUp(self):
self.transport = StringTransport()
def test_interfaces(self):
"""
L{StringTransport} instances provide L{ITransport}, L{IPushProducer},
and L{IConsumer}.
"""
self.assertTrue(verifyObject(ITransport, self.transport))
self.assertTrue(verifyObject(IPushProducer, self.transport))
self.assertTrue(verifyObject(IConsumer, self.transport))
def test_registerProducer(self):
"""
L{StringTransport.registerProducer} records the arguments supplied to
it as instance attributes.
"""
producer = object()
streaming = object()
self.transport.registerProducer(producer, streaming)
self.assertIdentical(self.transport.producer, producer)
self.assertIdentical(self.transport.streaming, streaming)
def test_disallowedRegisterProducer(self):
"""
L{StringTransport.registerProducer} raises L{RuntimeError} if a
producer is already registered.
"""
producer = object()
self.transport.registerProducer(producer, True)
self.assertRaises(
RuntimeError, self.transport.registerProducer, object(), False)
self.assertIdentical(self.transport.producer, producer)
self.assertTrue(self.transport.streaming)
def test_unregisterProducer(self):
"""
L{StringTransport.unregisterProducer} causes the transport to forget
about the registered producer and makes it possible to register a new
one.
"""
oldProducer = object()
newProducer = object()
self.transport.registerProducer(oldProducer, False)
self.transport.unregisterProducer()
self.assertIdentical(self.transport.producer, None)
self.transport.registerProducer(newProducer, True)
self.assertIdentical(self.transport.producer, newProducer)
self.assertTrue(self.transport.streaming)
def test_invalidUnregisterProducer(self):
"""
L{StringTransport.unregisterProducer} raises L{RuntimeError} if called
when no producer is registered.
"""
self.assertRaises(RuntimeError, self.transport.unregisterProducer)
def test_initialProducerState(self):
"""
L{StringTransport.producerState} is initially C{'producing'}.
"""
self.assertEqual(self.transport.producerState, 'producing')
def test_pauseProducing(self):
"""
L{StringTransport.pauseProducing} changes the C{producerState} of the
transport to C{'paused'}.
"""
self.transport.pauseProducing()
self.assertEqual(self.transport.producerState, 'paused')
def test_resumeProducing(self):
"""
L{StringTransport.resumeProducing} changes the C{producerState} of the
transport to C{'producing'}.
"""
self.transport.pauseProducing()
self.transport.resumeProducing()
self.assertEqual(self.transport.producerState, 'producing')
def test_stopProducing(self):
"""
L{StringTransport.stopProducing} changes the C{'producerState'} of the
transport to C{'stopped'}.
"""
self.transport.stopProducing()
self.assertEqual(self.transport.producerState, 'stopped')
def test_stoppedTransportCannotPause(self):
"""
L{StringTransport.pauseProducing} raises L{RuntimeError} if the
transport has been stopped.
"""
self.transport.stopProducing()
self.assertRaises(RuntimeError, self.transport.pauseProducing)
def test_stoppedTransportCannotResume(self):
"""
L{StringTransport.resumeProducing} raises L{RuntimeError} if the
transport has been stopped.
"""
self.transport.stopProducing()
self.assertRaises(RuntimeError, self.transport.resumeProducing)
def test_disconnectingTransportCannotPause(self):
"""
L{StringTransport.pauseProducing} raises L{RuntimeError} if the
transport is being disconnected.
"""
self.transport.loseConnection()
self.assertRaises(RuntimeError, self.transport.pauseProducing)
def test_disconnectingTransportCannotResume(self):
"""
L{StringTransport.resumeProducing} raises L{RuntimeError} if the
transport is being disconnected.
"""
self.transport.loseConnection()
self.assertRaises(RuntimeError, self.transport.resumeProducing)
def test_loseConnectionSetsDisconnecting(self):
"""
L{StringTransport.loseConnection} toggles the C{disconnecting} instance
variable to C{True}.
"""
self.assertFalse(self.transport.disconnecting)
self.transport.loseConnection()
self.assertTrue(self.transport.disconnecting)
def test_specifiedHostAddress(self):
"""
If a host address is passed to L{StringTransport.__init__}, that
value is returned from L{StringTransport.getHost}.
"""
address = object()
self.assertIdentical(StringTransport(address).getHost(), address)
def test_specifiedPeerAddress(self):
"""
If a peer address is passed to L{StringTransport.__init__}, that
value is returned from L{StringTransport.getPeer}.
"""
address = object()
self.assertIdentical(
StringTransport(peerAddress=address).getPeer(), address)
def test_defaultHostAddress(self):
"""
If no host address is passed to L{StringTransport.__init__}, an
L{IPv4Address} is returned from L{StringTransport.getHost}.
"""
address = StringTransport().getHost()
self.assertIsInstance(address, IPv4Address)
def test_defaultPeerAddress(self):
"""
If no peer address is passed to L{StringTransport.__init__}, an
L{IPv4Address} is returned from L{StringTransport.getPeer}.
"""
address = StringTransport().getPeer()
self.assertIsInstance(address, IPv4Address)
class ReactorTests(TestCase):
"""
Tests for L{MemoryReactor} and L{RaisingMemoryReactor}.
"""
def test_memoryReactorProvides(self):
"""
L{MemoryReactor} provides all of the attributes described by the
interfaces it advertises.
"""
memoryReactor = MemoryReactor()
verifyObject(IReactorTCP, memoryReactor)
verifyObject(IReactorSSL, memoryReactor)
verifyObject(IReactorUNIX, memoryReactor)
def test_raisingReactorProvides(self):
"""
L{RaisingMemoryReactor} provides all of the attributes described by the
interfaces it advertises.
"""
raisingReactor = RaisingMemoryReactor()
verifyObject(IReactorTCP, raisingReactor)
verifyObject(IReactorSSL, raisingReactor)
verifyObject(IReactorUNIX, raisingReactor)
def test_connectDestination(self):
"""
L{MemoryReactor.connectTCP}, L{MemoryReactor.connectSSL}, and
L{MemoryReactor.connectUNIX} will return an L{IConnector} whose
C{getDestination} method returns an L{IAddress} with attributes which
reflect the values passed.
"""
memoryReactor = MemoryReactor()
for connector in [memoryReactor.connectTCP(
"test.example.com", 8321, ClientFactory()),
memoryReactor.connectSSL(
"test.example.com", 8321, ClientFactory(),
None)]:
verifyObject(IConnector, connector)
address = connector.getDestination()
verifyObject(IAddress, address)
self.assertEquals(address.host, "test.example.com")
self.assertEquals(address.port, 8321)
connector = memoryReactor.connectUNIX("/fake/path", ClientFactory())
verifyObject(IConnector, connector)
address = connector.getDestination()
verifyObject(IAddress, address)
self.assertEquals(address.name, "/fake/path")
def test_listenDefaultHost(self):
"""
L{MemoryReactor.listenTCP}, L{MemoryReactor.listenSSL} and
L{MemoryReactor.listenUNIX} will return an L{IListeningPort} whose
C{getHost} method returns an L{IAddress}; C{listenTCP} and C{listenSSL}
will have a default host of C{'0.0.0.0'}, and a port that reflects the
value passed, and C{listenUNIX} will have a name that reflects the path
passed.
"""
memoryReactor = MemoryReactor()
for port in [memoryReactor.listenTCP(8242, Factory()),
memoryReactor.listenSSL(8242, Factory(), None)]:
verifyObject(IListeningPort, port)
address = port.getHost()
verifyObject(IAddress, address)
self.assertEquals(address.host, '0.0.0.0')
self.assertEquals(address.port, 8242)
port = memoryReactor.listenUNIX("/path/to/socket", Factory())
verifyObject(IListeningPort, port)
address = port.getHost()
verifyObject(IAddress, address)
self.assertEquals(address.name, "/path/to/socket")
|
allenp/odoo
|
refs/heads/9.0
|
openerp/addons/base/res/__init__.py
|
44
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import res_country
import res_lang
import res_partner
import res_bank
import res_config
import res_currency
import res_font
import res_company
import res_users
import res_request
import res_lang
import ir_property
|
pawaranand/phrerp
|
refs/heads/develop
|
erpnext/accounts/doctype/pricing_rule/pricing_rule.py
|
31
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
import copy
from frappe import throw, _
from frappe.utils import flt, cint
from frappe.model.document import Document
class MultiplePricingRuleConflict(frappe.ValidationError): pass
class PricingRule(Document):
def validate(self):
self.validate_mandatory()
self.validate_applicable_for_selling_or_buying()
self.validate_min_max_qty()
self.cleanup_fields_value()
self.validate_price_or_discount()
self.validate_max_discount()
def validate_mandatory(self):
for field in ["apply_on", "applicable_for"]:
tocheck = frappe.scrub(self.get(field) or "")
if tocheck and not self.get(tocheck):
throw(_("{0} is required").format(self.meta.get_label(tocheck)), frappe.MandatoryError)
def validate_applicable_for_selling_or_buying(self):
if not self.selling and not self.buying:
throw(_("Atleast one of the Selling or Buying must be selected"))
if not self.selling and self.applicable_for in ["Customer", "Customer Group",
"Territory", "Sales Partner", "Campaign"]:
throw(_("Selling must be checked, if Applicable For is selected as {0}"
.format(self.applicable_for)))
if not self.buying and self.applicable_for in ["Supplier", "Supplier Type"]:
throw(_("Buying must be checked, if Applicable For is selected as {0}"
.format(self.applicable_for)))
def validate_min_max_qty(self):
if self.min_qty and self.max_qty and flt(self.min_qty) > flt(self.max_qty):
throw(_("Min Qty can not be greater than Max Qty"))
def cleanup_fields_value(self):
for logic_field in ["apply_on", "applicable_for", "price_or_discount"]:
fieldname = frappe.scrub(self.get(logic_field) or "")
# reset all values except for the logic field
options = (self.meta.get_options(logic_field) or "").split("\n")
for f in options:
if not f: continue
f = frappe.scrub(f)
if f!=fieldname:
self.set(f, None)
def validate_price_or_discount(self):
for field in ["Price", "Discount Percentage"]:
if flt(self.get(frappe.scrub(field))) < 0:
throw(_("{0} can not be negative").format(field))
def validate_max_discount(self):
if self.price_or_discount == "Discount Percentage" and self.item_code:
max_discount = frappe.db.get_value("Item", self.item_code, "max_discount")
if max_discount and flt(self.discount_percentage) > flt(max_discount):
throw(_("Max discount allowed for item: {0} is {1}%").format(self.item_code, max_discount))
#--------------------------------------------------------------------------------
@frappe.whitelist()
def apply_pricing_rule(args):
"""
args = {
"item_list": [{"doctype": "", "name": "", "item_code": "", "brand": "", "item_group": ""}, ...],
"customer": "something",
"customer_group": "something",
"territory": "something",
"supplier": "something",
"supplier_type": "something",
"currency": "something",
"conversion_rate": "something",
"price_list": "something",
"plc_conversion_rate": "something",
"company": "something",
"transaction_date": "something",
"campaign": "something",
"sales_partner": "something",
"ignore_pricing_rule": "something"
}
"""
if isinstance(args, basestring):
args = json.loads(args)
args = frappe._dict(args)
# list of dictionaries
out = []
if args.get("parenttype") == "Material Request": return out
if not args.transaction_type:
args.transaction_type = "buying" if frappe.get_meta(args.parenttype).get_field("supplier") \
else "selling"
item_list = args.get("item_list")
args.pop("item_list")
for item in item_list:
args_copy = copy.deepcopy(args)
args_copy.update(item)
out.append(get_pricing_rule_for_item(args_copy))
return out
def get_pricing_rule_for_item(args):
if args.get("parenttype") == "Material Request": return {}
item_details = frappe._dict({
"doctype": args.doctype,
"name": args.name,
"pricing_rule": None
})
if args.ignore_pricing_rule or not args.item_code:
return item_details
if not (args.item_group and args.brand):
args.item_group, args.brand = frappe.db.get_value("Item", args.item_code, ["item_group", "brand"])
if not args.item_group:
frappe.throw(_("Item Group not mentioned in item master for item {0}").format(args.item_code))
if args.customer and not (args.customer_group and args.territory):
customer = frappe.db.get_value("Customer", args.customer, ["customer_group", "territory"])
if customer:
args.customer_group, args.territory = customer
elif args.supplier and not args.supplier_type:
args.supplier_type = frappe.db.get_value("Supplier", args.supplier, "supplier_type")
pricing_rules = get_pricing_rules(args)
pricing_rule = filter_pricing_rules(args, pricing_rules)
if pricing_rule:
item_details.pricing_rule = pricing_rule.name
if pricing_rule.price_or_discount == "Price":
item_details.update({
"price_list_rate": pricing_rule.price/flt(args.conversion_rate) \
if args.conversion_rate else 0.0,
"discount_percentage": 0.0
})
else:
item_details.discount_percentage = pricing_rule.discount_percentage
return item_details
def get_pricing_rules(args):
def _get_tree_conditions(parenttype, allow_blank=True):
field = frappe.scrub(parenttype)
condition = ""
if args.get(field):
lft, rgt = frappe.db.get_value(parenttype, args[field], ["lft", "rgt"])
parent_groups = frappe.db.sql_list("""select name from `tab%s`
where lft<=%s and rgt>=%s""" % (parenttype, '%s', '%s'), (lft, rgt))
if parent_groups:
if allow_blank: parent_groups.append('')
condition = " ifnull("+field+", '') in ('" + \
"', '".join([d.replace("'", "\\'").replace('"', '\\"') for d in parent_groups])+"')"
return condition
conditions = ""
for field in ["company", "customer", "supplier", "supplier_type", "campaign", "sales_partner"]:
if args.get(field):
conditions += " and ifnull("+field+", '') in (%("+field+")s, '')"
else:
conditions += " and ifnull("+field+", '') = ''"
for parenttype in ["Customer Group", "Territory"]:
group_condition = _get_tree_conditions(parenttype)
if group_condition:
conditions += " and " + group_condition
if not args.price_list: args.price_list = None
conditions += " and ifnull(for_price_list, '') in (%(price_list)s, '')"
if args.get("transaction_date"):
conditions += """ and %(transaction_date)s between ifnull(valid_from, '2000-01-01')
and ifnull(valid_upto, '2500-12-31')"""
item_group_condition = _get_tree_conditions("Item Group", False)
if item_group_condition: item_group_condition = " or " + item_group_condition
return frappe.db.sql("""select * from `tabPricing Rule`
where (item_code=%(item_code)s {item_group_condition} or brand=%(brand)s)
and docstatus < 2 and ifnull(disable, 0) = 0
and ifnull({transaction_type}, 0) = 1 {conditions}
order by priority desc, name desc""".format(
item_group_condition=item_group_condition,
transaction_type=args.transaction_type, conditions=conditions), args, as_dict=1)
def filter_pricing_rules(args, pricing_rules):
# filter for qty
if pricing_rules and args.get("qty"):
pricing_rules = filter(lambda x: (args.qty>=flt(x.min_qty)
and (args.qty<=x.max_qty if x.max_qty else True)), pricing_rules)
# find pricing rule with highest priority
if pricing_rules:
max_priority = max([cint(p.priority) for p in pricing_rules])
if max_priority:
pricing_rules = filter(lambda x: cint(x.priority)==max_priority, pricing_rules)
# apply internal priority
all_fields = ["item_code", "item_group", "brand", "customer", "customer_group", "territory",
"supplier", "supplier_type", "campaign", "sales_partner"]
if len(pricing_rules) > 1:
for field_set in [["item_code", "item_group", "brand"],
["customer", "customer_group", "territory"], ["supplier", "supplier_type"]]:
remaining_fields = list(set(all_fields) - set(field_set))
if if_all_rules_same(pricing_rules, remaining_fields):
pricing_rules = apply_internal_priority(pricing_rules, field_set, args)
break
if len(pricing_rules) > 1:
price_or_discount = list(set([d.price_or_discount for d in pricing_rules]))
if len(price_or_discount) == 1 and price_or_discount[0] == "Discount Percentage":
pricing_rules = filter(lambda x: x.for_price_list==args.price_list, pricing_rules) \
or pricing_rules
if len(pricing_rules) > 1:
frappe.throw(_("Multiple Price Rule exists with same criteria, please resolve \
conflict by assigning priority. Price Rules: {0}")
.format("\n".join([d.name for d in pricing_rules])), MultiplePricingRuleConflict)
elif pricing_rules:
return pricing_rules[0]
def if_all_rules_same(pricing_rules, fields):
all_rules_same = True
val = [pricing_rules[0][k] for k in fields]
for p in pricing_rules[1:]:
if val != [p[k] for k in fields]:
all_rules_same = False
break
return all_rules_same
def apply_internal_priority(pricing_rules, field_set, args):
filtered_rules = []
for field in field_set:
if args.get(field):
filtered_rules = filter(lambda x: x[field]==args[field], pricing_rules)
if filtered_rules: break
return filtered_rules or pricing_rules
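# --- Illustrative note (editorial, not part of the original module) ---
# How filter_pricing_rules() resolves overlaps, on a hypothetical example:
# suppose two rules match an item, one with priority 1 (10% discount) and one
# with priority 2 (5% discount). max_priority is 2, so only the priority-2
# rule survives and get_pricing_rule_for_item() reports the 5% discount.
# If several rules still tie after the qty, priority, internal-priority and
# price-list filters, MultiplePricingRuleConflict is raised and the user is
# asked to assign explicit priorities.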
|
BoltzmannBrain/nupic
|
refs/heads/master
|
tests/swarming/nupic/swarming/__init__.py
|
175
|
#! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
|
sacnayak/ssnayak-viz
|
refs/heads/master
|
lib/jinja2/exceptions.py
|
977
|
# -*- coding: utf-8 -*-
"""
jinja2.exceptions
~~~~~~~~~~~~~~~~~
Jinja exceptions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import imap, text_type, PY2, implements_to_string
class TemplateError(Exception):
"""Baseclass for all template errors."""
if PY2:
def __init__(self, message=None):
if message is not None:
message = text_type(message).encode('utf-8')
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message.decode('utf-8', 'replace')
def __unicode__(self):
return self.message or u''
else:
def __init__(self, message=None):
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist."""
# looks weird, but removes the warning descriptor that just
# bogusly warns us about message being deprecated
message = None
def __init__(self, name, message=None):
IOError.__init__(self)
if message is None:
message = name
self.message = message
self.name = name
self.templates = [name]
def __str__(self):
return self.message
class TemplatesNotFound(TemplateNotFound):
"""Like :class:`TemplateNotFound` but raised if multiple templates
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
.. versionadded:: 2.2
"""
def __init__(self, names=(), message=None):
if message is None:
message = u'none of the templates given were found: ' + \
u', '.join(imap(text_type, names))
TemplateNotFound.__init__(self, names and names[-1] or None, message)
self.templates = list(names)
@implements_to_string
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(self, message, lineno, name=None, filename=None):
TemplateError.__init__(self, message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self):
# for translated errors we only return the message
if self.translated:
return self.message
# otherwise attach some stuff
location = 'line %d' % self.lineno
name = self.filename or self.name
if name:
location = 'File "%s", %s' % (name, location)
lines = [self.message, ' ' + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
line = None
if line:
lines.append(' ' + line.strip())
return u'\n'.join(lines)
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
by a syntax error. However it's a direct subclass of
:exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
"""A generic runtime error in the template engine. Under some situations
Jinja may raise this exception.
"""
class UndefinedError(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
class SecurityError(TemplateRuntimeError):
"""Raised if a template tries to do something insecure if the
sandbox is enabled.
"""
class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
arguments
"""
|
SujaySKumar/django
|
refs/heads/master
|
tests/messages_tests/test_fallback.py
|
330
|
from django.contrib.messages import constants
from django.contrib.messages.storage.fallback import (
CookieStorage, FallbackStorage,
)
from django.test import SimpleTestCase
from .base import BaseTests
from .test_cookie import set_cookie_data, stored_cookie_messages_count
from .test_session import set_session_data, stored_session_messages_count
class FallbackTest(BaseTests, SimpleTestCase):
storage_class = FallbackStorage
def get_request(self):
self.session = {}
request = super(FallbackTest, self).get_request()
request.session = self.session
return request
def get_cookie_storage(self, storage):
return storage.storages[-2]
def get_session_storage(self, storage):
return storage.storages[-1]
def stored_cookie_messages_count(self, storage, response):
return stored_cookie_messages_count(self.get_cookie_storage(storage),
response)
def stored_session_messages_count(self, storage, response):
return stored_session_messages_count(self.get_session_storage(storage))
def stored_messages_count(self, storage, response):
"""
Return the storage totals from both cookie and session backends.
"""
total = (self.stored_cookie_messages_count(storage, response) +
self.stored_session_messages_count(storage, response))
return total
def test_get(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
# Set initial cookie data.
example_messages = [str(i) for i in range(5)]
set_cookie_data(cookie_storage, example_messages)
# Overwrite the _get method of the fallback storage to prove it is not
# used (it would cause a TypeError: 'NoneType' object is not callable).
self.get_session_storage(storage)._get = None
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
def test_get_empty(self):
request = self.get_request()
storage = self.storage_class(request)
# Overwrite the _get method of the fallback storage to prove it is not
# used (it would cause a TypeError: 'NoneType' object is not callable).
self.get_session_storage(storage)._get = None
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), [])
def test_get_fallback(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
session_storage = self.get_session_storage(storage)
# Set initial cookie and session data.
example_messages = [str(i) for i in range(5)]
set_cookie_data(cookie_storage, example_messages[:4] +
[CookieStorage.not_finished])
set_session_data(session_storage, example_messages[4:])
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
def test_get_fallback_only(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
session_storage = self.get_session_storage(storage)
# Set initial cookie and session data.
example_messages = [str(i) for i in range(5)]
set_cookie_data(cookie_storage, [CookieStorage.not_finished],
encode_empty=True)
set_session_data(session_storage, example_messages)
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
def test_flush_used_backends(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
session_storage = self.get_session_storage(storage)
# Set initial cookie and session data.
set_cookie_data(cookie_storage, ['cookie', CookieStorage.not_finished])
set_session_data(session_storage, ['session'])
# When updating, previously used but no longer needed backends are
# flushed.
response = self.get_response()
list(storage)
storage.update(response)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 0)
def test_no_fallback(self):
"""
Confirms that:
(1) A short number of messages whose data size doesn't exceed what is
allowed in a cookie will all be stored in the CookieBackend.
(2) If the CookieBackend can store all messages, the SessionBackend
won't be written to at all.
"""
storage = self.get_storage()
response = self.get_response()
# Overwrite the _store method of the fallback storage to prove it isn't
# used (it would cause a TypeError: 'NoneType' object is not callable).
self.get_session_storage(storage)._store = None
for i in range(5):
storage.add(constants.INFO, str(i) * 100)
storage.update(response)
cookie_storing = self.stored_cookie_messages_count(storage, response)
self.assertEqual(cookie_storing, 5)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 0)
def test_session_fallback(self):
"""
Confirms that, if the data exceeds what is allowed in a cookie,
messages which did not fit are stored in the SessionBackend.
"""
storage = self.get_storage()
response = self.get_response()
# see comment in CookieTest.test_cookie_max_length
msg_size = int((CookieStorage.max_cookie_size - 54) / 4.5 - 37)
for i in range(5):
storage.add(constants.INFO, str(i) * msg_size)
storage.update(response)
cookie_storing = self.stored_cookie_messages_count(storage, response)
self.assertEqual(cookie_storing, 4)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 1)
def test_session_fallback_only(self):
"""
Confirms that large messages, none of which fit in a cookie, are stored
in the SessionBackend (and nothing is stored in the CookieBackend).
"""
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'x' * 5000)
storage.update(response)
cookie_storing = self.stored_cookie_messages_count(storage, response)
self.assertEqual(cookie_storing, 0)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 1)
|
pizzapanther/HoverMom
|
refs/heads/master
|
hovermom/django/contrib/redirects/middleware.py
|
215
|
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.redirects.models import Redirect
from django.contrib.sites.models import get_current_site
from django.core.exceptions import ImproperlyConfigured
from django import http
class RedirectFallbackMiddleware(object):
def __init__(self):
if 'django.contrib.sites' not in settings.INSTALLED_APPS:
raise ImproperlyConfigured(
"You cannot use RedirectFallbackMiddleware when "
"django.contrib.sites is not installed."
)
def process_response(self, request, response):
if response.status_code != 404:
return response # No need to check for a redirect for non-404 responses.
full_path = request.get_full_path()
current_site = get_current_site(request)
r = None
try:
r = Redirect.objects.get(site=current_site, old_path=full_path)
except Redirect.DoesNotExist:
pass
if settings.APPEND_SLASH and not request.path.endswith('/'):
# Try appending a trailing slash.
path_len = len(request.path)
full_path = full_path[:path_len] + '/' + full_path[path_len:]
try:
r = Redirect.objects.get(site=current_site, old_path=full_path)
except Redirect.DoesNotExist:
pass
if r is not None:
if r.new_path == '':
return http.HttpResponseGone()
return http.HttpResponsePermanentRedirect(r.new_path)
# No redirect was found. Return the response.
return response
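# --- Illustrative configuration sketch (editorial, not part of the module) ---
# The middleware only acts on 404 responses, so it is typically listed after
# the common middleware in settings (the setting name reflects the Django 1.x
# MIDDLEWARE_CLASSES style this old-style middleware targets):
#
#     MIDDLEWARE_CLASSES = [
#         # ...
#         'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
#     ]
#
# A Redirect row with new_path='' turns the 404 into 410 Gone; any other
# new_path becomes a 301 permanent redirect, as implemented above.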
|
Vixionar/django
|
refs/heads/master
|
tests/migrations/test_migrations_no_ancestor/0001_initial.py
|
2995
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
"Tribble",
[
("id", models.AutoField(primary_key=True)),
("fluffy", models.BooleanField(default=True)),
],
)
]
|
cea-sec/miasm
|
refs/heads/master
|
miasm/arch/x86/jit.py
|
2
|
from builtins import range
import logging
from miasm.jitter.jitload import Jitter, named_arguments
from miasm.arch.x86.sem import Lifter_X86_16, Lifter_X86_32, Lifter_X86_64
from miasm.jitter.codegen import CGen
from miasm.ir.translators.C import TranslatorC
log = logging.getLogger('jit_x86')
hnd = logging.StreamHandler()
hnd.setFormatter(logging.Formatter("[%(levelname)-8s]: %(message)s"))
log.addHandler(hnd)
log.setLevel(logging.CRITICAL)
class x86_32_CGen(CGen):
def __init__(self, lifter):
self.lifter = lifter
self.PC = self.lifter.arch.regs.RIP
self.translator = TranslatorC(self.lifter.loc_db)
self.init_arch_C()
def gen_post_code(self, attrib, pc_value):
out = []
if attrib.log_regs:
# Update PC for dump_gpregs
out.append("%s = %s;" % (self.C_PC, pc_value))
out.append('dump_gpregs_32(jitcpu->cpu);')
return out
class x86_64_CGen(x86_32_CGen):
def gen_post_code(self, attrib, pc_value):
out = []
if attrib.log_regs:
# Update PC for dump_gpregs
out.append("%s = %s;" % (self.C_PC, pc_value))
out.append('dump_gpregs_64(jitcpu->cpu);')
return out
class jitter_x86_16(Jitter):
C_Gen = x86_32_CGen
def __init__(self, loc_db, *args, **kwargs):
Jitter.__init__(self, Lifter_X86_16(loc_db), *args, **kwargs)
self.vm.set_little_endian()
self.lifter.do_stk_segm = False
self.orig_irbloc_fix_regs_for_mode = self.lifter.irbloc_fix_regs_for_mode
self.lifter.irbloc_fix_regs_for_mode = self.lifterbloc_fix_regs_for_mode
def lifterbloc_fix_regs_for_mode(self, irblock, attrib=64):
return self.orig_irbloc_fix_regs_for_mode(irblock, 64)
def push_uint16_t(self, value):
self.cpu.SP -= self.lifter.sp.size // 8
self.vm.set_u16(self.cpu.SP, value)
def pop_uint16_t(self):
value = self.vm.get_u16(self.cpu.SP)
self.cpu.SP += self.lifter.sp.size // 8
return value
def get_stack_arg(self, index):
return self.vm.get_u16(self.cpu.SP + 4 * index)
def init_run(self, *args, **kwargs):
Jitter.init_run(self, *args, **kwargs)
self.cpu.IP = self.pc
class jitter_x86_32(Jitter):
C_Gen = x86_32_CGen
def __init__(self, loc_db, *args, **kwargs):
Jitter.__init__(self, Lifter_X86_32(loc_db), *args, **kwargs)
self.vm.set_little_endian()
self.lifter.do_stk_segm = False
self.orig_irbloc_fix_regs_for_mode = self.lifter.irbloc_fix_regs_for_mode
self.lifter.irbloc_fix_regs_for_mode = self.lifterbloc_fix_regs_for_mode
def lifterbloc_fix_regs_for_mode(self, irblock, attrib=64):
return self.orig_irbloc_fix_regs_for_mode(irblock, 64)
def push_uint16_t(self, value):
self.cpu.ESP -= self.lifter.sp.size // 8
self.vm.set_u16(self.cpu.ESP, value)
def pop_uint16_t(self):
value = self.vm.get_u16(self.cpu.ESP)
self.cpu.ESP += self.lifter.sp.size // 8
return value
def push_uint32_t(self, value):
self.cpu.ESP -= self.lifter.sp.size // 8
self.vm.set_u32(self.cpu.ESP, value)
def pop_uint32_t(self):
value = self.vm.get_u32(self.cpu.ESP)
self.cpu.ESP += self.lifter.sp.size // 8
return value
def get_stack_arg(self, index):
return self.vm.get_u32(self.cpu.ESP + 4 * index)
def init_run(self, *args, **kwargs):
Jitter.init_run(self, *args, **kwargs)
self.cpu.EIP = self.pc
# calling conventions
# stdcall
@named_arguments
def func_args_stdcall(self, n_args):
ret_ad = self.pop_uint32_t()
args = [self.pop_uint32_t() for _ in range(n_args)]
return ret_ad, args
def func_ret_stdcall(self, ret_addr, ret_value1=None, ret_value2=None):
self.pc = self.cpu.EIP = ret_addr
if ret_value1 is not None:
self.cpu.EAX = ret_value1
if ret_value2 is not None:
self.cpu.EDX = ret_value2
def func_prepare_stdcall(self, ret_addr, *args):
for arg in reversed(args):
self.push_uint32_t(arg)
self.push_uint32_t(ret_addr)
get_arg_n_stdcall = get_stack_arg
# cdecl
@named_arguments
def func_args_cdecl(self, n_args):
ret_ad = self.pop_uint32_t()
args = [self.get_stack_arg(i) for i in range(n_args)]
return ret_ad, args
def func_ret_cdecl(self, ret_addr, ret_value1=None, ret_value2=None):
self.pc = self.cpu.EIP = ret_addr
if ret_value1 is not None:
self.cpu.EAX = ret_value1
if ret_value2 is not None:
self.cpu.EDX = ret_value2
get_arg_n_cdecl = get_stack_arg
# System V
func_args_systemv = func_args_cdecl
func_ret_systemv = func_ret_cdecl
func_prepare_systemv = func_prepare_stdcall
get_arg_n_systemv = get_stack_arg
# fastcall
@named_arguments
def func_args_fastcall(self, n_args):
args_regs = ['ECX', 'EDX']
ret_ad = self.pop_uint32_t()
args = []
for i in range(n_args):
args.append(self.get_arg_n_fastcall(i))
return ret_ad, args
def func_prepare_fastcall(self, ret_addr, *args):
args_regs = ['ECX', 'EDX']
for i in range(min(len(args), len(args_regs))):
setattr(self.cpu, args_regs[i], args[i])
remaining_args = args[len(args_regs):]
for arg in reversed(remaining_args):
self.push_uint32_t(arg)
self.push_uint32_t(ret_addr)
def get_arg_n_fastcall(self, index):
args_regs = ['ECX', 'EDX']
if index < len(args_regs):
return getattr(self.cpu, args_regs[index])
return self.get_stack_arg(index - len(args_regs))
def syscall_args_systemv(self, n_args):
# Documentation: http://man7.org/linux/man-pages/man2/syscall.2.html
# args:
# i386 ebx ecx edx esi edi ebp -
args = [self.cpu.EBX, self.cpu.ECX, self.cpu.EDX, self.cpu.ESI,
self.cpu.EDI, self.cpu.EBP][:n_args]
return args
def syscall_ret_systemv(self, value):
# Documentation: http://man7.org/linux/man-pages/man2/syscall.2.html
self.cpu.EAX = value
class jitter_x86_64(Jitter):
C_Gen = x86_64_CGen
args_regs_systemv = ['RDI', 'RSI', 'RDX', 'RCX', 'R8', 'R9']
args_regs_stdcall = ['RCX', 'RDX', 'R8', 'R9']
def __init__(self, loc_db, *args, **kwargs):
Jitter.__init__(self, Lifter_X86_64(loc_db), *args, **kwargs)
self.vm.set_little_endian()
self.lifter.do_stk_segm = False
self.orig_irbloc_fix_regs_for_mode = self.lifter.irbloc_fix_regs_for_mode
self.lifter.irbloc_fix_regs_for_mode = self.lifterbloc_fix_regs_for_mode
def lifterbloc_fix_regs_for_mode(self, irblock, attrib=64):
return self.orig_irbloc_fix_regs_for_mode(irblock, 64)
def push_uint64_t(self, value):
self.cpu.RSP -= self.lifter.sp.size // 8
self.vm.set_u64(self.cpu.RSP, value)
def pop_uint64_t(self):
value = self.vm.get_u64(self.cpu.RSP)
self.cpu.RSP += self.lifter.sp.size // 8
return value
def get_stack_arg(self, index):
return self.vm.get_u64(self.cpu.RSP + 8 * index)
def init_run(self, *args, **kwargs):
Jitter.init_run(self, *args, **kwargs)
self.cpu.RIP = self.pc
# calling conventions
# stdcall
@named_arguments
def func_args_stdcall(self, n_args):
args_regs = self.args_regs_stdcall
ret_ad = self.pop_uint64_t()
args = []
for i in range(min(n_args, 4)):
args.append(self.cpu.get_gpreg()[args_regs[i]])
for i in range(max(0, n_args - 4)):
# Take into account the shadow registers on the stack
# (Microsoft 64bit stdcall ABI)
# => Skip the first 4 stack parameters
args.append(self.get_stack_arg(4 + i))
return ret_ad, args
def func_prepare_stdcall(self, ret_addr, *args):
args_regs = self.args_regs_stdcall
for i in range(min(len(args), len(args_regs))):
setattr(self.cpu, args_regs[i], args[i])
remaining_args = args[len(args_regs):]
for arg in reversed(remaining_args):
self.push_uint64_t(arg)
self.push_uint64_t(ret_addr)
def func_ret_stdcall(self, ret_addr, ret_value=None):
self.pc = self.cpu.RIP = ret_addr
if ret_value is not None:
self.cpu.RAX = ret_value
return True
# cdecl
func_args_cdecl = func_args_stdcall
func_ret_cdecl = func_ret_stdcall
func_prepare_cdecl = func_prepare_stdcall
# System V
def get_arg_n_systemv(self, index):
args_regs = self.args_regs_systemv
if index < len(args_regs):
return getattr(self.cpu, args_regs[index])
return self.get_stack_arg(index - len(args_regs))
@named_arguments
def func_args_systemv(self, n_args):
ret_ad = self.pop_uint64_t()
args = [self.get_arg_n_systemv(index) for index in range(n_args)]
return ret_ad, args
func_ret_systemv = func_ret_cdecl
def func_prepare_systemv(self, ret_addr, *args):
args_regs = self.args_regs_systemv
self.push_uint64_t(ret_addr)
for i in range(min(len(args), len(args_regs))):
setattr(self.cpu, args_regs[i], args[i])
remaining_args = args[len(args_regs):]
for arg in reversed(remaining_args):
self.push_uint64_t(arg)
def syscall_args_systemv(self, n_args):
args = [self.cpu.RDI, self.cpu.RSI, self.cpu.RDX, self.cpu.R10,
self.cpu.R8, self.cpu.R9][:n_args]
return args
def syscall_ret_systemv(self, value):
self.cpu.RAX = value
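# --- Illustrative note (editorial, not part of the original module) ---
# A hedged sketch of how the 32-bit calling-convention helpers above are
# typically used from a breakpoint callback (the address and return value
# are hypothetical):
#
#     def my_hook(jitter):
#         # pop the return address and two stdcall arguments off the stack
#         ret_ad, args = jitter.func_args_stdcall(2)
#         # ... emulate the call using args[0], args[1] ...
#         jitter.func_ret_stdcall(ret_ad, ret_value1=0)
#         return True
#
#     jitter.add_breakpoint(0x401000, my_hook)
#
# func_args_stdcall pops the return address and n_args 32-bit arguments;
# func_ret_stdcall restores pc/EIP and optionally sets EAX and EDX.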
|
Zhongqilong/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/encodings/cp424.py
|
272
|
""" Python Character Mapping Codec cp424 generated from 'MAPPINGS/VENDORS/MISC/CP424.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp424',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> SELECT
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> REQUIRED NEW LINE
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> GRAPHIC ESCAPE
'\x8d' # 0x09 -> SUPERSCRIPT
'\x8e' # 0x0A -> REPEAT
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> RESTORE/ENABLE PRESENTATION
'\x85' # 0x15 -> NEW LINE
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> PROGRAM OPERATOR COMMUNICATION
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> UNIT BACK SPACE
'\x8f' # 0x1B -> CUSTOMER USE ONE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> DIGIT SELECT
'\x81' # 0x21 -> START OF SIGNIFICANCE
'\x82' # 0x22 -> FIELD SEPARATOR
'\x83' # 0x23 -> WORD UNDERSCORE
'\x84' # 0x24 -> BYPASS OR INHIBIT PRESENTATION
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> SET ATTRIBUTE
'\x89' # 0x29 -> START FIELD EXTENDED
'\x8a' # 0x2A -> SET MODE OR SWITCH
'\x8b' # 0x2B -> CONTROL SEQUENCE PREFIX
'\x8c' # 0x2C -> MODIFY FIELD ATTRIBUTE
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> <reserved>
'\x91' # 0x31 -> <reserved>
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> INDEX RETURN
'\x94' # 0x34 -> PRESENTATION POSITION
'\x95' # 0x35 -> TRANSPARENT
'\x96' # 0x36 -> NUMERIC BACKSPACE
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> SUBSCRIPT
'\x99' # 0x39 -> INDENT TABULATION
'\x9a' # 0x3A -> REVERSE FORM FEED
'\x9b' # 0x3B -> CUSTOMER USE THREE
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> <reserved>
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\u05d0' # 0x41 -> HEBREW LETTER ALEF
'\u05d1' # 0x42 -> HEBREW LETTER BET
'\u05d2' # 0x43 -> HEBREW LETTER GIMEL
'\u05d3' # 0x44 -> HEBREW LETTER DALET
'\u05d4' # 0x45 -> HEBREW LETTER HE
'\u05d5' # 0x46 -> HEBREW LETTER VAV
'\u05d6' # 0x47 -> HEBREW LETTER ZAYIN
'\u05d7' # 0x48 -> HEBREW LETTER HET
'\u05d8' # 0x49 -> HEBREW LETTER TET
'\xa2' # 0x4A -> CENT SIGN
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'|' # 0x4F -> VERTICAL LINE
'&' # 0x50 -> AMPERSAND
'\u05d9' # 0x51 -> HEBREW LETTER YOD
'\u05da' # 0x52 -> HEBREW LETTER FINAL KAF
'\u05db' # 0x53 -> HEBREW LETTER KAF
'\u05dc' # 0x54 -> HEBREW LETTER LAMED
'\u05dd' # 0x55 -> HEBREW LETTER FINAL MEM
'\u05de' # 0x56 -> HEBREW LETTER MEM
'\u05df' # 0x57 -> HEBREW LETTER FINAL NUN
'\u05e0' # 0x58 -> HEBREW LETTER NUN
'\u05e1' # 0x59 -> HEBREW LETTER SAMEKH
'!' # 0x5A -> EXCLAMATION MARK
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'\xac' # 0x5F -> NOT SIGN
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\u05e2' # 0x62 -> HEBREW LETTER AYIN
'\u05e3' # 0x63 -> HEBREW LETTER FINAL PE
'\u05e4' # 0x64 -> HEBREW LETTER PE
'\u05e5' # 0x65 -> HEBREW LETTER FINAL TSADI
'\u05e6' # 0x66 -> HEBREW LETTER TSADI
'\u05e7' # 0x67 -> HEBREW LETTER QOF
'\u05e8' # 0x68 -> HEBREW LETTER RESH
'\u05e9' # 0x69 -> HEBREW LETTER SHIN
'\xa6' # 0x6A -> BROKEN BAR
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\ufffe' # 0x70 -> UNDEFINED
'\u05ea' # 0x71 -> HEBREW LETTER TAV
'\ufffe' # 0x72 -> UNDEFINED
'\ufffe' # 0x73 -> UNDEFINED
'\xa0' # 0x74 -> NO-BREAK SPACE
'\ufffe' # 0x75 -> UNDEFINED
'\ufffe' # 0x76 -> UNDEFINED
'\ufffe' # 0x77 -> UNDEFINED
'\u2017' # 0x78 -> DOUBLE LOW LINE
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\ufffe' # 0x80 -> UNDEFINED
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\ufffe' # 0x8C -> UNDEFINED
'\ufffe' # 0x8D -> UNDEFINED
'\ufffe' # 0x8E -> UNDEFINED
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\ufffe' # 0x9A -> UNDEFINED
'\ufffe' # 0x9B -> UNDEFINED
'\ufffe' # 0x9C -> UNDEFINED
'\xb8' # 0x9D -> CEDILLA
'\ufffe' # 0x9E -> UNDEFINED
'\xa4' # 0x9F -> CURRENCY SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\ufffe' # 0xAA -> UNDEFINED
'\ufffe' # 0xAB -> UNDEFINED
'\ufffe' # 0xAC -> UNDEFINED
'\ufffe' # 0xAD -> UNDEFINED
'\ufffe' # 0xAE -> UNDEFINED
'\xae' # 0xAF -> REGISTERED SIGN
'^' # 0xB0 -> CIRCUMFLEX ACCENT
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'[' # 0xBA -> LEFT SQUARE BRACKET
']' # 0xBB -> RIGHT SQUARE BRACKET
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\ufffe' # 0xCB -> UNDEFINED
'\ufffe' # 0xCC -> UNDEFINED
'\ufffe' # 0xCD -> UNDEFINED
'\ufffe' # 0xCE -> UNDEFINED
'\ufffe' # 0xCF -> UNDEFINED
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\ufffe' # 0xDB -> UNDEFINED
'\ufffe' # 0xDC -> UNDEFINED
'\ufffe' # 0xDD -> UNDEFINED
'\ufffe' # 0xDE -> UNDEFINED
'\ufffe' # 0xDF -> UNDEFINED
'\\' # 0xE0 -> REVERSE SOLIDUS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\ufffe' # 0xEB -> UNDEFINED
'\ufffe' # 0xEC -> UNDEFINED
'\ufffe' # 0xED -> UNDEFINED
'\ufffe' # 0xEE -> UNDEFINED
'\ufffe' # 0xEF -> UNDEFINED
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\ufffe' # 0xFB -> UNDEFINED
'\ufffe' # 0xFC -> UNDEFINED
'\ufffe' # 0xFD -> UNDEFINED
'\ufffe' # 0xFE -> UNDEFINED
'\x9f' # 0xFF -> EIGHT ONES
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
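### Illustrative round-trip sketch (not part of the original codec module).
### It assumes the stdlib ``encodings`` charmap pattern, in which the tables
### above are consumed by codecs.charmap_decode / codecs.charmap_encode
### (``codecs`` is already imported above for charmap_build). The helper name
### below is hypothetical.
def _roundtrip_example(data):
    # Decode raw bytes through decoding_table, then re-encode the resulting
    # text through the encoding_table built from it.
    text, _ = codecs.charmap_decode(data, 'strict', decoding_table)
    raw, _ = codecs.charmap_encode(text, 'strict', encoding_table)
    return text, raw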
|
LethusTI/supportcenter
|
refs/heads/master
|
vendor/django/django/contrib/admindocs/models.py
|
634
|
# Empty models.py to allow for specifying admindocs as a test label.
|
seanli9jan/tensorflow
|
refs/heads/master
|
tensorflow/python/lib/io/file_io_test.py
|
13
|
# This Python file uses the following encoding: utf-8
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Testing File IO operations in file_io.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from tensorflow.python.framework import errors
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
class FileIoTest(test.TestCase):
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), "base_dir")
file_io.create_dir(self._base_dir)
def tearDown(self):
file_io.delete_recursively(self._base_dir)
def testEmptyFilename(self):
f = file_io.FileIO("", mode="r")
with self.assertRaises(errors.NotFoundError):
_ = f.read()
def testFileDoesntExist(self):
file_path = os.path.join(self._base_dir, "temp_file")
self.assertFalse(file_io.file_exists(file_path))
with self.assertRaises(errors.NotFoundError):
_ = file_io.read_file_to_string(file_path)
def testWriteToString(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.write_string_to_file(file_path, "testing")
self.assertTrue(file_io.file_exists(file_path))
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("testing", file_contents)
def testAtomicWriteStringToFile(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.atomic_write_string_to_file(file_path, "testing")
self.assertTrue(file_io.file_exists(file_path))
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("testing", file_contents)
def testAtomicWriteStringToFileOverwriteFalse(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.atomic_write_string_to_file(file_path, "old", overwrite=False)
with self.assertRaises(errors.AlreadyExistsError):
file_io.atomic_write_string_to_file(file_path, "new", overwrite=False)
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("old", file_contents)
file_io.delete_file(file_path)
file_io.atomic_write_string_to_file(file_path, "new", overwrite=False)
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("new", file_contents)
def testReadBinaryMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.write_string_to_file(file_path, "testing")
with file_io.FileIO(file_path, mode="rb") as f:
self.assertEqual(b"testing", f.read())
def testWriteBinaryMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, "wb").write("testing")
with file_io.FileIO(file_path, mode="r") as f:
self.assertEqual("testing", f.read())
def testAppend(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="w") as f:
f.write("begin\n")
with file_io.FileIO(file_path, mode="a") as f:
f.write("a1\n")
with file_io.FileIO(file_path, mode="a") as f:
f.write("a2\n")
with file_io.FileIO(file_path, mode="r") as f:
file_contents = f.read()
self.assertEqual("begin\na1\na2\n", file_contents)
def testMultipleFiles(self):
file_prefix = os.path.join(self._base_dir, "temp_file")
for i in range(5000):
f = file_io.FileIO(file_prefix + str(i), mode="w+")
f.write("testing")
f.flush()
self.assertEqual("testing", f.read())
f.close()
def testMultipleWrites(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="w") as f:
f.write("line1\n")
f.write("line2")
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("line1\nline2", file_contents)
def testFileWriteBadMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
with self.assertRaises(errors.PermissionDeniedError):
file_io.FileIO(file_path, mode="r").write("testing")
def testFileReadBadMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
self.assertTrue(file_io.file_exists(file_path))
with self.assertRaises(errors.PermissionDeniedError):
file_io.FileIO(file_path, mode="w").read()
def testFileDelete(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
file_io.delete_file(file_path)
self.assertFalse(file_io.file_exists(file_path))
def testFileDeleteFail(self):
file_path = os.path.join(self._base_dir, "temp_file")
with self.assertRaises(errors.NotFoundError):
file_io.delete_file(file_path)
def testGetMatchingFiles(self):
dir_path = os.path.join(self._base_dir, "temp_dir")
file_io.create_dir(dir_path)
files = ["file1.txt", "file2.txt", "file3.txt"]
for name in files:
file_path = os.path.join(dir_path, name)
file_io.FileIO(file_path, mode="w").write("testing")
expected_match = [os.path.join(dir_path, name) for name in files]
self.assertItemsEqual(
file_io.get_matching_files(os.path.join(dir_path, "file*.txt")),
expected_match)
self.assertItemsEqual(file_io.get_matching_files(tuple()), [])
files_subset = [
os.path.join(dir_path, files[0]), os.path.join(dir_path, files[2])
]
self.assertItemsEqual(
file_io.get_matching_files(files_subset), files_subset)
file_io.delete_recursively(dir_path)
self.assertFalse(file_io.file_exists(os.path.join(dir_path, "file3.txt")))
def testCreateRecursiveDir(self):
dir_path = os.path.join(self._base_dir, "temp_dir/temp_dir1/temp_dir2")
file_io.recursive_create_dir(dir_path)
file_io.recursive_create_dir(dir_path) # repeat creation
file_path = os.path.join(dir_path, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
self.assertTrue(file_io.file_exists(file_path))
file_io.delete_recursively(os.path.join(self._base_dir, "temp_dir"))
self.assertFalse(file_io.file_exists(file_path))
def testCopy(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = os.path.join(self._base_dir, "copy_file")
file_io.copy(file_path, copy_path)
self.assertTrue(file_io.file_exists(copy_path))
f = file_io.FileIO(file_path, mode="r")
self.assertEqual("testing", f.read())
self.assertEqual(7, f.tell())
def testCopyOverwrite(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = os.path.join(self._base_dir, "copy_file")
file_io.FileIO(copy_path, mode="w").write("copy")
file_io.copy(file_path, copy_path, overwrite=True)
self.assertTrue(file_io.file_exists(copy_path))
self.assertEqual("testing", file_io.FileIO(file_path, mode="r").read())
def testCopyOverwriteFalse(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = os.path.join(self._base_dir, "copy_file")
file_io.FileIO(copy_path, mode="w").write("copy")
with self.assertRaises(errors.AlreadyExistsError):
file_io.copy(file_path, copy_path, overwrite=False)
def testRename(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = os.path.join(self._base_dir, "rename_file")
file_io.rename(file_path, rename_path)
self.assertTrue(file_io.file_exists(rename_path))
self.assertFalse(file_io.file_exists(file_path))
def testRenameOverwrite(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = os.path.join(self._base_dir, "rename_file")
file_io.FileIO(rename_path, mode="w").write("rename")
file_io.rename(file_path, rename_path, overwrite=True)
self.assertTrue(file_io.file_exists(rename_path))
self.assertFalse(file_io.file_exists(file_path))
def testRenameOverwriteFalse(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = os.path.join(self._base_dir, "rename_file")
file_io.FileIO(rename_path, mode="w").write("rename")
with self.assertRaises(errors.AlreadyExistsError):
file_io.rename(file_path, rename_path, overwrite=False)
self.assertTrue(file_io.file_exists(rename_path))
self.assertTrue(file_io.file_exists(file_path))
def testDeleteRecursivelyFail(self):
fake_dir_path = os.path.join(self._base_dir, "temp_dir")
with self.assertRaises(errors.NotFoundError):
file_io.delete_recursively(fake_dir_path)
def testIsDirectory(self):
dir_path = os.path.join(self._base_dir, "test_dir")
# Failure for a non-existing dir.
self.assertFalse(file_io.is_directory(dir_path))
file_io.create_dir(dir_path)
self.assertTrue(file_io.is_directory(dir_path))
file_path = os.path.join(dir_path, "test_file")
file_io.FileIO(file_path, mode="w").write("test")
# False for a file.
self.assertFalse(file_io.is_directory(file_path))
# Test that the value returned from `stat()` has `is_directory` set.
file_statistics = file_io.stat(dir_path)
self.assertTrue(file_statistics.is_directory)
def testListDirectory(self):
dir_path = os.path.join(self._base_dir, "test_dir")
file_io.create_dir(dir_path)
files = ["file1.txt", "file2.txt", "file3.txt"]
for name in files:
file_path = os.path.join(dir_path, name)
file_io.FileIO(file_path, mode="w").write("testing")
subdir_path = os.path.join(dir_path, "sub_dir")
file_io.create_dir(subdir_path)
subdir_file_path = os.path.join(subdir_path, "file4.txt")
file_io.FileIO(subdir_file_path, mode="w").write("testing")
dir_list = file_io.list_directory(dir_path)
self.assertItemsEqual(files + ["sub_dir"], dir_list)
def testListDirectoryFailure(self):
dir_path = os.path.join(self._base_dir, "test_dir")
with self.assertRaises(errors.NotFoundError):
file_io.list_directory(dir_path)
def _setupWalkDirectories(self, dir_path):
# Creating a file structure as follows
# test_dir -> file: file1.txt; dirs: subdir1_1, subdir1_2, subdir1_3
# subdir1_1 -> file: file2.txt
# subdir1_2 -> dir: subdir2
file_io.create_dir(dir_path)
file_io.FileIO(
os.path.join(dir_path, "file1.txt"), mode="w").write("testing")
sub_dirs1 = ["subdir1_1", "subdir1_2", "subdir1_3"]
for name in sub_dirs1:
file_io.create_dir(os.path.join(dir_path, name))
file_io.FileIO(
os.path.join(dir_path, "subdir1_1/file2.txt"),
mode="w").write("testing")
file_io.create_dir(os.path.join(dir_path, "subdir1_2/subdir2"))
def testWalkInOrder(self):
dir_path = os.path.join(self._base_dir, "test_dir")
self._setupWalkDirectories(dir_path)
# Now test the walk (in_order = True)
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=True):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [dir_path] + [
os.path.join(dir_path, item)
for item in
["subdir1_1", "subdir1_2", "subdir1_2/subdir2", "subdir1_3"]
])
self.assertEqual(dir_path, all_dirs[0])
self.assertLess(
all_dirs.index(os.path.join(dir_path, "subdir1_2")),
all_dirs.index(os.path.join(dir_path, "subdir1_2/subdir2")))
self.assertItemsEqual(all_subdirs[1:5], [[], ["subdir2"], [], []])
self.assertItemsEqual(all_subdirs[0],
["subdir1_1", "subdir1_2", "subdir1_3"])
self.assertItemsEqual(all_files, [["file1.txt"], ["file2.txt"], [], [], []])
self.assertLess(
all_files.index(["file1.txt"]), all_files.index(["file2.txt"]))
def testWalkPostOrder(self):
dir_path = os.path.join(self._base_dir, "test_dir")
self._setupWalkDirectories(dir_path)
# Now test the walk (in_order = False)
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=False):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [
os.path.join(dir_path, item)
for item in
["subdir1_1", "subdir1_2/subdir2", "subdir1_2", "subdir1_3"]
] + [dir_path])
self.assertEqual(dir_path, all_dirs[4])
self.assertLess(
all_dirs.index(os.path.join(dir_path, "subdir1_2/subdir2")),
all_dirs.index(os.path.join(dir_path, "subdir1_2")))
self.assertItemsEqual(all_subdirs[0:4], [[], [], ["subdir2"], []])
self.assertItemsEqual(all_subdirs[4],
["subdir1_1", "subdir1_2", "subdir1_3"])
self.assertItemsEqual(all_files, [["file2.txt"], [], [], [], ["file1.txt"]])
self.assertLess(
all_files.index(["file2.txt"]), all_files.index(["file1.txt"]))
def testWalkFailure(self):
dir_path = os.path.join(self._base_dir, "test_dir")
# Try walking a directory that wasn't created.
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=False):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [])
self.assertItemsEqual(all_subdirs, [])
self.assertItemsEqual(all_files, [])
def testStat(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
file_statistics = file_io.stat(file_path)
os_statistics = os.stat(file_path)
self.assertEqual(7, file_statistics.length)
self.assertEqual(
int(os_statistics.st_mtime), int(file_statistics.mtime_nsec / 1e9))
self.assertFalse(file_statistics.is_directory)
def testReadLine(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(36, f.size())
self.assertEqual("testing1\n", f.readline())
self.assertEqual("testing2\n", f.readline())
self.assertEqual("testing3\n", f.readline())
self.assertEqual("\n", f.readline())
self.assertEqual("testing5", f.readline())
self.assertEqual("", f.readline())
def testRead(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(36, f.size())
self.assertEqual("testing1\n", f.read(9))
self.assertEqual("testing2\n", f.read(9))
self.assertEqual("t", f.read(1))
self.assertEqual("esting3\n\ntesting5", f.read())
def testTell(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
self.assertEqual(27, f.tell())
self.assertEqual("\n", f.readline())
self.assertEqual(28, f.tell())
self.assertEqual("testing5", f.readline())
self.assertEqual(36, f.tell())
self.assertEqual("", f.readline())
self.assertEqual(36, f.tell())
def testSeek(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
# Seek to 18
f.seek(18)
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
# Seek back to 9
f.seek(9)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
f.seek(0)
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
with self.assertRaises(errors.InvalidArgumentError):
f.seek(-1)
with self.assertRaises(TypeError):
f.seek()
# TODO(jhseu): Delete after position deprecation.
with self.assertRaises(TypeError):
f.seek(offset=0, position=0)
f.seek(position=9)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
def testSeekFromWhat(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
# Seek to 18
f.seek(9, 1)
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
# Seek back to 9
f.seek(9, 0)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
f.seek(-f.size(), 2)
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
with self.assertRaises(errors.InvalidArgumentError):
f.seek(0, 3)
def testReadingIterator(self):
file_path = os.path.join(self._base_dir, "temp_file")
data = ["testing1\n", "testing2\n", "testing3\n", "\n", "testing5"]
with file_io.FileIO(file_path, mode="r+") as f:
f.write("".join(data))
actual_data = []
for line in f:
actual_data.append(line)
self.assertSequenceEqual(actual_data, data)
def testReadlines(self):
file_path = os.path.join(self._base_dir, "temp_file")
data = ["testing1\n", "testing2\n", "testing3\n", "\n", "testing5"]
f = file_io.FileIO(file_path, mode="r+")
f.write("".join(data))
f.flush()
lines = f.readlines()
self.assertSequenceEqual(lines, data)
def testUTF8StringPath(self):
file_path = os.path.join(self._base_dir, "UTF8测试_file")
file_io.write_string_to_file(file_path, "testing")
with file_io.FileIO(file_path, mode="rb") as f:
self.assertEqual(b"testing", f.read())
def testEof(self):
"""Test that reading past EOF does not raise an exception."""
file_path = os.path.join(self._base_dir, "temp_file")
f = file_io.FileIO(file_path, mode="r+")
content = "testing"
f.write(content)
f.flush()
self.assertEqual(content, f.read(len(content) + 1))
def testUTF8StringPathExists(self):
file_path = os.path.join(self._base_dir, "UTF8测试_file_exist")
file_io.write_string_to_file(file_path, "testing")
v = file_io.file_exists(file_path)
self.assertEqual(v, True)
def testFilecmp(self):
file1 = os.path.join(self._base_dir, "file1")
file_io.write_string_to_file(file1, "This is a sentence\n" * 100)
file2 = os.path.join(self._base_dir, "file2")
file_io.write_string_to_file(file2, "This is another sentence\n" * 100)
file3 = os.path.join(self._base_dir, "file3")
file_io.write_string_to_file(file3, u"This is another sentence\n" * 100)
self.assertFalse(file_io.filecmp(file1, file2))
self.assertTrue(file_io.filecmp(file2, file3))
def testFilecmpSameSize(self):
file1 = os.path.join(self._base_dir, "file1")
file_io.write_string_to_file(file1, "This is a sentence\n" * 100)
file2 = os.path.join(self._base_dir, "file2")
file_io.write_string_to_file(file2, "This is b sentence\n" * 100)
file3 = os.path.join(self._base_dir, "file3")
file_io.write_string_to_file(file3, u"This is b sentence\n" * 100)
self.assertFalse(file_io.filecmp(file1, file2))
self.assertTrue(file_io.filecmp(file2, file3))
def testFilecmpBinary(self):
file1 = os.path.join(self._base_dir, "file1")
file_io.FileIO(file1, "wb").write("testing\n\na")
file2 = os.path.join(self._base_dir, "file2")
file_io.FileIO(file2, "wb").write("testing\n\nb")
file3 = os.path.join(self._base_dir, "file3")
file_io.FileIO(file3, "wb").write("testing\n\nb")
file4 = os.path.join(self._base_dir, "file4")
file_io.FileIO(file4, "wb").write("testing\n\ntesting")
self.assertFalse(file_io.filecmp(file1, file2))
self.assertFalse(file_io.filecmp(file1, file4))
self.assertTrue(file_io.filecmp(file2, file3))
def testFileCrc32(self):
file1 = os.path.join(self._base_dir, "file1")
file_io.write_string_to_file(file1, "This is a sentence\n" * 100)
crc1 = file_io.file_crc32(file1)
file2 = os.path.join(self._base_dir, "file2")
file_io.write_string_to_file(file2, "This is another sentence\n" * 100)
crc2 = file_io.file_crc32(file2)
file3 = os.path.join(self._base_dir, "file3")
file_io.write_string_to_file(file3, "This is another sentence\n" * 100)
crc3 = file_io.file_crc32(file3)
self.assertTrue(crc1 != crc2)
self.assertEqual(crc2, crc3)
def testFileCrc32WithBytes(self):
file1 = os.path.join(self._base_dir, "file1")
file_io.write_string_to_file(file1, "This is a sentence\n" * 100)
crc1 = file_io.file_crc32(file1, block_size=24)
file2 = os.path.join(self._base_dir, "file2")
file_io.write_string_to_file(file2, "This is another sentence\n" * 100)
crc2 = file_io.file_crc32(file2, block_size=24)
file3 = os.path.join(self._base_dir, "file3")
file_io.write_string_to_file(file3, "This is another sentence\n" * 100)
crc3 = file_io.file_crc32(file3, block_size=-1)
self.assertTrue(crc1 != crc2)
self.assertEqual(crc2, crc3)
def testFileCrc32Binary(self):
file1 = os.path.join(self._base_dir, "file1")
file_io.FileIO(file1, "wb").write("testing\n\n")
crc1 = file_io.file_crc32(file1)
file2 = os.path.join(self._base_dir, "file2")
file_io.FileIO(file2, "wb").write("testing\n\n\n")
crc2 = file_io.file_crc32(file2)
file3 = os.path.join(self._base_dir, "file3")
file_io.FileIO(file3, "wb").write("testing\n\n\n")
crc3 = file_io.file_crc32(file3)
self.assertTrue(crc1 != crc2)
self.assertEqual(crc2, crc3)
def testMatchingFilesPermission(self):
# Create top level directory test_dir.
dir_path = os.path.join(self._base_dir, "test_dir")
file_io.create_dir(dir_path)
# Create second level directories `noread` and `any`.
noread_path = os.path.join(dir_path, "noread")
file_io.create_dir(noread_path)
any_path = os.path.join(dir_path, "any")
file_io.create_dir(any_path)
files = ["file1.txt", "file2.txt", "file3.txt"]
for name in files:
file_path = os.path.join(any_path, name)
file_io.FileIO(file_path, mode="w").write("testing")
file_path = os.path.join(noread_path, "file4.txt")
file_io.FileIO(file_path, mode="w").write("testing")
# Change noread to noread access.
os.chmod(noread_path, 0)
expected_match = [os.path.join(any_path, name) for name in files]
self.assertItemsEqual(
file_io.get_matching_files(os.path.join(dir_path, "*", "file*.txt")),
expected_match)
# Change noread back so that it could be cleaned during tearDown.
os.chmod(noread_path, 0o777)
if __name__ == "__main__":
test.main()
|
EliasTouil/simpleBlog
|
refs/heads/master
|
simpleBlog/Lib/site-packages/dateutil/tz/win.py
|
48
|
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
from six.moves import winreg
from six import text_type
try:
import ctypes
from ctypes import wintypes
except ValueError:
# ValueError is raised on non-Windows systems for some horrible reason.
raise ImportError("Running tzwin on non-Windows system")
from ._common import tzname_in_python2, _tzinfo
from ._common import tzrangebase
__all__ = ["tzwin", "tzwinlocal", "tzres"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
winreg.OpenKey(handle, TZKEYNAMENT).Close()
TZKEYNAME = TZKEYNAMENT
except WindowsError:
TZKEYNAME = TZKEYNAME9X
handle.Close()
return TZKEYNAME
TZKEYNAME = _settzkeyname()
class tzres(object):
"""
Class for accessing `tzres.dll`, which contains timezone name related
resources.
.. versionadded:: 2.5.0
"""
p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char
def __init__(self, tzres_loc='tzres.dll'):
# Load the user32 DLL so we can load strings from tzres
user32 = ctypes.WinDLL('user32')
# Specify the LoadStringW function
user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
wintypes.UINT,
wintypes.LPWSTR,
ctypes.c_int)
self.LoadStringW = user32.LoadStringW
self._tzres = ctypes.WinDLL(tzres_loc)
self.tzres_loc = tzres_loc
def load_name(self, offset):
"""
Load a timezone name from a DLL offset (integer).
>>> from dateutil.tzwin import tzres
>>> tzr = tzres()
>>> print(tzr.load_name(112))
'Eastern Standard Time'
:param offset:
A positive integer value referring to a string from the tzres dll.
.. note::
Offsets found in the registry are generally of the form
`@tzres.dll,-114`. The offset in this case is 114, not -114.
"""
resource = self.p_wchar()
lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
return resource[:nchar]
def name_from_string(self, tzname_str):
"""
Parse strings as returned from the Windows registry into the time zone
name as defined in the registry.
>>> from dateutil.tzwin import tzres
>>> tzr = tzres()
>>> print(tzr.name_from_string('@tzres.dll,-251'))
'Dateline Daylight Time'
>>> print(tzr.name_from_string('Eastern Standard Time'))
'Eastern Standard Time'
:param tzname_str:
A timezone name string as returned from a Windows registry key.
:return:
Returns the localized timezone string from tzres.dll if the string
is of the form `@tzres.dll,-offset`, else returns the input string.
"""
if not tzname_str.startswith('@'):
return tzname_str
name_splt = tzname_str.split(',-')
try:
offset = int(name_splt[1])
except:
raise ValueError("Malformed timezone string.")
return self.load_name(offset)
class tzwinbase(tzrangebase):
"""tzinfo class based on win32's timezones available in the registry."""
def __init__(self):
raise NotImplementedError('tzwinbase is an abstract base class')
def __eq__(self, other):
# Compare on all relevant dimensions, including name.
if not isinstance(other, tzwinbase):
return NotImplemented
return (self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._stddayofweek == other._stddayofweek and
self._dstdayofweek == other._dstdayofweek and
self._stdweeknumber == other._stdweeknumber and
self._dstweeknumber == other._dstweeknumber and
self._stdhour == other._stdhour and
self._dsthour == other._dsthour and
self._stdminute == other._stdminute and
self._dstminute == other._dstminute and
self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr)
@staticmethod
def list():
"""Return a list of all time zones known to the system."""
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
result = [winreg.EnumKey(tzkey, i)
for i in range(winreg.QueryInfoKey(tzkey)[0])]
return result
def display(self):
return self._display
def transitions(self, year):
"""
For a given year, get the DST on and off transition times, expressed
always on the standard time side. For zones with no transitions, this
function returns ``None``.
:param year:
The year whose transitions you would like to query.
:return:
Returns a :class:`tuple` of :class:`datetime.datetime` objects,
``(dston, dstoff)`` for zones with an annual DST transition, or
``None`` for fixed offset zones.
"""
if not self.hasdst:
return None
dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
self._dsthour, self._dstminute,
self._dstweeknumber)
dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
self._stdhour, self._stdminute,
self._stdweeknumber)
# Ambiguous dates default to the STD side
dstoff -= self._dst_base_offset
return dston, dstoff
def _get_hasdst(self):
return self._dstmonth != 0
@property
def _dst_base_offset(self):
return self._dst_base_offset_
class tzwin(tzwinbase):
def __init__(self, name):
self._name = name
# multiple contexts only possible in 2.7 and 3.1, we still support 2.6
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
tzkeyname = text_type("{kn}\{name}").format(kn=TZKEYNAME, name=name)
with winreg.OpenKey(handle, tzkeyname) as tzkey:
keydict = valuestodict(tzkey)
self._std_abbr = keydict["Std"]
self._dst_abbr = keydict["Dlt"]
self._display = keydict["Display"]
# See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=3l16h", keydict["TZI"])
stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
dstoffset = stdoffset-tup[2] # + DaylightBias * -1
self._std_offset = datetime.timedelta(minutes=stdoffset)
self._dst_offset = datetime.timedelta(minutes=dstoffset)
# for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[4:9]
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[12:17]
self._dst_base_offset_ = self._dst_offset - self._std_offset
self.hasdst = self._get_hasdst()
def __repr__(self):
return "tzwin(%s)" % repr(self._name)
def __reduce__(self):
return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
def __init__(self):
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
keydict = valuestodict(tzlocalkey)
self._std_abbr = keydict["StandardName"]
self._dst_abbr = keydict["DaylightName"]
try:
tzkeyname = text_type('{kn}\{sn}').format(kn=TZKEYNAME,
sn=self._std_abbr)
with winreg.OpenKey(handle, tzkeyname) as tzkey:
_keydict = valuestodict(tzkey)
self._display = _keydict["Display"]
except OSError:
self._display = None
stdoffset = -keydict["Bias"]-keydict["StandardBias"]
dstoffset = stdoffset-keydict["DaylightBias"]
self._std_offset = datetime.timedelta(minutes=stdoffset)
self._dst_offset = datetime.timedelta(minutes=dstoffset)
# For reasons unclear, in this particular key, the day of week has been
# moved to the END of the SYSTEMTIME structure.
tup = struct.unpack("=8h", keydict["StandardStart"])
(self._stdmonth,
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[1:5]
self._stddayofweek = tup[7]
tup = struct.unpack("=8h", keydict["DaylightStart"])
(self._dstmonth,
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[1:5]
self._dstdayofweek = tup[7]
self._dst_base_offset_ = self._dst_offset - self._std_offset
self.hasdst = self._get_hasdst()
def __repr__(self):
return "tzwinlocal()"
def __str__(self):
# str will return the standard name, not the daylight name.
return "tzwinlocal(%s)" % repr(self._std_abbr)
def __reduce__(self):
return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
""" dayofweek == 0 means Sunday, whichweek 5 means last instance """
first = datetime.datetime(year, month, 1, hour, minute)
# This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6),
# Because 7 % 7 = 0
weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1)
wd = weekdayone + ((whichweek - 1) * ONEWEEK)
if (wd.month != month):
wd -= ONEWEEK
return wd
def valuestodict(key):
"""Convert a registry key's values to a dictionary."""
dout = {}
size = winreg.QueryInfoKey(key)[1]
tz_res = None
for i in range(size):
key_name, value, dtype = winreg.EnumValue(key, i)
if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN:
# If it's a DWORD (32-bit integer), it's stored as unsigned - convert
# that to a proper signed integer
if value & (1 << 31):
value = value - (1 << 32)
elif dtype == winreg.REG_SZ:
# If it's a reference to the tzres DLL, load the actual string
if value.startswith('@tzres'):
tz_res = tz_res or tzres()
value = tz_res.name_from_string(value)
value = value.rstrip('\x00') # Remove trailing nulls
dout[key_name] = value
return dout
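# Illustrative usage sketch (not part of the original module; Windows only):
#     from dateutil.tz import tzwin, tzwinlocal
#     eastern = tzwin('Eastern Standard Time')   # any name returned by tzwin.list()
#     eastern.transitions(2017)                  # -> (dston, dstoff) or None
#     datetime.datetime.now(tzwinlocal())        # timezone-aware "now" in the local zone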
|
m-wichmann/miscStuff
|
refs/heads/master
|
BV_1/opencv.py
|
1
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
"""this script does an analysis of the video and outputs the direction of the street"""
# Notes:
# cv.ShowImage( "My Video Window", frame)
# cv.WaitKey(50)
#
# pixel access:
# frame[row,column]
#
# cv.SaveImage("bilder/bild" + str(f).zfill(4) + ".png", frame)
#
#
# cv.Circle(frameout, (x,y), 5, (255,0,0), thickness=2)
#
#
# bin image:
# 255 -> white
# 0 -> black
import cv
import border
def main():
"""main method"""
print "Start"
# open video file
filepath = "ocup.avi"
data = openfile(filepath)
# DEBUG: just to skip some frames
# for f in xrange(300):
# frame = cv.QueryFrame(data["video"])
# TODO: last image is empty?
for f in xrange(data["fcount"] - 1):
# for f in xrange(1):
# query next frame from video
frame = cv.QueryFrame(data["video"])
# create images to store... well... images...
framebw = cv.CreateImage((frame.width, frame.height), cv.IPL_DEPTH_8U, 1)
framebin = cv.CreateImage((frame.width, frame.height), cv.IPL_DEPTH_8U, 1)
frameedges = cv.CreateImage((frame.width, frame.height), cv.IPL_DEPTH_8U, 1)
frameout = cv.CreateImage((frame.width, frame.height), cv.IPL_DEPTH_8U, 1)
frameoutcol = cv.CreateImage((frame.width, frame.height), cv.IPL_DEPTH_8U, 3)
# rgb to grayscale
cv.CvtColor(frame,framebw,cv.CV_BGR2GRAY)
# grayscale to binary
cv.Threshold(framebw, framebin, 150, 255, cv.CV_THRESH_BINARY)
# detect edges with canny...
# cv.Canny(framebin,frameedges,150,300,3)
# cv.Canny(framebin,frameedges,150,100,3)
cv.Copy(framebin, frameout)
cv.CvtColor(frameout, frameoutcol, cv.CV_GRAY2RGB)
# check the image and determine whether the street is straight or curved
result = checkimage(framebin, frameoutcol)
# TODO: implement state machine or something
if result == 0:
print "straight"
if result == -1:
print "left"
if result == 1:
print "right"
cv.ShowImage("window", frameoutcol)
cv.WaitKey(50000)
# delete used resources
deleteRessources(data)
print "Done"
def checkimage(frame, frameout):
"""check image and return if street is straigth (0), left (-1) or right (1)"""
ret = border.checkimage(frame, frameout)
return ret
def deleteRessources(data):
"""delete used resources (namely the capture object)"""
del(data["video"])
def openfile(filepath):
"""open video at filepath and return dict with data"""
# capture video from file
video = cv.CaptureFromFile(filepath)
# extract some information
width = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FRAME_HEIGHT))
fps = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FPS))
fcount = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FRAME_COUNT))
# print video data
print "======"
print "Opened file: " + filepath
print "Width: " + str(width)
print "Height: " + str(height)
print "FPS: " + str(fps)
print "Frame count: " + str(fcount)
print "======"
# store data in dict
# TODO: check if necessary
data = {}
data["video"] = video
data["height"] = height
data["width"] = width
data["fps"] = fps
data["fcount"] = fcount
return data
if __name__ == '__main__':
main()
|
structRecomputation/computations
|
refs/heads/master
|
_modules/auxiliary_shared.py
|
1
|
from string import Formatter
import pandas as pd
import numpy as np
import subprocess
import argparse
import shutil
import socket
import errno
import shlex
import copy
import os
from clsMail import MailCls
from respy.python.simulate.simulate_auxiliary import write_out
import respy
PROJECT_DIR = os.path.dirname(os.path.realpath(__file__))
PROJECT_DIR = PROJECT_DIR.replace('/_modules', '')
SPEC_DIR = PROJECT_DIR + '/parametrizations'
RSLT_DIR = PROJECT_DIR + '/results'
EXACT_DIR = RSLT_DIR + '/exact_solution'
def estimate_static(is_debug, maxfun_static):
""" Estimate a static economy as a baseline.
"""
# First perform a static estimation on the exact baseline data
os.mkdir('static'), os.chdir('static')
respy_obj = respy.RespyCls(SPEC_DIR + '/data_one.ini')
respy_obj.unlock()
respy_obj.set_attr('file_est', EXACT_DIR + '/data_one/data.respy.dat')
respy_obj.set_attr('maxfun', maxfun_static)
respy_obj.set_attr('num_agents_sim', 100)
respy_obj.set_attr('num_agents_est', 100)
respy_obj.set_attr('delta', 0.0)
respy_obj.lock()
# Clarify some additional requests for the simulations of the start and finish samples.
sim_args = None
if not is_debug:
sim_args = dict()
sim_args['is_interpolated'] = False
sim_args['num_draws_emax'] = 100000
respy_obj.write_out()
# Simulate a sample with starting values and then start the estimation.
simulate_samples('start', respy_obj, sim_args)
start_vals, _ = respy.estimate(respy_obj)
# Update the model with the estimated parameters and simulate the resulting sample.
respy_obj.update_model_paras(start_vals)
simulate_samples('finish', respy_obj, sim_args)
os.chdir('../')
# Finishing
return start_vals
def simulate_samples(dirname, respy_obj, kwargs=None):
""" Simulate a sample in new subdirectory for the purposes of visual
inspection,
"""
respy_copy = copy.deepcopy(respy_obj)
if os.path.exists(dirname):
shutil.rmtree(dirname)
# Update parameters for simulation.
if kwargs is not None:
respy_copy.unlock()
for key_ in kwargs.keys():
respy_copy.set_attr(key_, kwargs[key_])
respy_copy.lock()
os.mkdir(dirname), os.chdir(dirname)
respy_copy.attr['file_sim'] = dirname + '_sample.dat'
respy_copy.write_out()
respy.simulate(respy_copy)
del respy_copy
os.chdir('../')
def enter_results_dir(which):
""" This function creates the requested results directory and switches the working directory.
"""
# Ensure that directory structure exists
if not os.path.exists(RSLT_DIR):
os.mkdir(RSLT_DIR)
dirname = RSLT_DIR + '/' + which
if os.path.exists(dirname):
shutil.rmtree(dirname)
os.mkdir(dirname)
source_dir = os.getcwd()
os.chdir(dirname)
return source_dir
def write_bootstrap_sample(respy_obj, which, seed):
""" Write out a bootstrap sample for estimation.
"""
fname = EXACT_DIR + '/data_' + which + '/data.respy.dat'
df = pd.read_csv(fname, delim_whitespace=True,
header=-1, na_values='.', dtype={0: np.int, 1: np.int, 2: np.int,
3: np.float, 4: np.int, 5: np.int, 6: np.int, 7: np.int}, nrows=40000)
df.set_index([0, 1], drop=False, inplace=True)
np.random.seed(seed)
index_subsample = np.random.choice(range(1000), 100, replace=False).tolist()
df_sample = df.loc[index_subsample]
write_out(respy_obj, df_sample)
def get_seeds(num_draws, seed=789):
""" Get a list of seeds to create the Monte Carlo datasets.
"""
np.random.seed(seed)
seeds = np.random.choice(range(1000), size=num_draws, replace=False).tolist()
return seeds
def process_command_line(description):
""" Distribute command line arguments.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--debug', action='store_true', dest='is_debug',
help='use debug specification')
parser.add_argument('--procs', action='store', type=int, dest='num_procs',
default=1, help='use multiple processors')
# Process command line arguments
args = parser.parse_args()
# Extract arguments
num_procs = args.num_procs
is_debug = args.is_debug
# Check arguments
assert (is_debug in [True, False])
assert (isinstance(num_procs, int))
assert (num_procs > 0)
# Finishing
return is_debug, num_procs
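# Illustrative invocation (hypothetical script name) of a script that calls
# process_command_line: ``python run_exact.py --debug --procs 4``.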
def cleanup():
# Cleanup
if socket.gethostname() != 'vagrant':
subprocess.call(['git', 'clean', '-f', '-d'])
def get_optimization_info():
""" Get some additional information about the optimization.
"""
with open('est.respy.info') as in_file:
for line in in_file.readlines():
# Split line
list_ = shlex.split(line)
# Skip empty lines
if not list_:
continue
# Check for applicability
if len(list_) < 4:
continue
if list_[2] == 'Steps':
num_steps = int(list_[3])
if list_[2] == 'Evaluations':
num_evals = int(list_[3])
# Finishing
return num_evals, num_steps
def send_notification(which):
""" Finishing up a run of the testing battery.
"""
hostname = socket.gethostname()
subject = ' STRUCT_RECOMPUTATION: '
if which == 'exact':
subject += 'Exact Solutions'
message = ' Exact solutions'
elif which == 'correct':
subject += 'Correct Choices'
message = ' Analysis of correct choices'
elif which == 'monte':
subject += 'Monte Carlo Investigation'
message = ' Results from Monte Carlo exercise'
elif which == 'criterions':
subject += 'Criterions Investigation'
message = ' Results from the criterions investigation'
elif which == 'schemes':
subject += 'Schemes Investigation'
message = ' Results from the schemes investigation'
elif which == 'smoothing':
subject += 'Smoothing Investigation'
message = ' Results from the smoothing investigation'
elif which == 'performance':
subject += 'Performance Investigation'
message = ' Results from the performance investigation'
else:
raise AssertionError
message += ' are available on @' + hostname + '.'
mail_obj = MailCls()
mail_obj.set_attr('subject', subject)
mail_obj.set_attr('message', message)
# This allows the same scripts to be used for the recomputation material.
try:
mail_obj.lock()
mail_obj.send()
except:
pass
def to_string(input):
""" This function transfers the input to a string.
"""
if isinstance(input, int):
return '{0:05d}'.format(input)
elif isinstance(input, float):
return '{:.5f}'.format(input)
else:
raise NotImplementedError
def mkdir_p(path):
""" Make directory, including parent directory (if required).
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def get_choice_probabilities(fname, is_flatten=True):
""" Get the choice probabilities.
"""
# Initialize container.
stats = np.tile(np.nan, (0, 4))
with open(fname) as in_file:
for line in in_file.readlines():
# Split line
list_ = shlex.split(line)
# Skip empty lines
if not list_:
continue
# If OUTCOMES is reached, then we are done for good.
if list_[0] == 'Outcomes':
break
# Any lines that do not have an integer as their first element
# are not of interest.
try:
int(list_[0])
except ValueError:
continue
# All lines that make it down here are relevant.
stats = np.vstack((stats, [float(x) for x in list_[1:]]))
# Return all statistics as a flattened array.
if is_flatten:
stats = stats.flatten()
# Finishing
return stats
def strfdelta(tdelta, fmt):
""" Get a string from a timedelta.
"""
f, d = Formatter(), {}
l = {'D': 86400, 'H': 3600, 'M': 60, 'S': 1}
k = list(map(lambda x: x[1], list(f.parse(fmt))))
rem = int(tdelta.total_seconds())
for i in ('D', 'H', 'M', 'S'):
if i in k and i in l.keys():
d[i], rem = divmod(rem, l[i])
return f.format(fmt, **d)
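# Illustrative example (not part of the original module):
# strfdelta(timedelta(hours=1, minutes=2, seconds=3), "{H:02}:{M:02}:{S:02}")
# returns "01:02:03"; a timedelta import would be needed for this snippet.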
def formatting_duration(start_time, finish_time):
""" This function formats the time objects to a pretty string that indicates the duration of
the estimation.
"""
duration_str = strfdelta(finish_time - start_time, "{H:02}:{M:02}:{S:02}")
return duration_str
|
dd00/commandergenius
|
refs/heads/dd00
|
project/jni/python/src/Lib/plat-mac/applesingle.py
|
31
|
r"""Routines to decode AppleSingle files
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the applesingle module is removed.", stacklevel=2)
import struct
import sys
try:
import MacOS
import Carbon.File
except:
class MacOS:
def openrf(path, mode):
return open(path + '.rsrc', mode)
openrf = classmethod(openrf)
class Carbon:
class File:
class FSSpec:
pass
class FSRef:
pass
class Alias:
pass
# all of the errors in this module are really errors in the input
# so I think it should test positive against ValueError.
class Error(ValueError):
pass
# File header format: magic, version, unused, number of entries
AS_HEADER_FORMAT=">LL16sh"
AS_HEADER_LENGTH=26
# The flag words for AppleSingle
AS_MAGIC=0x00051600
AS_VERSION=0x00020000
# Entry header format: id, offset, length
AS_ENTRY_FORMAT=">lll"
AS_ENTRY_LENGTH=12
# The id values
AS_DATAFORK=1
AS_RESOURCEFORK=2
AS_IGNORE=(3,4,5,6,8,9,10,11,12,13,14,15)
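# Illustrative sanity check (not part of the original module): the formats above
# pack to exactly the advertised lengths (4 + 4 + 16 + 2 = 26 and 3 * 4 = 12 bytes,
# big-endian, no padding).
assert struct.calcsize(AS_HEADER_FORMAT) == AS_HEADER_LENGTH
assert struct.calcsize(AS_ENTRY_FORMAT) == AS_ENTRY_LENGTH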
class AppleSingle(object):
datafork = None
resourcefork = None
def __init__(self, fileobj, verbose=False):
header = fileobj.read(AS_HEADER_LENGTH)
try:
magic, version, ig, nentry = struct.unpack(AS_HEADER_FORMAT, header)
except ValueError, arg:
raise Error, "Unpack header error: %s" % (arg,)
if verbose:
print 'Magic: 0x%8.8x' % (magic,)
print 'Version: 0x%8.8x' % (version,)
print 'Entries: %d' % (nentry,)
if magic != AS_MAGIC:
raise Error, "Unknown AppleSingle magic number 0x%8.8x" % (magic,)
if version != AS_VERSION:
raise Error, "Unknown AppleSingle version number 0x%8.8x" % (version,)
if nentry <= 0:
raise Error, "AppleSingle file contains no forks"
headers = [fileobj.read(AS_ENTRY_LENGTH) for i in xrange(nentry)]
self.forks = []
for hdr in headers:
try:
restype, offset, length = struct.unpack(AS_ENTRY_FORMAT, hdr)
except ValueError, arg:
raise Error, "Unpack entry error: %s" % (arg,)
if verbose:
print "Fork %d, offset %d, length %d" % (restype, offset, length)
fileobj.seek(offset)
data = fileobj.read(length)
if len(data) != length:
raise Error, "Short read: expected %d bytes got %d" % (length, len(data))
self.forks.append((restype, data))
if restype == AS_DATAFORK:
self.datafork = data
elif restype == AS_RESOURCEFORK:
self.resourcefork = data
def tofile(self, path, resonly=False):
outfile = open(path, 'wb')
data = False
if resonly:
if self.resourcefork is None:
raise Error, "No resource fork found"
fp = open(path, 'wb')
fp.write(self.resourcefork)
fp.close()
elif (self.resourcefork is None and self.datafork is None):
raise Error, "No useful forks found"
else:
if self.datafork is not None:
fp = open(path, 'wb')
fp.write(self.datafork)
fp.close()
if self.resourcefork is not None:
fp = MacOS.openrf(path, '*wb')
fp.write(self.resourcefork)
fp.close()
def decode(infile, outpath, resonly=False, verbose=False):
"""decode(infile, outpath [, resonly=False, verbose=False])
Creates a decoded file from an AppleSingle encoded file.
If resonly is True, then it will create a regular file at
outpath containing only the resource fork from infile.
Otherwise it will create an AppleDouble file at outpath
with the data and resource forks from infile. On platforms
without the MacOS module, it will create outpath and outpath+'.rsrc'
with the data and resource forks respectively.
"""
if not hasattr(infile, 'read'):
if isinstance(infile, Carbon.File.Alias):
infile = infile.ResolveAlias()[0]
if isinstance(infile, (Carbon.File.FSSpec, Carbon.File.FSRef)):
infile = infile.as_pathname()
infile = open(infile, 'rb')
asfile = AppleSingle(infile, verbose=verbose)
asfile.tofile(outpath, resonly=resonly)
def _test():
if len(sys.argv) < 3 or sys.argv[1] == '-r' and len(sys.argv) != 4:
print 'Usage: applesingle.py [-r] applesinglefile decodedfile'
sys.exit(1)
if sys.argv[1] == '-r':
resonly = True
del sys.argv[1]
else:
resonly = False
decode(sys.argv[1], sys.argv[2], resonly=resonly)
if __name__ == '__main__':
_test()
|
cainiaocome/ssbc
|
refs/heads/master
|
search/admin.py
|
26
|
from django.contrib import admin
from search.models import Hash, FileList, StatusReport, RecKeywords
# Register your models here.
admin.site.register(Hash)
admin.site.register(FileList)
admin.site.register(StatusReport)
admin.site.register(RecKeywords)
|
public-ink/public-ink
|
refs/heads/master
|
server/appengine/lib/numpy/polynomial/_polybase.py
|
13
|
"""
Abstract base class for the various polynomial Classes.
The ABCPolyBase class provides the methods needed to implement the common API
for the various polynomial classes. It operates as a mixin, but uses the
abc module from the stdlib, hence it is only available for Python >= 2.6.
"""
from __future__ import division, absolute_import, print_function
from abc import ABCMeta, abstractmethod, abstractproperty
from numbers import Number
import numpy as np
from . import polyutils as pu
__all__ = ['ABCPolyBase']
class ABCPolyBase(object):
"""An abstract base class for series classes.
ABCPolyBase provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the
methods listed below.
.. versionadded:: 1.9.0
Parameters
----------
coef : array_like
Series coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where
``P_i`` is the basis polynomial of degree ``i``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is the derived class domain.
window : (2,) array_like, optional
Window, see domain for its use. The default value is the
derived class window.
Attributes
----------
coef : (N,) ndarray
Series coefficients in order of increasing degree.
domain : (2,) ndarray
Domain that is mapped to window.
window : (2,) ndarray
Window that domain is mapped to.
Class Attributes
----------------
maxpower : int
Maximum power allowed, i.e., the largest number ``n`` such that
``p(x)**n`` is allowed. This is to limit runaway polynomial size.
domain : (2,) ndarray
Default domain of the class.
window : (2,) ndarray
Default window of the class.
"""
__metaclass__ = ABCMeta
# Not hashable
__hash__ = None
# Opt out of numpy ufuncs and Python ops with ndarray subclasses.
__array_ufunc__ = None
# Limit runaway size. T_n^m has degree n*m
maxpower = 100
@abstractproperty
def domain(self):
pass
@abstractproperty
def window(self):
pass
@abstractproperty
def nickname(self):
pass
@abstractmethod
def _add(self):
pass
@abstractmethod
def _sub(self):
pass
@abstractmethod
def _mul(self):
pass
@abstractmethod
def _div(self):
pass
@abstractmethod
def _pow(self):
pass
@abstractmethod
def _val(self):
pass
@abstractmethod
def _int(self):
pass
@abstractmethod
def _der(self):
pass
@abstractmethod
def _fit(self):
pass
@abstractmethod
def _line(self):
pass
@abstractmethod
def _roots(self):
pass
@abstractmethod
def _fromroots(self):
pass
def has_samecoef(self, other):
"""Check if coefficients match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``coef`` attribute.
Returns
-------
bool : boolean
True if the coefficients are the same, False otherwise.
"""
if len(self.coef) != len(other.coef):
return False
elif not np.all(self.coef == other.coef):
return False
else:
return True
def has_samedomain(self, other):
"""Check if domains match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``domain`` attribute.
Returns
-------
bool : boolean
True if the domains are the same, False otherwise.
"""
return np.all(self.domain == other.domain)
def has_samewindow(self, other):
"""Check if windows match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``window`` attribute.
Returns
-------
bool : boolean
True if the windows are the same, False otherwise.
"""
return np.all(self.window == other.window)
def has_sametype(self, other):
"""Check if types match.
.. versionadded:: 1.7.0
Parameters
----------
other : object
Class instance.
Returns
-------
bool : boolean
True if other is same class as self
"""
return isinstance(other, self.__class__)
def _get_coefficients(self, other):
"""Interpret other as polynomial coefficients.
The `other` argument is checked to see if it is of the same
class as self with identical domain and window. If so,
return its coefficients, otherwise return `other`.
.. versionadded:: 1.9.0
Parameters
----------
other : anything
Object to be checked.
Returns
-------
coef
The coefficients of `other` if it is a compatible instance
of ABCPolyBase, otherwise `other`.
Raises
------
TypeError
When `other` is an incompatible instance of ABCPolyBase.
"""
if isinstance(other, ABCPolyBase):
if not isinstance(other, self.__class__):
raise TypeError("Polynomial types differ")
elif not np.all(self.domain == other.domain):
raise TypeError("Domains differ")
elif not np.all(self.window == other.window):
raise TypeError("Windows differ")
return other.coef
return other
def __init__(self, coef, domain=None, window=None):
[coef] = pu.as_series([coef], trim=False)
self.coef = coef
if domain is not None:
[domain] = pu.as_series([domain], trim=False)
if len(domain) != 2:
raise ValueError("Domain has wrong number of elements.")
self.domain = domain
if window is not None:
[window] = pu.as_series([window], trim=False)
if len(window) != 2:
raise ValueError("Window has wrong number of elements.")
self.window = window
def __repr__(self):
format = "%s(%s, %s, %s)"
coef = repr(self.coef)[6:-1]
domain = repr(self.domain)[6:-1]
window = repr(self.window)[6:-1]
name = self.__class__.__name__
return format % (name, coef, domain, window)
def __str__(self):
format = "%s(%s)"
coef = str(self.coef)
name = self.nickname
return format % (name, coef)
# Pickle and copy
def __getstate__(self):
ret = self.__dict__.copy()
ret['coef'] = self.coef.copy()
ret['domain'] = self.domain.copy()
ret['window'] = self.window.copy()
return ret
def __setstate__(self, dict):
self.__dict__ = dict
# Call
def __call__(self, arg):
off, scl = pu.mapparms(self.domain, self.window)
arg = off + scl*arg
return self._val(arg, self.coef)
def __iter__(self):
return iter(self.coef)
def __len__(self):
return len(self.coef)
# Numeric properties.
def __neg__(self):
return self.__class__(-self.coef, self.domain, self.window)
def __pos__(self):
return self
def __add__(self, other):
try:
othercoef = self._get_coefficients(other)
coef = self._add(self.coef, othercoef)
except TypeError as e:
raise e
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __sub__(self, other):
try:
othercoef = self._get_coefficients(other)
coef = self._sub(self.coef, othercoef)
except TypeError as e:
raise e
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __mul__(self, other):
try:
othercoef = self._get_coefficients(other)
coef = self._mul(self.coef, othercoef)
except TypeError as e:
raise e
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __div__(self, other):
# set to __floordiv__, /, for now.
return self.__floordiv__(other)
def __truediv__(self, other):
# there is no true divide if the rhs is not a Number, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, though.
if not isinstance(other, Number) or isinstance(other, bool):
form = "unsupported types for true division: '%s', '%s'"
raise TypeError(form % (type(self), type(other)))
return self.__floordiv__(other)
def __floordiv__(self, other):
res = self.__divmod__(other)
if res is NotImplemented:
return res
return res[0]
def __mod__(self, other):
res = self.__divmod__(other)
if res is NotImplemented:
return res
return res[1]
def __divmod__(self, other):
try:
othercoef = self._get_coefficients(other)
quo, rem = self._div(self.coef, othercoef)
except (TypeError, ZeroDivisionError) as e:
raise e
except Exception:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
return quo, rem
def __pow__(self, other):
coef = self._pow(self.coef, other, maxpower=self.maxpower)
res = self.__class__(coef, self.domain, self.window)
return res
def __radd__(self, other):
try:
coef = self._add(other, self.coef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rsub__(self, other):
try:
coef = self._sub(other, self.coef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rmul__(self, other):
try:
coef = self._mul(other, self.coef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rdiv__(self, other):
# set to __floordiv__ /.
return self.__rfloordiv__(other)
def __rtruediv__(self, other):
# An instance of ABCPolyBase is not considered a
# Number.
return NotImplemented
def __rfloordiv__(self, other):
res = self.__rdivmod__(other)
if res is NotImplemented:
return res
return res[0]
def __rmod__(self, other):
res = self.__rdivmod__(other)
if res is NotImplemented:
return res
return res[1]
def __rdivmod__(self, other):
try:
quo, rem = self._div(other, self.coef)
except ZeroDivisionError as e:
raise e
except Exception:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
return quo, rem
# Enhance me
# some augmented arithmetic operations could be added here
def __eq__(self, other):
res = (isinstance(other, self.__class__) and
np.all(self.domain == other.domain) and
np.all(self.window == other.window) and
(self.coef.shape == other.coef.shape) and
np.all(self.coef == other.coef))
return res
def __ne__(self, other):
return not self.__eq__(other)
#
# Extra methods.
#
def copy(self):
"""Return a copy.
Returns
-------
new_series : series
Copy of self.
"""
return self.__class__(self.coef, self.domain, self.window)
def degree(self):
"""The degree of the series.
.. versionadded:: 1.5.0
Returns
-------
degree : int
Degree of the series, one less than the number of coefficients.
"""
return len(self) - 1
def cutdeg(self, deg):
"""Truncate series to the given degree.
Reduce the degree of the series to `deg` by discarding the
high order terms. If `deg` is greater than the current degree a
copy of the current series is returned. This can be useful in least
squares where the coefficients of the high degree terms may be very
small.
.. versionadded:: 1.5.0
Parameters
----------
deg : non-negative int
The series is reduced to degree `deg` by discarding the high
order terms. The value of `deg` must be a non-negative integer.
Returns
-------
new_series : series
New instance of series with reduced degree.
"""
return self.truncate(deg + 1)
def trim(self, tol=0):
"""Remove trailing coefficients
Remove trailing coefficients until a coefficient is reached whose
absolute value is greater than `tol` or the beginning of the series is
reached. If all the coefficients would be removed the series is set
to ``[0]``. A new series instance is returned with the new
coefficients. The current instance remains unchanged.
Parameters
----------
tol : non-negative number.
All trailing coefficients less than `tol` will be removed.
Returns
-------
new_series : series
Contains the new set of coefficients.
"""
coef = pu.trimcoef(self.coef, tol)
return self.__class__(coef, self.domain, self.window)
def truncate(self, size):
"""Truncate series to length `size`.
Reduce the series to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer. This
can be useful in least squares where the coefficients of the
high degree terms may be very small.
Parameters
----------
size : positive int
The series is reduced to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer.
Returns
-------
new_series : series
New instance of series with truncated coefficients.
"""
isize = int(size)
if isize != size or isize < 1:
raise ValueError("size must be a positive integer")
if isize >= len(self.coef):
coef = self.coef
else:
coef = self.coef[:isize]
return self.__class__(coef, self.domain, self.window)
def convert(self, domain=None, kind=None, window=None):
"""Convert series to a different kind and/or domain and/or window.
Parameters
----------
domain : array_like, optional
The domain of the converted series. If the value is None,
the default domain of `kind` is used.
kind : class, optional
The polynomial series type class to which the current instance
should be converted. If kind is None, then the class of the
current instance is used.
window : array_like, optional
The window of the converted series. If the value is None,
the default window of `kind` is used.
Returns
-------
new_series : series
The returned class can be of different type than the current
instance and/or have a different domain and/or different
window.
Notes
-----
Conversion between domains and class types can result in
numerically ill defined series.
Examples
--------
"""
if kind is None:
kind = self.__class__
if domain is None:
domain = kind.domain
if window is None:
window = kind.window
return self(kind.identity(domain, window=window))
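# A minimal conversion sketch, assuming the Polynomial and Chebyshev
# subclasses from numpy.polynomial; since x**2 == (T_0 + T_2)/2 the converted
# coefficients are [0.5, 0, 0.5]:
#
#     >>> from numpy.polynomial import Polynomial, Chebyshev
#     >>> Polynomial([0, 0, 1]).convert(kind=Chebyshev).coef
#     array([0.5, 0. , 0.5])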
def mapparms(self):
"""Return the mapping parameters.
The returned values define a linear map ``off + scl*x`` that is
applied to the input arguments before the series is evaluated. The
map depends on the ``domain`` and ``window``; if the current
``domain`` is equal to the ``window`` the resulting map is the
identity. If the coefficients of the series instance are to be
used by themselves outside this class, then the linear function
must be substituted for the ``x`` in the standard representation of
the base polynomials.
Returns
-------
off, scl : float or complex
The mapping function is defined by ``off + scl*x``.
Notes
-----
If the current domain is the interval ``[l1, r1]`` and the window
is ``[l2, r2]``, then the linear mapping function ``L`` is
defined by the equations::
L(l1) = l2
L(r1) = r2
"""
return pu.mapparms(self.domain, self.window)
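# A small illustration, assuming the Polynomial subclass from
# numpy.polynomial: with domain [0, 4] and window [-1, 1] the map is
# L(x) = -1 + 0.5*x, so L(0) == -1 and L(4) == 1.
#
#     >>> from numpy.polynomial import Polynomial
#     >>> Polynomial([1, 2], domain=[0, 4], window=[-1, 1]).mapparms()
#     (-1.0, 0.5)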
def integ(self, m=1, k=[], lbnd=None):
"""Integrate.
Return a series instance that is the definite integral of the
current series.
Parameters
----------
m : non-negative int
The number of integrations to perform.
k : array_like
Integration constants. The first constant is applied to the
first integration, the second to the second, and so on. The
list of values must be less than or equal to `m` in length and any
missing values are set to zero.
lbnd : Scalar
The lower bound of the definite integral.
Returns
-------
new_series : series
A new series representing the integral. The domain is the same
as the domain of the integrated series.
"""
off, scl = self.mapparms()
if lbnd is None:
lbnd = 0
else:
lbnd = off + scl*lbnd
coef = self._int(self.coef, m, k, lbnd, 1./scl)
return self.__class__(coef, self.domain, self.window)
def deriv(self, m=1):
"""Differentiate.
Return a series instance that is the derivative of the current
series.
Parameters
----------
m : non-negative int
Find the derivative of order `m`.
Returns
-------
new_series : series
A new series representing the derivative. The domain is the same
as the domain of the differentiated series.
"""
off, scl = self.mapparms()
coef = self._der(self.coef, m, scl)
return self.__class__(coef, self.domain, self.window)
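# A quick round-trip sketch of deriv/integ, assuming the Polynomial subclass
# from numpy.polynomial:
#
#     >>> from numpy.polynomial import Polynomial
#     >>> p = Polynomial([1, 2, 3])           # 1 + 2*x + 3*x**2
#     >>> p.deriv().coef                      # 2 + 6*x
#     array([2., 6.])
#     >>> p.deriv().integ(k=[1]).coef         # integration constant restores p
#     array([1., 2., 3.])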
def roots(self):
"""Return the roots of the series polynomial.
Compute the roots for the series. Note that the accuracy of the
roots decreases the further outside the domain they lie.
Returns
-------
roots : ndarray
Array containing the roots of the series.
"""
roots = self._roots(self.coef)
return pu.mapdomain(roots, self.window, self.domain)
def linspace(self, n=100, domain=None):
"""Return x, y values at equally spaced points in domain.
Returns the x, y values at `n` linearly spaced points across the
domain. Here y is the value of the polynomial at the points x. By
default the domain is the same as that of the series instance.
This method is intended mostly as a plotting aid.
.. versionadded:: 1.5.0
Parameters
----------
n : int, optional
Number of point pairs to return. The default value is 100.
domain : {None, array_like}, optional
If not None, the specified domain is used instead of that of
the calling instance. It should be of the form ``[beg,end]``.
The default is None, in which case the domain of the calling instance is used.
Returns
-------
x, y : ndarray
x is equal to linspace(self.domain[0], self.domain[1], n) and
y is the series evaluated at each element of x.
"""
if domain is None:
domain = self.domain
x = np.linspace(domain[0], domain[1], n)
y = self(x)
return x, y
@classmethod
def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None,
window=None):
"""Least squares fit to data.
Return a series instance that is the least squares fit to the data
`y` sampled at `x`. The domain of the returned instance can be
specified and this will often result in a superior fit with less
chance of ill conditioning.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
domain : {None, [beg, end], []}, optional
Domain to use for the returned series. If ``None``,
then a minimal domain that covers the points `x` is chosen. If
``[]`` the class domain is used. The default value was the
class domain in NumPy 1.4 and ``None`` in later versions.
The ``[]`` option was added in numpy 1.5.0.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than this relative to the largest singular value will be
ignored. The default value is len(x)*eps, where eps is the
relative precision of the float type, about 2e-16 in most
cases.
full : bool, optional
Switch determining nature of return value. When it is False
(the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is
also returned.
w : array_like, shape (M,), optional
Weights. If not None the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products
``w[i]*y[i]`` all have the same variance. The default value is
None.
.. versionadded:: 1.5.0
window : {[beg, end]}, optional
Window to use for the returned series. The default
value is the default class window.
.. versionadded:: 1.6.0
Returns
-------
new_series : series
A series that represents the least squares fit to the data and
has the domain specified in the call.
[resid, rank, sv, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
"""
if domain is None:
domain = pu.getdomain(x)
elif type(domain) is list and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
xnew = pu.mapdomain(x, domain, window)
res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)
if full:
[coef, status] = res
return cls(coef, domain=domain, window=window), status
else:
coef = res
return cls(coef, domain=domain, window=window)
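# A minimal fitting sketch, assuming the Polynomial subclass from
# numpy.polynomial; convert() maps the scaled coefficients back to the
# unscaled x basis (values are exact up to rounding):
#
#     >>> import numpy as np
#     >>> from numpy.polynomial import Polynomial
#     >>> x = np.linspace(0, 10, 50)
#     >>> p = Polynomial.fit(x, 1.0 + 2.0*x, deg=1)
#     >>> p.domain                            # minimal domain covering x
#     array([ 0., 10.])
#     >>> p.convert().coef
#     array([1., 2.])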
@classmethod
def fromroots(cls, roots, domain=[], window=None):
"""Return series instance that has the specified roots.
Returns a series representing the product
``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
list of roots.
Parameters
----------
roots : array_like
List of roots.
domain : {[], None, array_like}, optional
Domain for the resulting series. If None the domain is the
interval from the smallest root to the largest. If [] the
domain is the class domain. The default is [].
window : {None, array_like}, optional
Window for the returned series. If None the class window is
used. The default is None.
Returns
-------
new_series : series
Series with the specified roots.
"""
[roots] = pu.as_series([roots], trim=False)
if domain is None:
domain = pu.getdomain(roots)
elif type(domain) is list and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
deg = len(roots)
off, scl = pu.mapparms(domain, window)
rnew = off + scl*roots
coef = cls._fromroots(rnew) / scl**deg
return cls(coef, domain=domain, window=window)
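# A one-line sketch, assuming the Polynomial subclass from numpy.polynomial:
# the roots -1 and 1 give (x + 1)*(x - 1) == x**2 - 1.
#
#     >>> from numpy.polynomial import Polynomial
#     >>> Polynomial.fromroots([-1, 1]).coef
#     array([-1.,  0.,  1.])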
@classmethod
def identity(cls, domain=None, window=None):
"""Identity function.
If ``p`` is the returned series, then ``p(x) == x`` for all
values of x.
Parameters
----------
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be of the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
Series representing the identity.
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
off, scl = pu.mapparms(window, domain)
coef = cls._line(off, scl)
return cls(coef, domain, window)
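# A small check, assuming the Polynomial subclass from numpy.polynomial: the
# returned series satisfies p(x) == x even for a non-default domain/window.
#
#     >>> from numpy.polynomial import Polynomial
#     >>> p = Polynomial.identity(domain=[0, 4], window=[-1, 1])
#     >>> p.coef
#     array([2., 2.])
#     >>> p(3.0)
#     3.0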
@classmethod
def basis(cls, deg, domain=None, window=None):
"""Series basis polynomial of degree `deg`.
Returns the series representing the basis polynomial of degree `deg`.
.. versionadded:: 1.7.0
Parameters
----------
deg : int
Degree of the basis polynomial for the series. Must be >= 0.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be of the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
A series with the coefficient of the `deg` term set to one and
all others zero.
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
ideg = int(deg)
if ideg != deg or ideg < 0:
raise ValueError("deg must be non-negative integer")
return cls([0]*ideg + [1], domain, window)
@classmethod
def cast(cls, series, domain=None, window=None):
"""Convert series to series of this class.
The `series` is expected to be an instance of some polynomial
series of one of the types supported by the numpy.polynomial
module, but could be some other class that supports the convert
method.
.. versionadded:: 1.7.0
Parameters
----------
series : series
The series instance to be converted.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be of the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
A series of the same kind as the calling class and equal to
`series` when evaluated.
See Also
--------
convert : similar instance method
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
return series.convert(domain, cls, window)
|
person142/scipy
|
refs/heads/master
|
scipy/special/tests/test_spfun_stats.py
|
9
|
import numpy as np
from numpy.testing import (assert_array_equal,
assert_array_almost_equal_nulp, assert_almost_equal)
from pytest import raises as assert_raises
from scipy.special import gammaln, multigammaln
class TestMultiGammaLn(object):
def test1(self):
# A test of the identity
# Gamma_1(a) = Gamma(a)
np.random.seed(1234)
a = np.abs(np.random.randn())
assert_array_equal(multigammaln(a, 1), gammaln(a))
def test2(self):
# A test of the identity
# Gamma_2(a) = sqrt(pi) * Gamma(a) * Gamma(a - 0.5)
a = np.array([2.5, 10.0])
result = multigammaln(a, 2)
expected = np.log(np.sqrt(np.pi)) + gammaln(a) + gammaln(a - 0.5)
assert_almost_equal(result, expected)
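# For reference, both identities above are special cases of the multivariate
# gamma function (assuming the usual definition):
#     Gamma_d(a) = pi**(d*(d - 1)/4) * prod_{j=1..d} Gamma(a + (1 - j)/2)
# so in log form:
#     multigammaln(a, d) == (d*(d - 1)/4)*np.log(np.pi)
#                           + sum(gammaln(a - 0.5*j) for j in range(d))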
def test_bararg(self):
assert_raises(ValueError, multigammaln, 0.5, 1.2)
def _check_multigammaln_array_result(a, d):
# Test that the shape of the array returned by multigammaln
# matches the input shape, and that all the values match
# the value computed when multigammaln is called with a scalar.
result = multigammaln(a, d)
assert_array_equal(a.shape, result.shape)
a1 = a.ravel()
result1 = result.ravel()
for i in range(a.size):
assert_array_almost_equal_nulp(result1[i], multigammaln(a1[i], d))
def test_multigammaln_array_arg():
# Check that the array returned by multigammaln has the correct
# shape and contains the correct values. The cases have arrays
# with several different shapes.
# The cases include a regression test for ticket #1849
# (a = np.array([2.0]), an array with a single element).
np.random.seed(1234)
cases = [
# a, d
(np.abs(np.random.randn(3, 2)) + 5, 5),
(np.abs(np.random.randn(1, 2)) + 5, 5),
(np.arange(10.0, 18.0).reshape(2, 2, 2), 3),
(np.array([2.0]), 3),
(np.float64(2.0), 3),
]
for a, d in cases:
_check_multigammaln_array_result(a, d)
|
zhangpanrobot/myblog
|
refs/heads/master
|
node_modules/testem/node_modules/tap/node_modules/yamlish/yamlish-py/test/__init__.py
|
161
|
# -*- coding: utf-8 -*- IGNORE:C0111
from __future__ import absolute_import, print_function, unicode_literals
import logging
import yamlish
import yaml
import tempfile
import textwrap
INPUT = 1
OUTPUT = 2
if yamlish.py3k:
unicode = str
#logging.basicConfig(level=logging.DEBUG)
def _generate_test_name(source):
"""
Clean up human-friendly test name into a method name.
"""
out = source.replace(' ', '_').replace(':', '').replace(',', '').lower()
return "test_%s" % out
def _create_input_test(test_src, tested_function, options=None):
"""
Decorate tested function to be used as a method for TestCase.
"""
def do_test_expected(self):
"""
Execute a test by calling a tested_function on test_src data.
"""
self.maxDiff = None
got = ""
if 'error' in test_src:
self.assertRaises(test_src['error'], tested_function,
test_src['in'], options)
else:
want = test_src['out']
got = tested_function(test_src['in'], options)
logging.debug('got = type %s', type(got))
logging.debug("test_src['out'] = %s",
unicode(test_src['out']))
self.assertEqual(got, want, """Result matches
expected = %s
observed = %s
""" % (want, got))
return do_test_expected
def _create_output_test(test_src, tested_function, options=None):
"""
Decorate tested function to be used as a method for TestCase.
"""
def do_test_expected(self):
"""
Execute a test by calling a tested_function on test_src data.
"""
self.maxDiff = None
# We currently don't throw any exceptions in Writer, so this
# is always false
if 'error' in test_src:
self.assertRaises(test_src['error'], yamlish.dumps,
test_src['in'], options)
else:
logging.debug("out:\n%s", textwrap.dedent(test_src['out']))
want = yaml.load(textwrap.dedent(test_src['out']))
logging.debug("want:\n%s", want)
with tempfile.NamedTemporaryFile() as test_file:
tested_function(test_src['in'], test_file)
test_file.seek(0)
got_str = test_file.read()
logging.debug("got_str = %s", got_str)
got = yaml.load(got_str)
self.assertEqual(got, want, "Result matches")
return do_test_expected
def generate_testsuite(test_data, test_case_shell, test_fce, direction=INPUT,
options=None):
"""
Generate tests from the test data, class to build upon and function
to use for testing.
"""
for in_test in test_data:
if ('skip' in in_test) and in_test['skip']:
logging.debug("test %s skipped!", in_test['name'])
continue
name = _generate_test_name(in_test['name'])
if direction == INPUT:
test_method = _create_input_test(in_test, test_fce,
options=options)
elif direction == OUTPUT:
test_method = _create_output_test(in_test, test_fce,
options=options)
test_method.__name__ = str('test_%s' % name)
setattr(test_case_shell, test_method.__name__, test_method)
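# A hedged usage sketch (the writer callable and test data are assumptions;
# only yamlish.dumps is referenced elsewhere in this module):
#
#     import unittest
#     class TestOutput(unittest.TestCase):
#         pass
#     test_data = [{'name': 'simple map', 'in': {'a': 1},
#                   'out': '---\na: 1\n...\n'}]
#     generate_testsuite(test_data, TestOutput, yamlish.dump,
#                        direction=OUTPUT)
#     unittest.main()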
|
louyihua/edx-platform
|
refs/heads/master
|
lms/djangoapps/email_marketing/tasks.py
|
7
|
"""
This file contains celery tasks for email marketing signal handler.
"""
import logging
import time
from celery import task
from django.core.cache import cache
from email_marketing.models import EmailMarketingConfiguration
from student.models import EnrollStatusChange
from sailthru.sailthru_client import SailthruClient
from sailthru.sailthru_error import SailthruClientError
log = logging.getLogger(__name__)
# pylint: disable=not-callable
@task(bind=True, default_retry_delay=3600, max_retries=24)
def update_user(self, sailthru_vars, email, new_user=False, activation=False):
"""
Adds/updates Sailthru profile information for a user.
Args:
sailthru_vars(dict): User profile information to pass as 'vars' to Sailthru
email(str): User email address
new_user(boolean): True if new registration
activation(boolean): True if activation request
Returns:
None
"""
email_config = EmailMarketingConfiguration.current()
if not email_config.enabled:
return
sailthru_client = SailthruClient(email_config.sailthru_key, email_config.sailthru_secret)
try:
sailthru_response = sailthru_client.api_post("user",
_create_sailthru_user_parm(sailthru_vars, email,
new_user, email_config))
except SailthruClientError as exc:
log.error("Exception attempting to add/update user %s in Sailthru - %s", email, unicode(exc))
raise self.retry(exc=exc,
countdown=email_config.sailthru_retry_interval,
max_retries=email_config.sailthru_max_retries)
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
log.error("Error attempting to add/update user in Sailthru: %s", error.get_message())
if _retryable_sailthru_error(error):
raise self.retry(countdown=email_config.sailthru_retry_interval,
max_retries=email_config.sailthru_max_retries)
return
# if activating user, send welcome email
if activation and email_config.sailthru_activation_template:
try:
sailthru_response = sailthru_client.api_post("send",
{"email": email,
"template": email_config.sailthru_activation_template})
except SailthruClientError as exc:
log.error("Exception attempting to send welcome email to user %s in Sailthru - %s", email, unicode(exc))
raise self.retry(exc=exc,
countdown=email_config.sailthru_retry_interval,
max_retries=email_config.sailthru_max_retries)
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
log.error("Error attempting to send welcome email to user in Sailthru: %s", error.get_message())
if _retryable_sailthru_error(error):
raise self.retry(countdown=email_config.sailthru_retry_interval,
max_retries=email_config.sailthru_max_retries)
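# Illustrative invocation (values are made up): as a celery task this is
# normally queued asynchronously from the signal handler, e.g.
#
#     update_user.delay({'activated': True, 'username': 'joe'},
#                       'joe@example.com', new_user=True, activation=True)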
# pylint: disable=not-callable
@task(bind=True, default_retry_delay=3600, max_retries=24)
def update_user_email(self, new_email, old_email):
"""
Adds/updates Sailthru when a user email address is changed
Args:
new_email(str): New email address
old_email(str): Original email address
Returns:
None
"""
email_config = EmailMarketingConfiguration.current()
if not email_config.enabled:
return
# ignore if email not changed
if new_email == old_email:
return
sailthru_parms = {"id": old_email, "key": "email", "keysconflict": "merge", "keys": {"email": new_email}}
try:
sailthru_client = SailthruClient(email_config.sailthru_key, email_config.sailthru_secret)
sailthru_response = sailthru_client.api_post("user", sailthru_parms)
except SailthruClientError as exc:
log.error("Exception attempting to update email for %s in Sailthru - %s", old_email, unicode(exc))
raise self.retry(exc=exc,
countdown=email_config.sailthru_retry_interval,
max_retries=email_config.sailthru_max_retries)
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
log.error("Error attempting to update user email address in Sailthru: %s", error.get_message())
if _retryable_sailthru_error(error):
raise self.retry(countdown=email_config.sailthru_retry_interval,
max_retries=email_config.sailthru_max_retries)
def _create_sailthru_user_parm(sailthru_vars, email, new_user, email_config):
"""
Create sailthru user create/update parms
"""
sailthru_user = {'id': email, 'key': 'email'}
sailthru_user['vars'] = dict(sailthru_vars, last_changed_time=int(time.time()))
# if new user add to list
if new_user and email_config.sailthru_new_user_list:
sailthru_user['lists'] = {email_config.sailthru_new_user_list: 1}
return sailthru_user
# pylint: disable=not-callable
@task(bind=True, default_retry_delay=3600, max_retries=24)
def update_course_enrollment(self, email, course_url, event, mode,
course_id=None, message_id=None): # pylint: disable=unused-argument
"""
Adds/updates Sailthru when a user enrolls/unenrolls/adds to cart/purchases/upgrades a course
Args:
email(str): The user's email address
course_url(str): Course home page url
event(str): event type
mode(str): enroll mode (audit, verification, ...)
course_id(str): course run id
message_id(str): Sailthru message id, if available
Returns:
None
The event can be one of the following:
EnrollStatusChange.enroll
A free enroll (mode=audit or honor)
EnrollStatusChange.unenroll
An unenroll
EnrollStatusChange.upgrade_start
A paid upgrade added to cart - ignored
EnrollStatusChange.upgrade_complete
A paid upgrade purchase complete - ignored
EnrollStatusChange.paid_start
A non-free course added to cart - ignored
EnrollStatusChange.paid_complete
A non-free course purchase complete - ignored
"""
email_config = EmailMarketingConfiguration.current()
if not email_config.enabled:
return
# Use event type to figure out processing required
unenroll = False
send_template = None
cost_in_cents = 0
if event == EnrollStatusChange.enroll:
send_template = email_config.sailthru_enroll_template
# set cost so that Sailthru recognizes the event
cost_in_cents = email_config.sailthru_enroll_cost
elif event == EnrollStatusChange.unenroll:
# unenroll - need to update list of unenrolled courses for user in Sailthru
unenroll = True
else:
# All purchase events should be handled by ecommerce, so ignore
return
sailthru_client = SailthruClient(email_config.sailthru_key, email_config.sailthru_secret)
# update the "unenrolled" course array in the user record on Sailthru
if not _update_unenrolled_list(sailthru_client, email, course_url, unenroll):
raise self.retry(countdown=email_config.sailthru_retry_interval,
max_retries=email_config.sailthru_max_retries)
# if there is a cost, call Sailthru purchase api to record
if cost_in_cents:
# get course information if configured and appropriate event
course_data = {}
if email_config.sailthru_get_tags_from_sailthru:
course_data = _get_course_content(course_url, sailthru_client, email_config)
# build item description
item = _build_purchase_item(course_id, course_url, cost_in_cents, mode, course_data)
# build purchase api options list
options = {}
# add appropriate send template
if send_template:
options['send_template'] = send_template
if not _record_purchase(sailthru_client, email, item, message_id, options):
raise self.retry(countdown=email_config.sailthru_retry_interval,
max_retries=email_config.sailthru_max_retries)
def _build_purchase_item(course_id_string, course_url, cost_in_cents, mode, course_data):
"""
Build Sailthru purchase item object
:return: item
"""
# build item description
item = {
'id': "{}-{}".format(course_id_string, mode),
'url': course_url,
'price': cost_in_cents,
'qty': 1,
}
# make up title if we don't already have it from Sailthru
if 'title' in course_data:
item['title'] = course_data['title']
else:
item['title'] = 'Course {} mode: {}'.format(course_id_string, mode)
if 'tags' in course_data:
item['tags'] = course_data['tags']
# add vars to item
item['vars'] = dict(course_data.get('vars', {}), mode=mode, course_run_id=course_id_string)
return item
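# Illustrative result (made-up values), assuming no course data was fetched
# from Sailthru:
#
#     _build_purchase_item('course-v1:Org+Run', 'https://example.com/course',
#                          100, 'audit', {})
#     => {'id': 'course-v1:Org+Run-audit',
#         'url': 'https://example.com/course',
#         'price': 100, 'qty': 1,
#         'title': 'Course course-v1:Org+Run mode: audit',
#         'vars': {'mode': 'audit', 'course_run_id': 'course-v1:Org+Run'}}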
def _record_purchase(sailthru_client, email, item, message_id, options):
"""
Record a purchase in Sailthru
:param sailthru_client:
:param email:
:param item:
:param message_id:
:param options:
:return: False if there was a retryable error
"""
try:
sailthru_response = sailthru_client.purchase(email, [item],
message_id=message_id,
options=options)
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
log.error("Error attempting to record purchase in Sailthru: %s", error.get_message())
return not _retryable_sailthru_error(error)
except SailthruClientError as exc:
log.error("Exception attempting to record purchase for %s in Sailthru - %s", email, unicode(exc))
return False
return True
def _get_course_content(course_url, sailthru_client, email_config):
"""
Get course information using the Sailthru content api.
If there is an error, just return with an empty response.
:param course_url:
:param sailthru_client:
:return: dict with course information
"""
# check cache first
response = cache.get(course_url)
if not response:
try:
sailthru_response = sailthru_client.api_get("content", {"id": course_url})
if not sailthru_response.is_ok():
return {}
response = sailthru_response.json
cache.set(course_url, response, email_config.sailthru_content_cache_age)
except SailthruClientError:
response = {}
return response
def _update_unenrolled_list(sailthru_client, email, course_url, unenroll):
"""
Maintain a list of courses the user has unenrolled from in the Sailthru user record
:param sailthru_client:
:param email:
:param course_url:
:param unenroll:
:return: False if retryable error, else True
"""
try:
# get the user 'vars' values from sailthru
sailthru_response = sailthru_client.api_get("user", {"id": email, "fields": {"vars": 1}})
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
log.info("Error attempting to read user record from Sailthru: %s", error.get_message())
return not _retryable_sailthru_error(error)
response_json = sailthru_response.json
unenroll_list = []
if response_json and "vars" in response_json and response_json["vars"] \
and "unenrolled" in response_json["vars"]:
unenroll_list = response_json["vars"]["unenrolled"]
changed = False
# if unenrolling, add course to unenroll list
if unenroll:
if course_url not in unenroll_list:
unenroll_list.append(course_url)
changed = True
# if enrolling, remove course from unenroll list
elif course_url in unenroll_list:
unenroll_list.remove(course_url)
changed = True
if changed:
# write user record back
sailthru_response = sailthru_client.api_post(
"user", {'id': email, 'key': 'email', "vars": {"unenrolled": unenroll_list}})
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
log.info("Error attempting to update user record in Sailthru: %s", error.get_message())
return not _retryable_sailthru_error(error)
# everything worked
return True
except SailthruClientError as exc:
log.error("Exception attempting to update user record for %s in Sailthru - %s", email, unicode(exc))
return False
def _retryable_sailthru_error(error):
""" Return True if error should be retried.
9: Retryable internal error
43: Rate limiting response
others: Not retryable
See: https://getstarted.sailthru.com/new-for-developers-overview/api/api-response-errors/
"""
code = error.get_error_code()
return code == 9 or code == 43
|
bertucho/epic-movie-quotes-quiz
|
refs/heads/master
|
dialogos/build/Twisted/twisted/protocols/ident.py
|
51
|
# -*- test-case-name: twisted.test.test_ident -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Ident protocol implementation.
"""
import struct
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import log, failure
_MIN_PORT = 1
_MAX_PORT = 2 ** 16 - 1
class IdentError(Exception):
"""
Can't determine connection owner; reason unknown.
"""
identDescription = 'UNKNOWN-ERROR'
def __str__(self):
return self.identDescription
class NoUser(IdentError):
"""
The connection specified by the port pair is not currently in use or
currently not owned by an identifiable entity.
"""
identDescription = 'NO-USER'
class InvalidPort(IdentError):
"""
Either the local or foreign port was improperly specified. This should
be returned if either or both of the port ids were out of range (TCP
port numbers are from 1-65535), negative integers, reals or in any
fashion not recognized as a non-negative integer.
"""
identDescription = 'INVALID-PORT'
class HiddenUser(IdentError):
"""
The server was able to identify the user of this port, but the
information was not returned at the request of the user.
"""
identDescription = 'HIDDEN-USER'
class IdentServer(basic.LineOnlyReceiver):
"""
The Identification Protocol (a.k.a., "ident", a.k.a., "the Ident
Protocol") provides a means to determine the identity of a user of a
particular TCP connection. Given a TCP port number pair, it returns a
character string which identifies the owner of that connection on the
server's system.
Server authors should subclass this class and override the lookup method.
The default implementation returns an UNKNOWN-ERROR response for every
query.
"""
def lineReceived(self, line):
parts = line.split(',')
if len(parts) != 2:
self.invalidQuery()
else:
try:
portOnServer, portOnClient = map(int, parts)
except ValueError:
self.invalidQuery()
else:
if _MIN_PORT <= portOnServer <= _MAX_PORT and _MIN_PORT <= portOnClient <= _MAX_PORT:
self.validQuery(portOnServer, portOnClient)
else:
self._ebLookup(failure.Failure(InvalidPort()), portOnServer, portOnClient)
def invalidQuery(self):
self.transport.loseConnection()
def validQuery(self, portOnServer, portOnClient):
"""
Called when a valid query is received to look up and deliver the
response.
@param portOnServer: The server port from the query.
@param portOnClient: The client port from the query.
"""
serverAddr = self.transport.getHost().host, portOnServer
clientAddr = self.transport.getPeer().host, portOnClient
defer.maybeDeferred(self.lookup, serverAddr, clientAddr
).addCallback(self._cbLookup, portOnServer, portOnClient
).addErrback(self._ebLookup, portOnServer, portOnClient
)
def _cbLookup(self, (sysName, userId), sport, cport):
self.sendLine('%d, %d : USERID : %s : %s' % (sport, cport, sysName, userId))
def _ebLookup(self, failure, sport, cport):
if failure.check(IdentError):
self.sendLine('%d, %d : ERROR : %s' % (sport, cport, failure.value))
else:
log.err(failure)
self.sendLine('%d, %d : ERROR : %s' % (sport, cport, IdentError(failure.value)))
def lookup(self, serverAddress, clientAddress):
"""Lookup user information about the specified address pair.
Return value should be a two-tuple of system name and username.
Acceptable values for the system name may be found online at::
U{http://www.iana.org/assignments/operating-system-names}
This method may also raise any IdentError subclass (or IdentError
itself) to indicate user information will not be provided for the
given query.
A Deferred may also be returned.
@param serverAddress: A two-tuple representing the server endpoint
of the address being queried. The first element is a string holding
a dotted-quad IP address. The second element is an integer
representing the port.
@param clientAddress: Like L{serverAddress}, but represents the
client endpoint of the address being queried.
"""
raise IdentError()
class ProcServerMixin:
"""Implements lookup() to grab entries for responses from /proc/net/tcp
"""
SYSTEM_NAME = 'LINUX'
try:
from pwd import getpwuid
def getUsername(self, uid, getpwuid=getpwuid):
return getpwuid(uid)[0]
del getpwuid
except ImportError:
def getUsername(self, uid):
raise IdentError()
def entries(self):
f = file('/proc/net/tcp')
f.readline()
for L in f:
yield L.strip()
def dottedQuadFromHexString(self, hexstr):
return '.'.join(map(str, struct.unpack('4B', struct.pack('=L', int(hexstr, 16)))))
def unpackAddress(self, packed):
addr, port = packed.split(':')
addr = self.dottedQuadFromHexString(addr)
port = int(port, 16)
return addr, port
def parseLine(self, line):
parts = line.strip().split()
localAddr, localPort = self.unpackAddress(parts[1])
remoteAddr, remotePort = self.unpackAddress(parts[2])
uid = int(parts[7])
return (localAddr, localPort), (remoteAddr, remotePort), uid
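# Illustrative walk-through: /proc/net/tcp stores addresses as hex in native
# byte order, so on a little-endian machine
#
#     >>> ProcServerMixin().unpackAddress('0100007F:0050')
#     ('127.0.0.1', 80)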
def lookup(self, serverAddress, clientAddress):
for ent in self.entries():
localAddr, remoteAddr, uid = self.parseLine(ent)
if remoteAddr == clientAddress and localAddr[1] == serverAddress[1]:
return (self.SYSTEM_NAME, self.getUsername(uid))
raise NoUser()
class IdentClient(basic.LineOnlyReceiver):
errorTypes = (IdentError, NoUser, InvalidPort, HiddenUser)
def __init__(self):
self.queries = []
def lookup(self, portOnServer, portOnClient):
"""Lookup user information about the specified address pair.
"""
self.queries.append((defer.Deferred(), portOnServer, portOnClient))
if len(self.queries) > 1:
return self.queries[-1][0]
self.sendLine('%d, %d' % (portOnServer, portOnClient))
return self.queries[-1][0]
def lineReceived(self, line):
if not self.queries:
log.msg("Unexpected server response: %r" % (line,))
else:
d, _, _ = self.queries.pop(0)
self.parseResponse(d, line)
if self.queries:
self.sendLine('%d, %d' % (self.queries[0][1], self.queries[0][2]))
def connectionLost(self, reason):
for q in self.queries:
q[0].errback(IdentError(reason))
self.queries = []
def parseResponse(self, deferred, line):
parts = line.split(':', 2)
if len(parts) != 3:
deferred.errback(IdentError(line))
else:
ports, type, addInfo = map(str.strip, parts)
if type == 'ERROR':
for et in self.errorTypes:
if et.identDescription == addInfo:
deferred.errback(et(line))
return
deferred.errback(IdentError(line))
else:
deferred.callback((type, addInfo))
__all__ = ['IdentError', 'NoUser', 'InvalidPort', 'HiddenUser',
'IdentServer', 'IdentClient',
'ProcServerMixin']
|
dtebbs/turbulenz_engine
|
refs/heads/master
|
scripts/utils.py
|
7
|
import platform
import sys
import os
import os.path
import re
import errno
import stat
from subprocess import Popen, PIPE, STDOUT
from shutil import rmtree, copy, Error as ShError
#######################################################################################################################
SYSNAME = platform.system()
if SYSNAME == 'Linux':
if platform.machine() == 'x86_64':
TURBULENZOS = 'linux64'
else:
TURBULENZOS = 'linux32'
elif SYSNAME == 'Windows':
TURBULENZOS = 'win32'
elif SYSNAME == 'Darwin':
TURBULENZOS = 'macosx'
else:
echo('unknown os')
exit(1)
PYTHON = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
ENV = 'env'
TURBULENZROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# Required to get the git commands working on Windows
if not 'HOME' in os.environ:
os.environ['HOME'] = '%s%s' % (os.environ['HOMEDRIVE'], os.environ['HOMEPATH'])
#######################################################################################################################
def echo(message=''):
print message
def log(message):
echo(' >> ' + message)
COLORED_OUTPUT = sys.stdout.isatty() and (SYSNAME != 'Windows' or 'ANSICON' in os.environ)
def error(message):
if COLORED_OUTPUT:
log('\033[1m\033[31m[ERROR]\033[0m - %s' % message)
else:
log('[ERROR] - %s' % message)
# pylint: disable=C0103
def ok(message):
if COLORED_OUTPUT:
log('\033[32m[OK]\033[0m - %s' % message)
else:
log('[OK] - %s' % message)
# pylint: enable=C0103
def warning(message):
if COLORED_OUTPUT:
log('\033[1m\033[33m[WARNING]\033[0m - %s' % message)
else:
log('[WARNING] - %s' % message)
#######################################################################################################################
# pylint: disable=C0103
def cp(src, dst, verbose=True):
if verbose:
echo('Copying: %s -> %s' % (os.path.basename(src), os.path.basename(dst)))
try:
copy(src, dst)
except (ShError, IOError) as e:
error(str(e))
# pylint: enable=C0103
# pylint: disable=C0103
def rm(filename, verbose=True):
if verbose:
echo('Removing: %s' % filename)
try:
os.remove(filename)
except OSError as _:
pass
# pylint: enable=C0103
def mkdir(path, verbose=True):
if verbose:
echo('Creating: %s' % path)
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def rmdir(path, verbose=True):
def _handle_remove_readonly(func, path, exc):
excvalue = exc[1]
if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777
func(path)
else:
raise
if verbose:
echo('Removing: %s' % path)
try:
rmtree(path, onerror=_handle_remove_readonly)
except OSError:
pass
#######################################################################################################################
# pylint: disable=W0231
class CalledProcessError(Exception):
def __init__(self, retcode, cmd, output=None):
self.retcode = retcode
self.cmd = cmd
self.output = output
def __str__(self):
cmd = self.cmd
if isinstance(cmd, list):
cmd = ' '.join(cmd)
return "Command '%s' returned non-zero exit status %d" % (cmd, self.retcode)
# pylint: enable=W0231
# pylint: disable=C0103
def sh(command, cwd=None, env=None, verbose=True, console=False, ignore=False, shell=False, wait=True):
if isinstance(command, list):
command_list = command
command_string = ' '.join(command)
else:
command_list = command.split()
command_string = command
if verbose:
echo('Executing: %s' % command_string)
if wait:
if console:
process = Popen(command_list, stderr=STDOUT, cwd=cwd, shell=shell, env=env)
else:
process = Popen(command_list, stdout=PIPE, stderr=STDOUT, cwd=cwd, shell=shell, env=env)
output, _ = process.communicate()
output = str(output)
retcode = process.poll()
if retcode:
if ignore is False:
raise CalledProcessError(retcode, command_list, output=output)
if output is not None:
output = output.rstrip()
return output
else:
if SYSNAME == 'Windows':
DETACHED_PROCESS = 0x00000008
return Popen(command_list, creationflags=DETACHED_PROCESS, cwd=cwd, shell=shell, env=env)
else:
return Popen(command_list, stdout=PIPE, stderr=STDOUT, cwd=cwd, shell=shell, env=env)
# pylint: enable=C0103
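# Usage sketch (illustrative commands): capture output, or stream it to the
# console and tolerate failures:
#
#     rev = sh('git rev-parse HEAD', cwd=TURBULENZROOT, verbose=False)
#     sh(['make', 'all'], cwd=TURBULENZROOT, console=True, ignore=True)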
#######################################################################################################################
def command_no_arguments(fn):
def new(arguments=None):
return fn()
return new
def command_with_arguments(fn):
def new(arguments=None, *args, **kwargs):
return fn(arguments or [], *args, **kwargs)
return new
def command_requires_env(fn):
virtual_env = os.environ.get('VIRTUAL_ENV', None)
if virtual_env:
def new(*args, **kwargs):
return fn(*args, **kwargs)
else:
def new(*args, **kwargs):
error('Virtualenv not activated, required for: %s' % sys.argv[1])
return new
#######################################################################################################################
def check_documentation_links(build_path):
bad_link_regex = [re.compile('.*<em class="xref std std-ref">.*<\/em>.*'),
re.compile('.*?:ref:?.*')]
result = 0
for (dirpath, _, filenames) in os.walk(build_path):
for f in filenames:
if os.path.splitext(f)[1] == '.html':
file_path = os.path.join(dirpath, f)
html_file = open(file_path, 'r')
html = html_file.read()
for regex in bad_link_regex:
match = regex.search(html)
if match:
result += 1
warning(file_path)
error('Broken or malformed link with contents "%s"' % match.group(0))
html_file.close()
if result > 0:
error('%d broken or malformed link%s' % (result, 's' if result > 1 else ''))
return result
#######################################################################################################################
if platform.system() == "Windows":
# pylint: disable=W0404
# pylint: disable=F0401, E0602
def find_devenv(versions_to_check=None):
from _winreg import OpenKey, QueryValueEx, HKEY_LOCAL_MACHINE, KEY_WOW64_32KEY, KEY_READ
try:
sxs_key = OpenKey(HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\VisualStudio\SxS\VS7',
0, KEY_READ | KEY_WOW64_32KEY)
except WindowsError:
sxs_key = None
if not sxs_key:
return None, None, None
def _query_key_value(key, value):
try:
result, _ = QueryValueEx(key, value)
except WindowsError:
result = None
return result
versions_to_check = versions_to_check or ['9.0', '10.0', '11.0', '12.0']
if '12.0' in versions_to_check:
vs_path = _query_key_value(sxs_key, '12.0')
if vs_path is not None:
devenv_path = os.path.join(vs_path, 'Common7', 'IDE', 'devenv.com')
if os.path.exists(devenv_path):
return (devenv_path, '2013', None)
if '11.0' in versions_to_check:
vs_path = _query_key_value(sxs_key, '11.0')
if vs_path is not None:
devenv_path = os.path.join(vs_path, 'Common7', 'IDE', 'devenv.com')
if os.path.exists(devenv_path):
return (devenv_path, '2012', None)
if '10.0' in versions_to_check:
vs_path = _query_key_value(sxs_key, '10.0')
if vs_path is not None:
devenv_path = os.path.join(vs_path, 'Common7', 'IDE', 'devenv.com')
if os.path.exists(devenv_path):
return (devenv_path, '2010', None)
devenv_path = os.path.join(vs_path, 'Common7', 'IDE', 'VCExpress.exe')
if os.path.exists(devenv_path):
return (devenv_path, '2010', None)
if '9.0' in versions_to_check:
vs_path = _query_key_value(sxs_key, '9.0')
if vs_path is not None:
devenv_path = os.path.join(vs_path, 'Common7', 'IDE', 'devenv.com')
if os.path.exists(devenv_path):
return (devenv_path, '2008', None)
# If we didn't find a devenv like tool try msbuild for Visual Studio 11.0 or Visual Studio 12.0
if '11.0' in versions_to_check:
vs_path = _query_key_value(sxs_key, '11.0')
if vs_path is not None:
# Query the key in two steps because Python can't seem to read the 4.0 key in a single call
msbuild_basekey = OpenKey(HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\MSBuild\ToolsVersions',
0, KEY_READ | KEY_WOW64_32KEY)
msbuild_key = OpenKey(msbuild_basekey, '4.0', 0, KEY_READ | KEY_WOW64_32KEY)
msbuild_path = _query_key_value(msbuild_key, 'MSBuildToolsPath')
if msbuild_path:
return None, '2012', os.path.join(msbuild_path, 'MSBuild.exe')
if '12.0' in versions_to_check:
vs_path = _query_key_value(sxs_key, '12.0')
if vs_path is not None:
# Query the key in two steps because Python can't seem to read the 4.0 key in a single call
msbuild_basekey = OpenKey(HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\MSBuild\ToolsVersions',
0, KEY_READ | KEY_WOW64_32KEY)
msbuild_key = OpenKey(msbuild_basekey, '4.0', 0, KEY_READ | KEY_WOW64_32KEY)
msbuild_path = _query_key_value(msbuild_key, 'MSBuildToolsPath')
if msbuild_path:
return None, '2013', os.path.join(msbuild_path, 'MSBuild.exe')
return None, None, None
# pylint: enable=F0401, E0602
# pylint: enable=W0404
def check_compilers():
# pylint: disable=F0401
try:
from distutils.msvccompiler import get_build_version as get_python_build_compiler
from distutils.msvc9compiler import query_vcvarsall
except ImportError:
# We could implement our own checks but distutils should be available, send a warning
raise EnvironmentError('Failed to import distutils, not able to confirm compiler toolchain is present')
# pylint: enable=F0401
_, version, _ = find_devenv()
if version is None:
raise EnvironmentError('Failed to find any Visual Studio installed')
versions_map = {
'2008': 9.0,
'2010': 10.0,
'2012': 11.0,
'2013': 12.0
}
# Turbulenz tools are built 32bit so always check for these compilers
try:
query_vcvarsall(versions_map[version], 'x86')
except ValueError:
raise EnvironmentError('Setuptools unable to detect Visual Studio Compilers correctly')
arch, _ = platform.architecture()
if arch == '64bit':
_, python_build_version, _ = find_devenv([str(get_python_build_compiler())])
if python_build_version is not None:
# We're running 64bit Python and the user has the Visual Studio version used to build Python
# installed, check for the 64bit compilers
try:
query_vcvarsall(versions_map[version], 'amd64')
except ValueError:
raise EnvironmentError('Setuptools unable to detect Visual Studio Compilers correctly.\n'
'You appear to be running 64bit Python, ensure you install the '
'64bit compilers in Visual Studio')
elif arch != '32bit':
raise EnvironmentError('Unexpected Python architecture, not able to'
' confirm compiler toolchain is present')
else:
def find_devenv():
return None, None, None
def check_compilers():
# This could be implemented but it's only Windows that causes us most of the issues
pass
|
MarcJoan/django
|
refs/heads/master
|
tests/expressions/tests.py
|
43
|
from __future__ import unicode_literals
import datetime
import uuid
from copy import deepcopy
from django.core.exceptions import FieldError
from django.db import DatabaseError, connection, models, transaction
from django.db.models import TimeField, UUIDField
from django.db.models.aggregates import (
Avg, Count, Max, Min, StdDev, Sum, Variance,
)
from django.db.models.expressions import (
F, Case, Col, Date, DateTime, ExpressionWrapper, Func, OrderBy, Random,
RawSQL, Ref, Value, When,
)
from django.db.models.functions import (
Coalesce, Concat, Length, Lower, Substr, Upper,
)
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from django.utils.timezone import utc
from .models import UUID, Company, Employee, Experiment, Number, Time
class BasicExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10)
)
Company.objects.create(
name="Foobar Ltd.", num_employees=3, num_chairs=4,
ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20)
)
Company.objects.create(
name="Test GmbH", num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Max", lastname="Mustermann", salary=30)
)
def setUp(self):
self.company_query = Company.objects.values(
"name", "num_employees", "num_chairs"
).order_by(
"name", "num_employees", "num_chairs"
)
def test_annotate_values_aggregate(self):
companies = Company.objects.annotate(
salaries=F('ceo__salary'),
).values('num_employees', 'salaries').aggregate(
result=Sum(F('salaries') + F('num_employees'),
output_field=models.IntegerField()),
)
self.assertEqual(companies['result'], 2395)
def test_filter_inter_attribute(self):
# We can filter on attribute relationships on same model obj, e.g.
# find companies where the number of employees is greater
# than the number of chairs.
self.assertQuerysetEqual(
self.company_query.filter(num_employees__gt=F("num_chairs")), [
{
"num_chairs": 5,
"name": "Example Inc.",
"num_employees": 2300,
},
{
"num_chairs": 1,
"name": "Test GmbH",
"num_employees": 32
},
],
lambda o: o
)
def test_update(self):
# We can set one field to have the value of another field
# Make sure we have enough chairs
self.company_query.update(num_chairs=F("num_employees"))
self.assertQuerysetEqual(
self.company_query, [
{
"num_chairs": 2300,
"name": "Example Inc.",
"num_employees": 2300
},
{
"num_chairs": 3,
"name": "Foobar Ltd.",
"num_employees": 3
},
{
"num_chairs": 32,
"name": "Test GmbH",
"num_employees": 32
}
],
lambda o: o
)
def test_arithmetic(self):
# We can perform arithmetic operations in expressions
# Make sure we have 2 spare chairs
self.company_query.update(num_chairs=F("num_employees") + 2)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 2302,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 5,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 34,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_order_of_operations(self):
# Law of order of operations is followed
self.company_query.update(
num_chairs=F('num_employees') + 2 * F('num_employees')
)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 6900,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 9,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 96,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_parenthesis_priority(self):
# Law of order of operations can be overridden by parentheses
self.company_query.update(
num_chairs=((F('num_employees') + 2) * F('num_employees'))
)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 5294600,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 15,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 1088,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_update_with_fk(self):
# A ForeignKey can be updated with the value of another ForeignKey.
self.assertEqual(
Company.objects.update(point_of_contact=F('ceo')),
3
)
self.assertQuerysetEqual(
Company.objects.all(), [
"Joe Smith",
"Frank Meyer",
"Max Mustermann",
],
lambda c: six.text_type(c.point_of_contact),
ordered=False
)
def test_update_with_none(self):
Number.objects.create(integer=1, float=1.0)
Number.objects.create(integer=2)
Number.objects.filter(float__isnull=False).update(float=Value(None))
self.assertQuerysetEqual(
Number.objects.all(), [
None,
None,
],
lambda n: n.float,
ordered=False
)
def test_filter_with_join(self):
# F Expressions can also span joins
Company.objects.update(point_of_contact=F('ceo'))
c = Company.objects.all()[0]
c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
c.save()
self.assertQuerysetEqual(
Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")), [
"Foobar Ltd.",
"Test GmbH",
],
lambda c: c.name,
ordered=False
)
Company.objects.exclude(
ceo__firstname=F("point_of_contact__firstname")
).update(name="foo")
self.assertEqual(
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).get().name,
"foo",
)
with transaction.atomic():
with self.assertRaises(FieldError):
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).update(name=F('point_of_contact__lastname'))
def test_object_update(self):
# F expressions can be used to update attributes on single objects
test_gmbh = Company.objects.get(name="Test GmbH")
self.assertEqual(test_gmbh.num_employees, 32)
test_gmbh.num_employees = F("num_employees") + 4
test_gmbh.save()
test_gmbh = Company.objects.get(pk=test_gmbh.pk)
self.assertEqual(test_gmbh.num_employees, 36)
def test_new_object_save(self):
# We should be able to use Funcs when inserting new data
test_co = Company(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
)
test_co.save()
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_new_object_create(self):
test_co = Company.objects.create(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
)
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_object_create_with_aggregate(self):
# Aggregates are not allowed when inserting new data
with self.assertRaisesMessage(FieldError, 'Aggregate functions are not allowed in this query'):
Company.objects.create(
name='Company', num_employees=Max(Value(1)), num_chairs=1,
ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
)
def test_object_update_fk(self):
# F expressions cannot be used to update attributes which are foreign
# keys, or attributes which involve joins.
test_gmbh = Company.objects.get(name="Test GmbH")
def test():
test_gmbh.point_of_contact = F("ceo")
self.assertRaises(ValueError, test)
test_gmbh.point_of_contact = test_gmbh.ceo
test_gmbh.save()
test_gmbh.name = F("ceo__last_name")
self.assertRaises(FieldError, test_gmbh.save)
def test_object_update_unsaved_objects(self):
# F expressions cannot be used to update attributes on objects which do
# not yet exist in the database
test_gmbh = Company.objects.get(name="Test GmbH")
acme = Company(
name="The Acme Widget Co.", num_employees=12, num_chairs=5,
ceo=test_gmbh.ceo
)
acme.num_employees = F("num_employees") + 16
msg = (
'Failed to insert expression "Col(expressions_company, '
'expressions.Company.num_employees) + Value(16)" on '
'expressions.Company.num_employees. F() expressions can only be '
'used to update, not to insert.'
)
self.assertRaisesMessage(ValueError, msg, acme.save)
acme.num_employees = 12
acme.name = Lower(F('name'))
msg = (
'Failed to insert expression "Lower(Col(expressions_company, '
'expressions.Company.name))" on expressions.Company.name. F() '
'expressions can only be used to update, not to insert.'
)
self.assertRaisesMessage(ValueError, msg, acme.save)
def test_ticket_11722_iexact_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
Employee.objects.create(firstname="Test", lastname="test")
queryset = Employee.objects.filter(firstname__iexact=F('lastname'))
self.assertQuerysetEqual(queryset, ["<Employee: Test test>"])
@skipIfDBFeature('has_case_insensitive_like')
def test_ticket_16731_startswith_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
self.assertQuerysetEqual(
Employee.objects.filter(lastname__startswith=F('firstname')),
[e2], lambda x: x)
self.assertQuerysetEqual(
Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk'),
[e2, e3], lambda x: x)
def test_ticket_18375_join_reuse(self):
# Test that reverse multijoin F() references and the lookup target
# the same join. Pre #18375 the F() join was generated first, and the
# lookup couldn't reuse that join.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering(self):
# The next query was dict-randomization dependent - if the "gte=1"
# clause was seen first, the F() reused the join generated by the
# gte lookup; if the F() was seen first, it generated a join the
# other lookups could not reuse.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'),
company_ceo_set__num_chairs__gte=1)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering_2(self):
# Another case for F() similar to the one above. Now we have the same
# join in two filter kwargs, one in the lhs lookup, one in F. Here,
# pre #18375, the number of joins generated was random if dict
# randomization was enabled, that is, the generated query depended
# on which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk'),
pk=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_chained_filters(self):
# Test that F() expressions do not reuse joins from previous filter.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk')
).filter(
company_ceo_set__num_employees=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
class ExpressionsTests(TestCase):
def test_F_object_deepcopy(self):
"""
Make sure F objects can be deepcopied (#23492)
"""
f = F("foo")
g = deepcopy(f)
self.assertEqual(f.name, g.name)
def test_f_reuse(self):
f = F('id')
n = Number.objects.create(integer=-1)
c = Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith")
)
c_qs = Company.objects.filter(id=f)
self.assertEqual(c_qs.get(), c)
# Reuse the same F-object for another queryset
n_qs = Number.objects.filter(id=f)
self.assertEqual(n_qs.get(), n)
# The original query still works correctly
self.assertEqual(c_qs.get(), c)
def test_patterns_escape(self):
"""
Test that special characters (e.g. %, _ and \) stored in database are
properly escaped when using a pattern lookup with an expression
refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%Joh\\n"),
Employee(firstname="Johnny", lastname="%John"),
Employee(firstname="Jean-Claude", lastname="Claud_"),
Employee(firstname="Jean-Claude", lastname="Claude"),
Employee(firstname="Jean-Claude", lastname="Claude%"),
Employee(firstname="Johnny", lastname="Joh\\n"),
Employee(firstname="Johnny", lastname="John"),
Employee(firstname="Johnny", lastname="_ohn"),
])
self.assertQuerysetEqual(
Employee.objects.filter(firstname__contains=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__startswith=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__endswith=F('lastname')),
["<Employee: Jean-Claude Claude>"],
ordered=False)
def test_insensitive_patterns_escape(self):
"""
Test that special characters (e.g. %, _ and \) stored in database are
properly escaped when using a case insensitive pattern lookup with an
expression -- refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%joh\\n"),
Employee(firstname="Johnny", lastname="%john"),
Employee(firstname="Jean-Claude", lastname="claud_"),
Employee(firstname="Jean-Claude", lastname="claude"),
Employee(firstname="Jean-Claude", lastname="claude%"),
Employee(firstname="Johnny", lastname="joh\\n"),
Employee(firstname="Johnny", lastname="john"),
Employee(firstname="Johnny", lastname="_ohn"),
])
self.assertQuerysetEqual(
Employee.objects.filter(firstname__icontains=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__istartswith=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__iendswith=F('lastname')),
["<Employee: Jean-Claude claude>"],
ordered=False)
class ExpressionsNumericTests(TestCase):
def setUp(self):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
self.assertEqual(Number.objects.update(float=F('integer')), 3)
def test_fill_with_value_from_same_object(self):
"""
We can fill a value in all objects with another value of the
same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 42, 42.000>',
'<Number: 1337, 1337.000>'
],
ordered=False
)
def test_increment_value(self):
"""
We can increment a value on all objects in a queryset.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_filter_not_equals_other_field(self):
"""
We can filter for objects where a value does not equal the value
of another field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.exclude(float=F('integer')),
[
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_complex_expressions(self):
"""
Complex expressions of different connection types are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(Number.objects.filter(pk=n.pk).update(
float=F('integer') + F('float') * 2), 1)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3))
def test_incorrect_field_expression(self):
with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword u?'nope' into field.*"):
list(Employee.objects.filter(firstname=F('nope')))
class ExpressionOperatorTests(TestCase):
def setUp(self):
self.n = Number.objects.create(integer=42, float=15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F('integer') + 15,
float=F('float') + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15,
float=F('float') - 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3))
def test_lefthand_multiplication(self):
# Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15,
float=F('float') * 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2,
float=F('float') / 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3))
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
@skipUnlessDBFeature('supports_bitwise_or')
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_power(self):
# LH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2,
float=F('float') ** 1.5)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'),
float=42.7 + F('float'))
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'),
float=42.7 - F('float'))
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3))
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'),
float=42.7 * F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'),
float=42.7 / F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3))
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_righthand_power(self):
# RH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'),
float=1.5 ** F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3))
class FTimeDeltaTests(TestCase):
def setUp(self):
self.sday = sday = datetime.date(2010, 6, 25)
self.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
# Test data is set so that deltas and delays will be
# strictly increasing.
self.deltas = []
self.delays = []
self.days_long = []
# e0: started same day as assigned, zero duration
end = stime + delta0
e0 = Experiment.objects.create(name='e0', assigned=sday, start=stime,
end=end, completed=end.date(), estimated_time=delta0)
self.deltas.append(delta0)
self.delays.append(e0.start -
datetime.datetime.combine(e0.assigned, midnight))
self.days_long.append(e0.completed - e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite. This Experiment is only
# included in the test data when the DB supports microsecond
# precision.
if connection.features.supports_microsecond_precision:
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(name='e1', assigned=sday,
start=stime + delay, end=end, completed=end.date(), estimated_time=delta1)
self.deltas.append(delta1)
self.delays.append(e1.start -
datetime.datetime.combine(e1.assigned, midnight))
self.days_long.append(e1.completed - e1.assigned)
# e2: started three days after assigned, small duration
end = stime + delta2
e2 = Experiment.objects.create(name='e2',
assigned=sday - datetime.timedelta(3), start=stime, end=end,
completed=end.date(), estimated_time=datetime.timedelta(hours=1))
self.deltas.append(delta2)
self.delays.append(e2.start -
datetime.datetime.combine(e2.assigned, midnight))
self.days_long.append(e2.completed - e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(name='e3',
assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta3)
self.deltas.append(delta3)
self.delays.append(e3.start -
datetime.datetime.combine(e3.assigned, midnight))
self.days_long.append(e3.completed - e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(name='e4',
assigned=sday - datetime.timedelta(10), start=stime, end=end,
completed=end.date(), estimated_time=delta4 - datetime.timedelta(1))
self.deltas.append(delta4)
self.delays.append(e4.start -
datetime.datetime.combine(e4.assigned, midnight))
self.days_long.append(e4.completed - e4.assigned)
self.expnames = [e.name for e in Experiment.objects.all()]
def test_multiple_query_compilation(self):
# Ticket #21643
queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
q1 = str(queryset.query)
q2 = str(queryset.query)
self.assertEqual(q1, q2)
def test_query_clone(self):
# Ticket #21643 - Crash when compiling query more than once
qs = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
qs2 = qs.all()
list(qs)
list(qs2)
# Intentionally no assert
def test_delta_add(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(end__lt=delta + F('start'))]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_subtract(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(start__gt=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__gte=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_exclude(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.exclude(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i:])
test_set = [e.name for e in
Experiment.objects.exclude(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i + 1:])
def test_date_comparison(self):
for i in range(len(self.days_long)):
days = self.days_long[i]
test_set = [e.name for e in
Experiment.objects.filter(completed__lt=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(completed__lte=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i + 1])
@skipUnlessDBFeature("supports_mixed_date_datetime_comparisons")
def test_mixed_comparisons1(self):
for i in range(len(self.delays)):
delay = self.delays[i]
if not connection.features.supports_microsecond_precision:
delay = datetime.timedelta(delay.days, delay.seconds)
test_set = [e.name for e in
Experiment.objects.filter(assigned__gt=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(assigned__gte=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_mixed_comparisons2(self):
delays = [datetime.timedelta(delay.days) for delay in self.delays]
for i in range(len(delays)):
delay = delays[i]
test_set = [e.name for e in
Experiment.objects.filter(start__lt=F('assigned') + delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__lte=F('assigned') + delay +
datetime.timedelta(1))]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_update(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start + delta for e in exps]
expected_ends = [e.end + delta for e in exps]
Experiment.objects.update(start=F('start') + delta, end=F('end') + delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_invalid_operator(self):
with self.assertRaises(DatabaseError):
list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0)))
def test_durationfield_add(self):
zeros = [e.name for e in
Experiment.objects.filter(start=F('start') + F('estimated_time'))]
self.assertEqual(zeros, ['e0'])
end_less = [e.name for e in
Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))]
self.assertEqual(end_less, ['e2'])
delta_math = [e.name for e in
Experiment.objects.filter(end__gte=F('start') + F('estimated_time') + datetime.timedelta(hours=1))]
self.assertEqual(delta_math, ['e4'])
@skipUnlessDBFeature("has_native_duration_field")
def test_date_subtraction(self):
under_estimate = [e.name for e in
Experiment.objects.filter(estimated_time__gt=F('end') - F('start'))]
self.assertEqual(under_estimate, ['e2'])
over_estimate = [e.name for e in
Experiment.objects.filter(estimated_time__lt=F('end') - F('start'))]
self.assertEqual(over_estimate, ['e4'])
def test_duration_with_datetime(self):
# Exclude e1 which has very high precision so we can test this on all
# backends regardless of whether or not it supports
# microsecond_precision.
over_estimate = Experiment.objects.exclude(name='e1').filter(
completed__gt=self.stime + F('estimated_time'),
).order_by('name')
self.assertQuerysetEqual(over_estimate, ['e3', 'e4'], lambda e: e.name)
class ValueTests(TestCase):
def test_update_TimeField_using_Value(self):
Time.objects.create()
Time.objects.update(time=Value(datetime.time(1), output_field=TimeField()))
self.assertEqual(Time.objects.get().time, datetime.time(1))
def test_update_UUIDField_using_Value(self):
UUID.objects.create()
UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField()))
self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012'))
class ReprTests(TestCase):
def test_expressions(self):
self.assertEqual(
repr(Case(When(a=1))),
"<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>"
)
self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)")
self.assertEqual(repr(Date('published', 'exact')), "Date(published, exact)")
self.assertEqual(repr(DateTime('published', 'exact', utc)), "DateTime(published, exact, %s)" % utc)
self.assertEqual(repr(F('published')), "F(published)")
self.assertEqual(repr(F('cost') + F('tax')), "<CombinedExpression: F(cost) + F(tax)>")
self.assertEqual(
repr(ExpressionWrapper(F('cost') + F('tax'), models.IntegerField())),
"ExpressionWrapper(F(cost) + F(tax))"
)
self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)")
self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)')
self.assertEqual(repr(Random()), "Random()")
self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])")
self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))")
self.assertEqual(repr(Value(1)), "Value(1)")
def test_functions(self):
self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))")
self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))")
self.assertEqual(repr(Length('a')), "Length(F(a))")
self.assertEqual(repr(Lower('a')), "Lower(F(a))")
self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))")
self.assertEqual(repr(Upper('a')), "Upper(F(a))")
def test_aggregates(self):
self.assertEqual(repr(Avg('a')), "Avg(F(a))")
self.assertEqual(repr(Count('a')), "Count(F(a), distinct=False)")
self.assertEqual(repr(Count('*')), "Count('*', distinct=False)")
self.assertEqual(repr(Max('a')), "Max(F(a))")
self.assertEqual(repr(Min('a')), "Min(F(a))")
self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)")
self.assertEqual(repr(Sum('a')), "Sum(F(a))")
self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)")
|
bac/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/identity/identity_providers/protocols/tables.py
|
8
|
# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
class AddProtocol(policy.PolicyTargetMixin, tables.LinkAction):
name = "create"
verbose_name = _("Add Protocol")
url = "horizon:identity:identity_providers:protocols:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("identity", "identity:create_protocol"),)
def get_link_url(self, datum=None):
idp_id = self.table.kwargs['identity_provider_id']
return reverse(self.url, args=(idp_id,))
class RemoveProtocol(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Protocol",
u"Delete Protocols",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Protocol",
u"Deleted Protocols",
count
)
policy_rules = (("identity", "identity:delete_protocol"),)
def delete(self, request, obj_id):
identity_provider = self.table.kwargs['identity_provider_id']
protocol = obj_id
api.keystone.protocol_delete(request, identity_provider, protocol)
class ProtocolsTable(tables.DataTable):
protocol = tables.Column("id",
verbose_name=_("Protocol ID"))
mapping = tables.Column("mapping_id",
verbose_name=_("Mapping ID"))
def get_object_display(self, datum):
return datum.id
class Meta(object):
name = "idp_protocols"
verbose_name = _("Protocols")
table_actions = (AddProtocol, RemoveProtocol)
row_actions = (RemoveProtocol, )
|
shubhamdhama/zulip
|
refs/heads/master
|
zerver/migrations/0099_index_wildcard_mentioned_user_messages.py
|
7
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0098_index_has_alert_word_user_messages'),
]
operations = [
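        # Partial index limited to rows where either of the two
        # mention-related flag bits (8 and 16) is set; presumably the
        # `mentioned` and `wildcard_mentioned` bits of UserMessage.flags.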
migrations.RunSQL(
'''
CREATE INDEX IF NOT EXISTS zerver_usermessage_wildcard_mentioned_message_id
ON zerver_usermessage (user_profile_id, message_id)
WHERE (flags & 8) != 0 OR (flags & 16) != 0;
''',
reverse_sql='DROP INDEX zerver_usermessage_wildcard_mentioned_message_id;',
),
]
|
rooi/CouchPotatoServer
|
refs/heads/master
|
libs/sqlalchemy/dialects/sqlite/pysqlite.py
|
18
|
# sqlite/pysqlite.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the SQLite database via pysqlite.
Note that pysqlite is the same driver as the ``sqlite3``
module included with the Python distribution.
Driver
------
When using Python 2.5 and above, the built in ``sqlite3`` driver is
already installed and no additional installation is needed. Otherwise,
the ``pysqlite2`` driver needs to be present. This is the same driver as
``sqlite3``, just with a different name.
The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3``
is loaded. This allows an explicitly installed pysqlite driver to take
precedence over the built in one. As with all dialects, a specific
DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
this explicitly::
from sqlite3 import dbapi2 as sqlite
e = create_engine('sqlite+pysqlite:///file.db', module=sqlite)
Full documentation on pysqlite is available at:
`<http://www.initd.org/pub/software/pysqlite/doc/usage-guide.html>`_
Connect Strings
---------------
The file specification for the SQLite database is taken as the "database" portion of
the URL. Note that the format of a url is::
driver://user:pass@host/database
This means that the actual filename to be used starts with the characters to the
**right** of the third slash. So connecting to a relative filepath looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
An absolute path, which is denoted by starting with a slash, means you need **four**
slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
To use a Windows path, regular drive specifications and backslashes can be used.
Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\\\path\\\\to\\\\database.db')
The sqlite ``:memory:`` identifier is the default if no filepath is present. Specify
``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect that any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
nor should it be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
With this flag enabled, the DATE and TIMESTAMP types (but note - not the DATETIME
or TIME types...confused yet?) will not perform any bind parameter or result
processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result processing.
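A quick illustration of the distinction above (a sketch, assuming the ``engine``
configured with ``native_datetime=True`` from the previous example)::
    from sqlalchemy import select, func
    # comes back as a plain string under native_datetime=True,
    # not as a datetime.date
    today = engine.execute(select([func.current_date()])).scalar()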
Threading/Pooling Behavior
---------------------------
Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread. This was originally intended to work with older versions
of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
Pysqlite does include a now-undocumented flag known as
``check_same_thread`` which will disable this check, however note that pysqlite
connections are still not safe to use concurrently in multiple threads.
In particular, any statement execution calls would need to be externally
mutexed, as Pysqlite does not provide for thread-safe propagation of error
messages among other things. So while even ``:memory:`` databases can be
shared among threads in modern SQLite, Pysqlite doesn't provide enough
thread-safety to make this usage worth it.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
* When a ``:memory:`` SQLite database is specified, the dialect by default will use
:class:`.SingletonThreadPool`. This pool maintains a single connection per
thread, so that all access to the engine within the current thread uses the
same ``:memory:`` database - other threads would access a different
``:memory:`` database.
* When a file-based database is specified, the dialect will use :class:`.NullPool`
as the source of connections. This pool closes and discards connections
which are returned to the pool immediately. SQLite file-based connections
have extremely low overhead, so pooling is not necessary. The scheme also
prevents a connection from being used again in a different thread and works
best with SQLite's coarse-grained file locking.
.. versionchanged:: 0.7
Default selection of :class:`.NullPool` for SQLite file-based databases.
Previous versions selected :class:`.SingletonThreadPool` by
default for all SQLite databases.
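Either pool can also be requested explicitly if the defaults above are not
desired; a minimal sketch (database paths are illustrative)::
    from sqlalchemy import create_engine
    from sqlalchemy.pool import NullPool, SingletonThreadPool
    # file-based database, but keep a dedicated connection per thread
    e = create_engine('sqlite:///file.db', poolclass=SingletonThreadPool)
    # in-memory database with NullPool; every checkout then gets a brand
    # new, empty :memory: database, which is rarely what is wanted
    e2 = create_engine('sqlite://', poolclass=NullPool)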
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use a ``:memory:`` database in a multithreaded scenario, the same connection
object must be shared among threads, since the database exists
only within the scope of that connection. The :class:`.StaticPool` implementation
will maintain a single connection globally, and the ``check_same_thread`` flag
can be passed to Pysqlite as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Due to the way SQLite deals with temporary tables, if you wish to use a temporary table
in a file-based SQLite database across multiple checkouts from the connection pool, such
as when using an ORM :class:`.Session` where the temporary table should continue to remain
after :meth:`.commit` or :meth:`.rollback` is called,
a pool which maintains a single connection must be used. Use :class:`.SingletonThreadPool`
if the scope is only needed within the current thread, or :class:`.StaticPool` if the scope is
needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
engine = create_engine('sqlite:///mydb.db',
poolclass=SingletonThreadPool)
# maintain the same connection across all threads
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite:///mydb.db',
poolclass=StaticPool)
Note that :class:`.SingletonThreadPool` should be configured for the number of threads
that are to be used; beyond that number, connections will be closed out in a non-deterministic
way.
Unicode
-------
The pysqlite driver only returns Python ``unicode`` objects in result sets, never
plain strings, and accommodates ``unicode`` objects within bound parameter
values in all cases. Regardless of the SQLAlchemy string type in use,
string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
is passed containing non-ASCII characters.
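A minimal sketch of the recommended pattern (table and column names are
illustrative)::
    from sqlalchemy import Table, Column, Integer, Unicode, MetaData
    metadata = MetaData()
    users = Table('users', metadata,
        Column('id', Integer, primary_key=True),
        # Unicode documents that this column expects ``unicode`` values;
        # a plain bytestring bound here emits a warning, and pysqlite
        # errors out if it contains non-ASCII characters.
        Column('name', Unicode(50)),
    )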
.. _pysqlite_serializable:
Serializable Transaction Isolation
----------------------------------
The pysqlite DBAPI driver has a long-standing bug in which transactional
state is not begun until the first DML statement, that is INSERT, UPDATE
or DELETE, is emitted. A SELECT statement will not cause transactional
state to begin. While this mode of usage is fine for typical situations
and has the advantage that the SQLite database file is not prematurely
locked, it breaks serializable transaction isolation, which requires
that the database file be locked upon any SQL being emitted.
To work around this issue, the ``BEGIN`` keyword can be emitted
at the start of each transaction. The following recipe establishes
a :meth:`.ConnectionEvents.begin` handler to achieve this::
from sqlalchemy import create_engine, event
engine = create_engine("sqlite:///myfile.db", isolation_level='SERIALIZABLE')
@event.listens_for(engine, "begin")
def do_begin(conn):
conn.execute("BEGIN")
"""
from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE
from sqlalchemy import exc, pool
from sqlalchemy import types as sqltypes
from sqlalchemy import util
import os
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATETIME.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATETIME.result_processor(self, dialect, coltype)
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = 'qmark'
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
sqltypes.Date:_SQLite_pysqliteDate,
sqltypes.TIMESTAMP:_SQLite_pysqliteTimeStamp,
}
)
# Py3K
#description_encoding = None
driver = 'pysqlite'
def __init__(self, **kwargs):
SQLiteDialect.__init__(self, **kwargs)
if self.dbapi is not None:
sqlite_ver = self.dbapi.version_info
if sqlite_ver < (2, 1, 3):
util.warn(
("The installed version of pysqlite2 (%s) is out-dated "
"and will cause errors in some cases. Version 2.1.3 "
"or greater is recommended.") %
'.'.join([str(subver) for subver in sqlite_ver]))
@classmethod
def dbapi(cls):
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError, e:
try:
from sqlite3 import dbapi2 as sqlite #try the 2.5+ stdlib name.
except ImportError:
raise e
return sqlite
@classmethod
def get_pool_class(cls, url):
if url.database and url.database != ':memory:':
return pool.NullPool
else:
return pool.SingletonThreadPool
def _get_server_version_info(self, connection):
return self.dbapi.sqlite_version_info
def create_connect_args(self, url):
if url.username or url.password or url.host or url.port:
raise exc.ArgumentError(
"Invalid SQLite URL: %s\n"
"Valid SQLite URL forms are:\n"
" sqlite:///:memory: (or, sqlite://)\n"
" sqlite:///relative/path/to/file.db\n"
" sqlite:////absolute/path/to/file.db" % (url,))
filename = url.database or ':memory:'
if filename != ':memory:':
filename = os.path.abspath(filename)
opts = url.query.copy()
util.coerce_kw_type(opts, 'timeout', float)
util.coerce_kw_type(opts, 'isolation_level', str)
util.coerce_kw_type(opts, 'detect_types', int)
util.coerce_kw_type(opts, 'check_same_thread', bool)
util.coerce_kw_type(opts, 'cached_statements', int)
return ([filename], opts)
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.ProgrammingError) and \
"Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
|
jorik041/scikit-learn
|
refs/heads/master
|
sklearn/tests/test_grid_search.py
|
68
|
"""
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass a 4-d X and a 3-d y to GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check that grid search doesn't destroy the pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kinds of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
|
Luindil/Glassure
|
refs/heads/develop
|
glassure/gui/widgets/control/soller.py
|
1
|
# -*- coding: utf-8 -*-
from ...qt import QtGui, QtWidgets, Signal
from ..custom import HorizontalLine, ValueLabelTxtPair
class SollerWidget(QtWidgets.QWidget):
soller_parameters_changed = Signal()
def __init__(self, *args):
super(SollerWidget, self).__init__(*args)
self.create_layout_and_widgets()
self.style_widgets()
self.create_signals()
self.param_widget.setVisible(False)
self.activate_cb.setChecked(False)
def create_layout_and_widgets(self):
self.main_layout = QtWidgets.QVBoxLayout()
self.activate_cb = QtWidgets.QCheckBox("activate")
self.main_layout.addWidget(self.activate_cb)
self.main_layout.addWidget(HorizontalLine())
self.param_layout = QtWidgets.QGridLayout()
self.thickness_txt = ValueLabelTxtPair("Sample thickness:", 0.2, "mm", self.param_layout, 0)
self.wavelength_txt = ValueLabelTxtPair("X-ray wavelength:", 0.31, "A", self.param_layout, 1)
self.param_layout.addWidget(HorizontalLine(), 2, 0, 1, 3)
self.inner_radius_txt = ValueLabelTxtPair("Inner radius:", '', "mm", self.param_layout, 4)
self.outer_radius_txt = ValueLabelTxtPair("Outer radius:", '', "mm", self.param_layout, 5)
self.inner_width_txt = ValueLabelTxtPair("Inner width:", '', "mm", self.param_layout, 6)
self.outer_width_txt = ValueLabelTxtPair("Outer width:", '', "mm", self.param_layout, 7)
self.inner_length_txt = ValueLabelTxtPair("Inner length:", '', "mm", self.param_layout, 8)
        self.outer_length_txt = ValueLabelTxtPair("Outer length:", '', "mm", self.param_layout, 9)
self.param_widget = QtWidgets.QWidget()
self.param_widget.setLayout(self.param_layout)
self.main_layout.addWidget(self.param_widget)
self.setLayout(self.main_layout)
def style_widgets(self):
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.main_layout.setSpacing(5)
self.param_layout.setContentsMargins(50, 0, 0, 0)
self.param_layout.setVerticalSpacing(7)
#
def create_signals(self):
self.activate_cb.stateChanged.connect(self.param_widget.setVisible)
self.activate_cb.stateChanged.connect(self.soller_parameters_changed.emit)
self.thickness_txt.editingFinished.connect(self.soller_parameters_changed.emit)
self.wavelength_txt.editingFinished.connect(self.soller_parameters_changed.emit)
self.inner_radius_txt.editingFinished.connect(self.soller_parameters_changed.emit)
self.outer_radius_txt.editingFinished.connect(self.soller_parameters_changed.emit)
self.inner_width_txt.editingFinished.connect(self.soller_parameters_changed.emit)
self.outer_width_txt.editingFinished.connect(self.soller_parameters_changed.emit)
self.inner_length_txt.editingFinished.connect(self.soller_parameters_changed.emit)
self.outer_length_txt.editingFinished.connect(self.soller_parameters_changed.emit)
def get_parameters(self):
return {"sample_thickness": self.thickness_txt.get_value(),
"wavelength": self.wavelength_txt.get_value(),
"inner_radius": self.inner_radius_txt.get_value(),
"outer_radius": self.outer_radius_txt.get_value(),
"inner_width": self.inner_width_txt.get_value(),
"outer_width": self.outer_width_txt.get_value(),
"inner_length": self.inner_length_txt.get_value(),
"outer_length": self.outer_length_txt.get_value()}
def set_parameters(self, parameter):
self.blockSignals(True)
self.thickness_txt.set_value(parameter["sample_thickness"])
self.wavelength_txt.set_value(parameter["wavelength"])
self.inner_radius_txt.set_value(parameter["inner_radius"])
self.outer_radius_txt.set_value(parameter["outer_radius"])
self.inner_width_txt.set_value(parameter["inner_width"])
self.outer_width_txt.set_value(parameter["outer_width"])
self.inner_length_txt.set_value(parameter["inner_length"])
self.outer_length_txt.set_value(parameter["outer_length"])
self.blockSignals(False)
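# --- Illustrative usage sketch (added for documentation, not part of the original file) ---
# A hypothetical example of how a controller could react to the widget's
# soller_parameters_changed signal; the callback name is invented.
#
#     widget = SollerWidget()
#     def print_soller_parameters():
#         # get_parameters() returns a plain dict of the current slit geometry.
#         print(widget.get_parameters())
#     widget.soller_parameters_changed.connect(print_soller_parameters)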
|
ict-felix/stack
|
refs/heads/master
|
modules/resource/utilities/rspecs/tnrm/request_formatter.py
|
2
|
from rspecs.commons import DEFAULT_XMLNS, DEFAULT_XS, DEFAULT_SCHEMA_LOCATION,\
DSL_PREFIX
from rspecs.commons_tn import DEFAULT_SHARED_VLAN, generate_unique_link_id
from rspecs.formatter_base import FormatterBase
from lxml import etree
DEFAULT_REQ_SCHEMA_LOCATION = DEFAULT_SCHEMA_LOCATION
DEFAULT_REQ_SCHEMA_LOCATION += DSL_PREFIX + "3/request.xsd "
DEFAULT_REQ_SCHEMA_LOCATION += DSL_PREFIX + "ext/shared-vlan/1/request.xsd"
class TNRMv3RequestFormatter(FormatterBase):
def __init__(self, xmlns=DEFAULT_XMLNS, xs=DEFAULT_XS,
sharedvlan=DEFAULT_SHARED_VLAN,
felix=None,
schema_location=DEFAULT_REQ_SCHEMA_LOCATION):
nmap = {"sharedvlan": "%s" % (sharedvlan)}
if felix is not None:
nmap["felix"] = "%s" % (felix)
super(TNRMv3RequestFormatter, self).__init__(
"request", schema_location, nmap, xmlns, xs)
self.__sv = sharedvlan
def node(self, n):
node_ = etree.SubElement(self.rspec, "{%s}node" % (self.xmlns))
node_.attrib["client_id"] = n.get("component_id")
node_.attrib["component_manager_id"] = n.get("component_manager_id")
if n.get("exclusive") is not None:
node_.attrib["exclusive"] = n.get("exclusive")
if n.get("sliver_type_name") is not None:
sliver_ = etree.SubElement(node_, "{%s}sliver_type" % (self.xmlns))
sliver_.attrib["name"] = n.get("sliver_type_name")
for i in n.get("interfaces"):
intf_ = etree.SubElement(node_, "{%s}interface" % (self.xmlns))
intf_.attrib["client_id"] = i.get("component_id")
for v in i.get("vlan"):
svlan_ = etree.SubElement(intf_,
"{%s}link_shared_vlan" % (self.__sv))
svlan_.attrib["vlantag"] = v.get("tag")
if v.get("name") is not None:
svlan_.attrib["name"] = v.get("name")
if v.get("description") is not None:
svlan_.attrib["description"] = v.get("description")
def link(self, l):
link_ = etree.SubElement(self.rspec, "{%s}link" % (self.xmlns))
link_.attrib["client_id"] = l.get("component_id")
mgr_ = etree.SubElement(link_, "{%s}component_manager" % (self.xmlns))
mgr_.attrib["name"] = l.get("component_manager_name")
for i in l.get("interface_ref"):
ifr_ = etree.SubElement(link_, "{%s}interface_ref" % (self.xmlns))
ifr_.attrib["client_id"] = i.get("component_id")
for p in l.get("property"):
prop_ = etree.SubElement(link_, "{%s}property" % (self.xmlns))
prop_.attrib["source_id"] = p.get("source_id")
prop_.attrib["dest_id"] = p.get("dest_id")
prop_.attrib["capacity"] = p.get("capacity")
if len(l.get("property")) > 0:
# Here we change the link-id attribute.
# It should be a unique value for the TNRM module!
p0 = l.get("property")[0]
link_.attrib["client_id"] = generate_unique_link_id(
l.get("component_id"), p0.get("source_id"), p0.get("dest_id"))
|
r0e/servo
|
refs/heads/master
|
python/mach/mach/decorators.py
|
96
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, unicode_literals
import argparse
import collections
import inspect
import types
from .base import MachError
from .config import ConfigProvider
from .registrar import Registrar
class _MachCommand(object):
"""Container for mach command metadata.
Mach commands contain lots of attributes. This class exists to capture them
in a sane way so tuples, etc aren't used instead.
"""
__slots__ = (
# Content from decorator arguments to define the command.
'name',
'subcommand',
'category',
'description',
'conditions',
'_parser',
'arguments',
'argument_group_names',
# Describes how dispatch is performed.
# The Python class providing the command. This is the class type not
# an instance of the class. Mach will instantiate a new instance of
# the class if the command is executed.
'cls',
# Whether the __init__ method of the class should receive a mach
# context instance. This should only affect the mach driver and how
# it instantiates classes.
'pass_context',
# The name of the method providing the command. In other words, this
# is the str name of the attribute on the class type corresponding to
# the name of the function.
'method',
# Dict of string to _MachCommand defining sub-commands for this
# command.
'subcommand_handlers',
)
def __init__(self, name=None, subcommand=None, category=None,
description=None, conditions=None, parser=None):
self.name = name
self.subcommand = subcommand
self.category = category
self.description = description
self.conditions = conditions or []
self._parser = parser
self.arguments = []
self.argument_group_names = []
self.cls = None
self.pass_context = None
self.method = None
self.subcommand_handlers = {}
@property
def parser(self):
# Creating CLI parsers at command dispatch time can be expensive. Make
# it possible to lazy load them by using functions.
if callable(self._parser):
self._parser = self._parser()
return self._parser
@property
def docstring(self):
return self.cls.__dict__[self.method].__doc__
def __ior__(self, other):
if not isinstance(other, _MachCommand):
raise ValueError('can only operate on _MachCommand instances')
for a in self.__slots__:
if not getattr(self, a):
setattr(self, a, getattr(other, a))
return self
def CommandProvider(cls):
"""Class decorator to denote that it provides subcommands for Mach.
When this decorator is present, mach looks for commands being defined by
methods inside the class.
"""
# The implementation of this decorator relies on the parse-time behavior of
# decorators. When the module is imported, the method decorators (like
# @Command and @CommandArgument) are called *before* this class decorator.
# The side-effect of the method decorators is to store specifically-named
# attributes on the function types. We just scan over all functions in the
# class looking for the side-effects of the method decorators.
# Tell mach driver whether to pass context argument to __init__.
pass_context = False
if inspect.ismethod(cls.__init__):
spec = inspect.getargspec(cls.__init__)
if len(spec.args) > 2:
msg = 'Mach @CommandProvider class %s implemented incorrectly. ' + \
'__init__() must take 1 or 2 arguments. From %s'
msg = msg % (cls.__name__, inspect.getsourcefile(cls))
raise MachError(msg)
if len(spec.args) == 2:
pass_context = True
seen_commands = set()
# We scan __dict__ because we only care about the classes own attributes,
# not inherited ones. If we did inherited attributes, we could potentially
# define commands multiple times. We also sort keys so commands defined in
# the same class are grouped in a sane order.
for attr in sorted(cls.__dict__.keys()):
value = cls.__dict__[attr]
if not isinstance(value, types.FunctionType):
continue
command = getattr(value, '_mach_command', None)
if not command:
continue
# Ignore subcommands for now: we handle them later.
if command.subcommand:
continue
seen_commands.add(command.name)
if not command.conditions and Registrar.require_conditions:
continue
msg = 'Mach command \'%s\' implemented incorrectly. ' + \
'Conditions argument must take a list ' + \
'of functions. Found %s instead.'
if not isinstance(command.conditions, collections.Iterable):
msg = msg % (command.name, type(command.conditions))
raise MachError(msg)
for c in command.conditions:
if not hasattr(c, '__call__'):
msg = msg % (command.name, type(c))
raise MachError(msg)
command.cls = cls
command.method = attr
command.pass_context = pass_context
Registrar.register_command_handler(command)
# Now do another pass to get sub-commands. We do this in two passes so
# we can check the parent command existence without having to hold
# state and reconcile after traversal.
for attr in sorted(cls.__dict__.keys()):
value = cls.__dict__[attr]
if not isinstance(value, types.FunctionType):
continue
command = getattr(value, '_mach_command', None)
if not command:
continue
# It is a regular command.
if not command.subcommand:
continue
if command.name not in seen_commands:
raise MachError('Command referenced by sub-command does not '
'exist: %s' % command.name)
if command.name not in Registrar.command_handlers:
continue
command.cls = cls
command.method = attr
command.pass_context = pass_context
parent = Registrar.command_handlers[command.name]
if parent._parser:
raise MachError('cannot declare sub commands against a command '
'that has a parser installed: %s' % command)
if command.subcommand in parent.subcommand_handlers:
raise MachError('sub-command already defined: %s' % command.subcommand)
parent.subcommand_handlers[command.subcommand] = command
return cls
class Command(object):
"""Decorator for functions or methods that provide a mach command.
The decorator accepts arguments that define basic attributes of the
command. The following arguments are recognized:
category -- The string category to which this command belongs. Mach's
help will group commands by category.
description -- A brief description of what the command does.
parser -- an optional argparse.ArgumentParser instance or callable
that returns an argparse.ArgumentParser instance to use as the
basis for the command arguments.
For example:
@Command('foo', category='misc', description='Run the foo action')
def foo(self):
pass
"""
def __init__(self, name, **kwargs):
self._mach_command = _MachCommand(name=name, **kwargs)
def __call__(self, func):
if not hasattr(func, '_mach_command'):
func._mach_command = _MachCommand()
func._mach_command |= self._mach_command
return func
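# A hypothetical sketch (not from the original module) of deferring argparse setup
# by passing a callable as ``parser``; the callable is only invoked the first time
# the lazy ``parser`` property above is read at dispatch time:
#
#     def _build_foo_parser():
#         parser = argparse.ArgumentParser()
#         parser.add_argument('--verbose', action='store_true')
#         return parser
#
#     @Command('foo', category='misc', description='Run the foo action',
#              parser=_build_foo_parser)
#     def foo(self, verbose=False):
#         pass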
class SubCommand(object):
"""Decorator for functions or methods that provide a sub-command.
Mach commands can have sub-commands. e.g. ``mach command foo`` or
``mach command bar``. Each sub-command has its own parser and is
effectively its own mach command.
The decorator accepts arguments that define basic attributes of the
sub command:
command -- The string of the command this sub command should be
attached to.
subcommand -- The string name of the sub command to register.
description -- A textual description for this sub command.
"""
def __init__(self, command, subcommand, description=None):
self._mach_command = _MachCommand(name=command, subcommand=subcommand,
description=description)
def __call__(self, func):
if not hasattr(func, '_mach_command'):
func._mach_command = _MachCommand()
func._mach_command |= self._mach_command
return func
class CommandArgument(object):
"""Decorator for additional arguments to mach subcommands.
This decorator should be used to add arguments to mach commands. Arguments
to the decorator are proxied to ArgumentParser.add_argument().
For example:
@Command('foo', help='Run the foo action')
@CommandArgument('-b', '--bar', action='store_true', default=False,
help='Enable bar mode.')
def foo(self):
pass
"""
def __init__(self, *args, **kwargs):
if kwargs.get('nargs') == argparse.REMAINDER:
# These are the assertions we make in dispatcher.py about
# those types of CommandArguments.
assert len(args) == 1
assert all(k in ('default', 'nargs', 'help', 'group') for k in kwargs)
self._command_args = (args, kwargs)
def __call__(self, func):
if not hasattr(func, '_mach_command'):
func._mach_command = _MachCommand()
func._mach_command.arguments.insert(0, self._command_args)
return func
class CommandArgumentGroup(object):
"""Decorator for additional argument groups to mach commands.
    This decorator should be used to add argument groups to mach commands.
Arguments to the decorator are proxied to
ArgumentParser.add_argument_group().
For example:
        @Command('foo', help='Run the foo action')
@CommandArgumentGroup('group1')
@CommandArgument('-b', '--bar', group='group1', action='store_true',
default=False, help='Enable bar mode.')
def foo(self):
pass
The name should be chosen so that it makes sense as part of the phrase
'Command Arguments for <name>' because that's how it will be shown in the
help message.
"""
def __init__(self, group_name):
self._group_name = group_name
def __call__(self, func):
if not hasattr(func, '_mach_command'):
func._mach_command = _MachCommand()
func._mach_command.argument_group_names.insert(0, self._group_name)
return func
def SettingsProvider(cls):
"""Class decorator to denote that this class provides Mach settings.
When this decorator is encountered, the underlying class will automatically
be registered with the Mach registrar and will (likely) be hooked up to the
mach driver.
This decorator is only allowed on mach.config.ConfigProvider classes.
"""
if not issubclass(cls, ConfigProvider):
raise MachError('@SettingsProvider encountered on class that does ' +
                        'not derive from mach.config.ConfigProvider.')
Registrar.register_settings_provider(cls)
return cls
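# --- Illustrative usage sketch (added for documentation, not part of the original file) ---
# A hypothetical, self-contained example of how the decorators above combine; the
# class, command, and argument names are invented.
#
#     from mach.decorators import (CommandArgument, CommandProvider, Command,
#                                  SubCommand)
#
#     @CommandProvider
#     class MyCommands(object):
#         @Command('build', category='build', description='Build the project.')
#         @CommandArgument('--jobs', '-j', default='1',
#                          help='Number of parallel jobs.')
#         def build(self, jobs='1'):
#             print('building with %s jobs' % jobs)
#
#         @SubCommand('build', 'docs', description='Build only the docs.')
#         def build_docs(self):
#             print('building docs')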
|
UOMx/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/tests/test_export_git.py
|
189
|
"""
Test the ability to export courses to xml from studio
"""
import copy
import os
import shutil
import subprocess
from uuid import uuid4
from django.conf import settings
from django.test.utils import override_settings
from .utils import CourseTestCase
import contentstore.git_export_utils as git_export_utils
from xmodule.modulestore.django import modulestore
from contentstore.utils import reverse_course_url
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class TestExportGit(CourseTestCase):
"""
Tests pushing a course to a git repository
"""
def setUp(self):
"""
Setup test course, user, and url.
"""
super(TestExportGit, self).setUp()
self.course_module = modulestore().get_course(self.course.id)
self.test_url = reverse_course_url('export_git', self.course.id)
def make_bare_repo_with_course(self, repo_name):
"""
Make a local bare repo suitable for exporting to in
tests
"""
# Build out local bare repo, and set course git url to it
repo_dir = os.path.abspath(git_export_utils.GIT_REPO_EXPORT_DIR)
os.mkdir(repo_dir)
self.addCleanup(shutil.rmtree, repo_dir)
bare_repo_dir = '{0}/{1}.git'.format(
os.path.abspath(git_export_utils.GIT_REPO_EXPORT_DIR),
repo_name
)
os.mkdir(bare_repo_dir)
self.addCleanup(shutil.rmtree, bare_repo_dir)
subprocess.check_output(['git', '--bare', 'init', ], cwd=bare_repo_dir)
self.populate_course()
self.course_module.giturl = 'file://{}'.format(bare_repo_dir)
modulestore().update_item(self.course_module, self.user.id)
def test_giturl_missing(self):
"""
Test to make sure an appropriate error is displayed
if course hasn't set giturl.
"""
response = self.client.get(self.test_url)
self.assertEqual(200, response.status_code)
self.assertIn(
('giturl must be defined in your '
'course settings before you can export to git.'),
response.content
)
response = self.client.get('{}?action=push'.format(self.test_url))
self.assertEqual(200, response.status_code)
self.assertIn(
('giturl must be defined in your '
'course settings before you can export to git.'),
response.content
)
def test_course_export_failures(self):
"""
Test failed course export response.
"""
self.course_module.giturl = 'foobar'
modulestore().update_item(self.course_module, self.user.id)
response = self.client.get('{}?action=push'.format(self.test_url))
self.assertIn('Export Failed:', response.content)
def test_exception_translation(self):
"""
Regression test for making sure errors are properly stringified
"""
self.course_module.giturl = 'foobar'
modulestore().update_item(self.course_module, self.user.id)
response = self.client.get('{}?action=push'.format(self.test_url))
self.assertNotIn('django.utils.functional.__proxy__', response.content)
def test_course_export_success(self):
"""
Test successful course export response.
"""
self.make_bare_repo_with_course('test_repo')
response = self.client.get('{}?action=push'.format(self.test_url))
self.assertIn('Export Succeeded', response.content)
def test_repo_with_dots(self):
"""
Regression test for a bad directory pathing of repo's that have dots.
"""
self.make_bare_repo_with_course('test.repo')
response = self.client.get('{}?action=push'.format(self.test_url))
self.assertIn('Export Succeeded', response.content)
def test_dirty_repo(self):
"""
Add additional items not in the repo and make sure they aren't
        there after the export. This allows old content to be removed
        from the repo.
"""
repo_name = 'dirty_repo1'
self.make_bare_repo_with_course(repo_name)
git_export_utils.export_to_git(self.course.id,
self.course_module.giturl, self.user)
# Make arbitrary change to course to make diff
self.course_module.matlab_api_key = 'something'
modulestore().update_item(self.course_module, self.user.id)
# Touch a file in the directory, export again, and make sure
# the test file is gone
repo_dir = os.path.join(
os.path.abspath(git_export_utils.GIT_REPO_EXPORT_DIR),
repo_name
)
test_file = os.path.join(repo_dir, 'test.txt')
open(test_file, 'a').close()
self.assertTrue(os.path.isfile(test_file))
git_export_utils.export_to_git(self.course.id,
self.course_module.giturl, self.user)
self.assertFalse(os.path.isfile(test_file))
|
indashnet/InDashNet.Open.UN2000
|
refs/heads/master
|
android/external/clang/tools/clang-format/clang-format-sublime.py
|
1
|
# This file is a minimal clang-format sublime-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Put this file into your sublime Packages directory, e.g. on Linux:
# ~/.config/sublime-text-2/Packages/User/clang-format-sublime.py
# - Add a key binding:
# { "keys": ["ctrl+shift+c"], "command": "clang_format" },
#
# With this integration you can press the bound key and clang-format will
# format the current lines and selections for all cursor positions. The lines
# or regions are extended to the next bigger syntactic entities.
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
import sublime
import sublime_plugin
import subprocess
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
# Change this to format according to other formatting styles
# (see clang-format -help).
style = 'LLVM'
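# Common style names include 'LLVM', 'Google', 'Chromium', 'Mozilla', 'WebKit',
# or 'file' to pick up a .clang-format file from the project directory.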
class ClangFormatCommand(sublime_plugin.TextCommand):
def run(self, edit):
encoding = self.view.encoding()
if encoding == 'Undefined':
encoding = 'utf-8'
regions = []
command = [binary, '-style', style]
for region in self.view.sel():
regions.append(region)
region_offset = min(region.a, region.b)
region_length = abs(region.b - region.a)
command.extend(['-offset', str(region_offset),
'-length', str(region_length)])
old_viewport_position = self.view.viewport_position()
buf = self.view.substr(sublime.Region(0, self.view.size()))
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output, error = p.communicate(buf.encode(encoding))
if not error:
self.view.replace(
edit, sublime.Region(0, self.view.size()),
output.decode(encoding))
self.view.sel().clear()
for region in regions:
self.view.sel().add(region)
# FIXME: Without the 10ms delay, the viewport sometimes jumps.
sublime.set_timeout(lambda: self.view.set_viewport_position(
old_viewport_position, False), 10)
else:
print error
|
kingvuplus/italysat-enigma2
|
refs/heads/master
|
lib/python/Plugins/Extensions/SocketMMI/SocketMMI.py
|
150
|
from Screens.Ci import MMIDialog
import socketmmi
class SocketMMIMessageHandler:
def __init__(self):
self.session = None
self.dlgs = { }
socketmmi.getSocketStateChangedCallbackList().append(self.socketStateChanged)
def setSession(self, session):
self.session = session
def connected(self):
return socketmmi.getState(0)
def getName(self):
return socketmmi.getName(0)
def startMMI(self):
slot = 0
self.dlgs[slot] = self.session.openWithCallback(self.dlgClosed, MMIDialog, slot, 2, socketmmi, _("wait for mmi..."))
def socketStateChanged(self, slot):
if slot in self.dlgs:
self.dlgs[slot].ciStateChanged()
elif socketmmi.availableMMI(slot) == 1:
if self.session:
self.dlgs[slot] = self.session.openWithCallback(self.dlgClosed, MMIDialog, slot, 3, socketmmi, _("wait for mmi..."))
def dlgClosed(self, slot):
if slot in self.dlgs:
del self.dlgs[slot]
|
robodasha/research_papers
|
refs/heads/master
|
setup.py
|
1
|
"""A setuptools based setup module.
"""
from setuptools import setup
__author__ = 'robodasha'
__email__ = 'damirah@live.com'
with open('README.md') as fp:
description = fp.read()
setup(
name='research_papers',
version='0.1.2',
description='Numerous tools for working with research papers',
long_description=description,
license='MIT',
url='https://github.com/robodasha/research_papers',
author='Drahomira Herrmannova',
author_email='damirah@live.com',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering',
'Topic :: Text Processing',
'Topic :: Utilities'
],
keywords='text mining',
packages=['research_papers', 'research_papers.tools'],
install_requires=['wheel', 'configparser', 'ordereddict', 'mendeley',
'pdfminer3k']
)
|
TomAugspurger/pandas
|
refs/heads/master
|
pandas/tests/series/methods/test_rename_axis.py
|
4
|
import pytest
from pandas import Index, MultiIndex, Series
import pandas._testing as tm
class TestSeriesRenameAxis:
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
ser = Series(list(range(len(mi))), index=mi)
result = ser.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
result = ser.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
result = ser.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
with pytest.raises(TypeError, match="unexpected"):
ser.rename_axis(columns="wrong")
def test_rename_axis_inplace(self, datetime_series):
# GH 15704
expected = datetime_series.rename_axis("foo")
result = datetime_series
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}])
def test_rename_axis_none(self, kwargs):
# GH 25034
index = Index(list("abc"), name="foo")
ser = Series([1, 2, 3], index=index)
result = ser.rename_axis(**kwargs)
expected_index = index.rename(None) if kwargs else index
expected = Series([1, 2, 3], index=expected_index)
tm.assert_series_equal(result, expected)
|
okolisny/integration_tests
|
refs/heads/master
|
cfme/tests/infrastructure/test_advanced_search_providers.py
|
1
|
# -*- coding: utf-8 -*-
"""This testing module tests the behaviour of the search box in the Provider section
It does not check for filtering results so far."""
import fauxfactory
import pytest
from selenium.common.exceptions import NoSuchElementException
from cfme.infrastructure.provider import InfraProvider
from fixtures.pytest_store import store
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.log import logger
from cfme.web_ui import search
from cfme.web_ui.search import DisabledButtonException
from cfme.web_ui.cfme_exception import (assert_no_cfme_exception,
is_cfme_exception, cfme_exception_text)
pytestmark = [
pytest.mark.usefixtures("setup_cleanup_search", "infra_provider"), pytest.mark.tier(3)]
@pytest.yield_fixture(scope="function")
def setup_cleanup_search():
"""Navigate to InfraProvider, clear search on setup and teardown"""
navigate_to(InfraProvider, 'All')
search.ensure_no_filter_applied()
yield
# cleanup after test
search.ensure_no_filter_applied()
search.ensure_advanced_search_closed()
@pytest.yield_fixture(scope="function")
def rails_delete_filter(request):
"""Introspect a function bound filter_name and use ssh_client and rails to delete it"""
# No pre-test, just cleanup after yield
yield
filter_name = getattr(request.function, "filter_name", None)
logger.debug('rails_delete_filter: calling rails to delete filter: {}'.format(filter_name))
if filter_name:
try:
store.current_appliance.ssh_client.run_rails_command(
'"MiqSearch.where(:description => {}).first.delete"'.format(repr(filter_name)))
except Exception as ex:
logger.warning('rails_delete_filter: exception during delete. {}'.format(ex))
pass
else:
logger.warning('rails_delete_filter: failed to get filter_name')
def test_can_do_advanced_search():
navigate_to(InfraProvider, 'All')
assert search.is_advanced_search_possible(), "Cannot do advanced search here!"
@pytest.mark.requires("test_can_do_advanced_search")
def test_can_open_advanced_search():
navigate_to(InfraProvider, 'All')
search.ensure_advanced_search_open()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_without_user_input():
# Set up the filter
search.fill_and_apply_filter("fill_count(Infrastructure Provider.VMs, >=, 0)")
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_with_user_input():
# Set up the filter
logger.debug('DEBUG: test_with_user_input: fill and apply')
search.fill_and_apply_filter("fill_count(Infrastructure Provider.VMs, >=)",
fill_callback={"COUNT": 0})
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_with_user_input_and_cancellation():
# Set up the filter
search.fill_and_apply_filter(
"fill_count(Infrastructure Provider.VMs, >=)", fill_callback={"COUNT": 0},
cancel_on_user_filling=True
)
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_save_cancel(rails_delete_filter):
# bind filter_name to the function for fixture cleanup
test_filter_save_cancel.filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(test_filter_save_cancel.filter_name))
# Try save filter
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >)",
test_filter_save_cancel.filter_name, cancel=True)
assert_no_cfme_exception()
assert search.reset_filter()
# Exception depends on system state - Load button will be disabled if there are no saved filters
with pytest.raises((DisabledButtonException, NoSuchElementException)):
search.load_filter(saved_filter=test_filter_save_cancel.filter_name)
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_save_and_load(rails_delete_filter):
# bind filter_name to the function for fixture cleanup
test_filter_save_and_load.filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(test_filter_save_and_load.filter_name))
# Save filter
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >, 0)",
test_filter_save_and_load.filter_name)
assert_no_cfme_exception()
# Reset filter
assert search.reset_filter()
# Load filter
assert search.load_filter(test_filter_save_and_load.filter_name)
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_save_and_cancel_load(rails_delete_filter):
# bind filter_name to the function for fixture cleanup
test_filter_save_and_cancel_load.filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(test_filter_save_and_cancel_load.filter_name))
# Save filter
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >, 0)",
test_filter_save_and_cancel_load.filter_name)
assert_no_cfme_exception()
# Reset Filter
assert search.reset_filter()
# Load and cancel
assert search.load_filter(test_filter_save_and_cancel_load.filter_name, cancel=True)
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_save_and_cancel_load_with_user_input(rails_delete_filter):
# bind filter_name to the function for fixture cleanup
test_filter_save_and_cancel_load_with_user_input.filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(
test_filter_save_and_cancel_load_with_user_input.filter_name))
# Save filter
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >)",
test_filter_save_and_cancel_load_with_user_input.filter_name)
assert_no_cfme_exception()
# Reset Filter
assert search.reset_filter()
search.load_and_apply_filter(
test_filter_save_and_cancel_load_with_user_input.filter_name,
fill_callback={"COUNT": 0},
cancel_on_user_filling=True
)
assert_no_cfme_exception()
def test_quick_search_without_filter(request):
assert_no_cfme_exception()
# Make sure that we empty the regular search field after the test
request.addfinalizer(search.ensure_normal_search_empty)
# Filter this host only
search.normal_search(fauxfactory.gen_alphanumeric())
assert_no_cfme_exception()
def test_quick_search_with_filter(request):
search.fill_and_apply_filter("fill_count(Infrastructure Provider.VMs, >=, 0)")
assert_no_cfme_exception()
# Make sure that we empty the regular search field after the test
request.addfinalizer(search.ensure_normal_search_empty)
# Filter this host only
search.normal_search(fauxfactory.gen_alphanumeric())
assert_no_cfme_exception()
def test_can_delete_filter():
filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(filter_name))
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >, 0)", filter_name)
assert_no_cfme_exception()
search.reset_filter()
assert_no_cfme_exception()
search.load_filter(filter_name)
assert_no_cfme_exception()
if not search.delete_filter():
raise pytest.fail("Cannot delete filter! Probably the delete button is not present!")
assert_no_cfme_exception()
def test_delete_button_should_appear_after_save(rails_delete_filter):
"""Delete button appears only after load, not after save"""
# bind filter_name to the function for fixture cleanup
test_delete_button_should_appear_after_save.filter_name = fauxfactory.gen_alphanumeric()
search.save_filter("fill_count(Infrastructure Provider.VMs, >, 0)",
test_delete_button_should_appear_after_save.filter_name)
if not search.delete_filter(): # Returns False if the button is not present
pytest.fail("Could not delete filter right after saving!")
def test_cannot_delete_more_than_once():
"""When Delete button appars, it does not want to go away"""
filter_name = fauxfactory.gen_alphanumeric()
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >, 0)", filter_name)
assert search.load_filter(filter_name) # circumvent the thing happening in previous test
# Delete once
if not search.delete_filter():
pytest.fail("Could not delete the filter even first time!")
assert_no_cfme_exception()
# Try it second time
if search.delete_filter(): # If the button is there, it says True
# This should not happen
msg = "Delete twice accepted!"
if is_cfme_exception():
msg += " CFME Exception text: `{}`".format(cfme_exception_text())
pytest.fail(msg)
|
melgor/melgor.github.io
|
refs/heads/master
|
node_modules/pygmentize-bundled/vendor/pygments/pygments/util.py
|
269
|
# -*- coding: utf-8 -*-
"""
pygments.util
~~~~~~~~~~~~~
Utility functions.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import codecs
split_path_re = re.compile(r'[/\\ ]')
doctype_lookup_re = re.compile(r'''(?smx)
(<\?.*?\?>)?\s*
<!DOCTYPE\s+(
[a-zA-Z_][a-zA-Z0-9]*\s+
[a-zA-Z_][a-zA-Z0-9]*\s+
"[^"]*")
[^>]*>
''')
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>(?uism)')
class ClassNotFound(ValueError):
"""
If one of the get_*_by_* functions didn't find a matching class.
"""
class OptionError(Exception):
pass
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
string = options.get(optname, default)
if normcase:
string = string.lower()
if string not in allowed:
raise OptionError('Value for option %s must be one of %s' %
(optname, ', '.join(map(str, allowed))))
return string
def get_bool_opt(options, optname, default=None):
string = options.get(optname, default)
if isinstance(string, bool):
return string
elif isinstance(string, int):
return bool(string)
elif not isinstance(string, basestring):
raise OptionError('Invalid type %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
elif string.lower() in ('1', 'yes', 'true', 'on'):
return True
elif string.lower() in ('0', 'no', 'false', 'off'):
return False
else:
raise OptionError('Invalid value %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
def get_int_opt(options, optname, default=None):
string = options.get(optname, default)
try:
return int(string)
except TypeError:
raise OptionError('Invalid type %r for option %s; you '
'must give an integer value' % (
string, optname))
except ValueError:
raise OptionError('Invalid value %r for option %s; you '
'must give an integer value' % (
string, optname))
def get_list_opt(options, optname, default=None):
val = options.get(optname, default)
if isinstance(val, basestring):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
else:
raise OptionError('Invalid type %r for option %s; you '
'must give a list value' % (
val, optname))
def docstring_headline(obj):
if not obj.__doc__:
return ''
res = []
for line in obj.__doc__.strip().splitlines():
if line.strip():
res.append(" " + line.strip())
else:
break
return ''.join(res).lstrip()
def make_analysator(f):
"""
Return a static text analysation function that
returns float values.
"""
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse)
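# A hypothetical sketch (not part of the original module) of what the wrapper does
# when applied to a simple heuristic: the result is clamped to [0.0, 1.0] and
# exceptions are swallowed:
#
#     class FooLexer(RegexLexer):
#         def analyse_text(text):
#             return text.startswith('#!foo')
#         analyse_text = make_analysator(analyse_text)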
def shebang_matches(text, regex):
"""
Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile('^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False
def doctype_matches(text, regex):
"""
Check if the doctype matches a regular expression (if present).
Note that this method only checks the first part of a DOCTYPE.
eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
"""
m = doctype_lookup_re.match(text)
if m is None:
return False
doctype = m.group(2)
return re.compile(regex).match(doctype.strip()) is not None
def html_doctype_matches(text):
"""
Check if the file looks like it has a html doctype.
"""
return doctype_matches(text, r'html\s+PUBLIC\s+"-//W3C//DTD X?HTML.*')
_looks_like_xml_cache = {}
def looks_like_xml(text):
"""
Check if a doctype exists or if we have some tags.
"""
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.match(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv
# Python narrow build compatibility
def _surrogatepair(c):
return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
def unirange(a, b):
"""
Returns a regular expression string to match the given non-BMP range.
"""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
if ah - bh > 1:
buf.append(u'[%s-%s][%s-%s]' %
                       (unichr(ah+1), unichr(bh-1), unichr(0xdc00), unichr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')'
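# A hypothetical usage sketch (not part of the original module): building a pattern
# that matches a non-BMP range on both wide and narrow builds; the chosen code
# points (the emoticons block) are arbitrary.
#
#     emoji_re = re.compile(u'(?:%s)+' % unirange(0x1F600, 0x1F64F))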
# Python 2/3 compatibility
if sys.version_info < (3,0):
b = bytes = str
u_prefix = 'u'
import StringIO, cStringIO
BytesIO = cStringIO.StringIO
StringIO = StringIO.StringIO
uni_open = codecs.open
else:
import builtins
bytes = builtins.bytes
u_prefix = ''
def b(s):
if isinstance(s, str):
return bytes(map(ord, s))
elif isinstance(s, bytes):
return s
else:
raise TypeError("Invalid argument %r for b()" % (s,))
import io
BytesIO = io.BytesIO
StringIO = io.StringIO
uni_open = builtins.open
|
fluxer/spm
|
refs/heads/master
|
nuitka/nuitka/PythonVersions.py
|
1
|
# Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Python version specifics.
This abstracts the Python version decisions. This makes decisions based on
the numbers, and attempts to give them meaningful names. Where possible it
should attempt to make run time detections.
"""
import os
import re
import sys
def getSupportedPythonVersions():
return ("2.6", "2.7", "3.2", "3.3", "3.4", "3.5")
def getSupportedPythonVersionStr():
supported_python_versions = getSupportedPythonVersions()
supported_python_versions_str = repr(supported_python_versions)[1:-1]
supported_python_versions_str = re.sub(
r"(.*),(.*)$",
r"\1, or\2",
supported_python_versions_str
)
return supported_python_versions_str
def _getPythonVersion():
big, major, minor = sys.version_info[0:3]
return big * 100 + major * 10 + minor
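# The packed form reads big*100 + major*10 + minor, e.g. CPython 2.7.9 -> 279
# and CPython 3.5.1 -> 351.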
python_version = _getPythonVersion()
python_version_full_str = '.'.join(str(s) for s in sys.version_info[0:3])
python_version_str = '.'.join(str(s) for s in sys.version_info[0:2])
def isAtLeastSubVersion(version):
if version < 280 and \
python_version >= 280 and \
python_version < 300:
return True
if version // 10 != python_version // 10:
return False
return python_version >= version
def doShowUnknownEncodingName():
# Python 3.3.3 or higher does it, 3.4 always did.
if python_version >= 333:
return True
# Python2.7 after 2.7.6 does it.
if isAtLeastSubVersion(276):
return True
# Debian back ports do it.
if "2.7.5+" in sys.version or "3.3.2+" in sys.version:
return True
return False
def getErrorMessageExecWithNestedFunction():
""" Error message of the concrete Python in case an exec occurs in a
function that takes a closure variable.
"""
assert python_version < 300
# Need to use "exec" to detect the syntax error, pylint: disable=W0122
try:
exec("""
def f():
exec ""
def nested():
return closure""")
except SyntaxError as e:
return e.message.replace("'f'", "'%s'")
def getComplexCallSequenceErrorTemplate():
if not hasattr(getComplexCallSequenceErrorTemplate, "result"):
try:
# We are doing this on purpose, to get the exception.
# pylint: disable=E1102,E1133
f = None
f(*None)
except TypeError as e:
result = e.args[0].replace("NoneType object", "%s").replace("NoneType", "%s")
getComplexCallSequenceErrorTemplate.result = result
else:
sys.exit("Error, cannot detect expected error message.")
return getComplexCallSequenceErrorTemplate.result
def isUninstalledPython():
return "Anaconda" in sys.version or \
"WinPython" in sys.version or \
(os.name == "nt" and python_version >= 350)
def getRunningPythonDLLPath():
import ctypes.wintypes
GetModuleHandle = ctypes.windll.kernel32.GetModuleHandleW # @UndefinedVariable
GetModuleHandle.argtypes = (
ctypes.wintypes.LPWSTR,
)
GetModuleHandle.restype = ctypes.wintypes.DWORD
big, major = sys.version_info[0:2]
dll_module_name = "python%d%d" % (big, major)
module_handle = GetModuleHandle(dll_module_name)
if module_handle == 0:
dll_module_name += "_d"
module_handle = GetModuleHandle(dll_module_name)
assert module_handle, (sys.executable, dll_module_name, sys.flags.debug)
MAX_PATH = 4096
buf = ctypes.create_unicode_buffer(MAX_PATH)
GetModuleFileName = ctypes.windll.kernel32.GetModuleFileNameW # @UndefinedVariable
GetModuleFileName.argtypes = (
ctypes.wintypes.HANDLE,
ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD
)
GetModuleFileName.restype = ctypes.wintypes.DWORD
res = GetModuleFileName(module_handle, buf, MAX_PATH)
assert res != 0
dll_path = os.path.normcase(buf.value)
assert os.path.exists(dll_path), dll_path
return dll_path
def getTargetPythonDLLPath():
dll_path = getRunningPythonDLLPath()
from nuitka.Options import isPythonDebug
if dll_path.endswith("_d.dll"):
if not isPythonDebug():
dll_path = dll_path[:-5] + ".dll"
if not os.path.exists(dll_path):
sys.exit("Error, cannot switch to non-debug Python, not installed.")
else:
if isPythonDebug():
dll_path = dll_path[:-4] + "_d.dll"
if not os.path.exists(dll_path):
sys.exit("Error, cannot switch to debug Python, not installed.")
return dll_path
|
hustodemon/spacewalk
|
refs/heads/master
|
java/scripts/api/configchannel.py
|
12
|
#!/usr/bin/python
import xmlrpclib
import unittest
from random import randint
from config import *
class ConfigChannel(RhnTestCase):
def setUp(self):
RhnTestCase.setUp(self)
def tearDown(self):
RhnTestCase.tearDown(self)
def test_schedule_file_comparisons(self):
random_int = randint(1, 1000000)
channel_label = "apitest_channel%s" % random_int
channel_name = "apitest channel%s" % random_int
channel_description = "channel description"
channel_details = client.configchannel.create(self.session_key, channel_label, channel_name, channel_description)
# print channel_details
path = "/tmp/test_file.sh"
path_info = {'contents' : 'echo hello',
'owner' : 'root',
'group' : 'root',
'permissions' : '644',
'macro-start-delimiter' : '{|',
'macro-end-delimiter' : '|}'}
client.configchannel.createOrUpdatePath(self.session_key, channel_label, path, False, path_info)
actionId = client.configchannel.scheduleFileComparisons(self.session_key, channel_label, path, [SERVER_ID])
action_details = client.schedule.listInProgressSystems(self.session_key, actionId)
# print action_details
self.assertTrue(len(action_details) > 0)
# clean up from test
client.configchannel.deleteChannels(self.session_key, [channel_label])
if __name__ == "__main__":
unittest.main()
|
rwakulszowa/poradnia
|
refs/heads/master
|
poradnia/contrib/sites/migrations/0004_auto_20160409_2334.py
|
3
|
# Generated by Django 1.9.4 on 2016-04-09 21:34
import django.contrib.sites.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("sites", "0003_auto_20151217_2355")]
operations = [
migrations.AlterField(
model_name="site",
name="domain",
field=models.CharField(
max_length=100,
unique=True,
validators=[django.contrib.sites.models._simple_domain_name_validator],
verbose_name="domain name",
),
)
]
|
karlalopez/Authentise-Store
|
refs/heads/master
|
app.py
|
2
|
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from werkzeug import secure_filename
from flask.ext.login import LoginManager
import os
from itsdangerous import URLSafeTimedSerializer
# Enter your shop name and tagline here
shop_name = "Shop name"
shop_tagline = "Best shop tagline ever"
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
UPLOAD_FOLDER = os.path.join('static/uploads')
MODELS_FOLDER = os.path.join('models')
ALLOWED_EXTENSIONS = set(['stl'])
app = Flask(__name__)
app.secret_key = 'SECRET_KEY' # You need to set up an app secret key.
ts = URLSafeTimedSerializer(app.config['SECRET_KEY']) # You need to set up an app secret key.
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MODELS_FOLDER'] = MODELS_FOLDER
# Set up the SQLAlchemy database to use the local PostgreSQL database 'store'
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost/store'
db = SQLAlchemy(app)
if __name__ == "__main__":
from views import *
del session
app.run(debug=True)
|
ctu-geoforall-lab/qgis-pu-plugin
|
refs/heads/master
|
__init__.py
|
1
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
puPlugin
A QGIS plugin
Plugin pro pozemkové úpravy
-------------------
begin : 2016-09-01
copyright : (C) 2016 by Ondřej Svoboda
email : svoboond@gmail.com
git sha : $Format:%H$
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
def classFactory(iface):
"""Loads puPlugin class.
Args:
iface (QgisInterface): A reference to the QgisInterface.
Returns:
class: The main class of the PU Plugin.
"""
from puplugin import puPlugin
return puPlugin(iface)
|
imsparsh/python-for-android
|
refs/heads/master
|
python-modules/pybluez/examples/advanced/inquiry-with-rssi.py
|
47
|
# performs a simple device inquiry, followed by a remote name request of each
# discovered device
import os
import sys
import struct
import bluetooth._bluetooth as bluez
def printpacket(pkt):
for c in pkt:
sys.stdout.write("%02x " % struct.unpack("B",c)[0])
print
def read_inquiry_mode(sock):
"""returns the current mode, or -1 on failure"""
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# Setup socket filter to receive only events related to the
# read_inquiry_mode command
flt = bluez.hci_filter_new()
opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
bluez.OCF_READ_INQUIRY_MODE)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
    bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE)
bluez.hci_filter_set_opcode(flt, opcode)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
# first read the current inquiry mode.
bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
bluez.OCF_READ_INQUIRY_MODE )
pkt = sock.recv(255)
status,mode = struct.unpack("xxxxxxBB", pkt)
if status != 0: mode = -1
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
return mode
def write_inquiry_mode(sock, mode):
"""returns 0 on success, -1 on failure"""
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# Setup socket filter to receive only events related to the
# write_inquiry_mode command
flt = bluez.hci_filter_new()
opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
bluez.OCF_WRITE_INQUIRY_MODE)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE);
bluez.hci_filter_set_opcode(flt, opcode)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
# send the command!
bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
bluez.OCF_WRITE_INQUIRY_MODE, struct.pack("B", mode) )
pkt = sock.recv(255)
status = struct.unpack("xxxxxxB", pkt)[0]
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
if status != 0: return -1
return 0
def device_inquiry_with_rssi(sock):
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# perform a device inquiry on bluetooth device #0
# The inquiry lasts duration * 1.28 seconds (4 * 1.28 = 5.12 seconds here)
# before the inquiry is performed, bluez should flush its cache of
# previously discovered devices
flt = bluez.hci_filter_new()
bluez.hci_filter_all_events(flt)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
duration = 4
max_responses = 255
cmd_pkt = struct.pack("BBBBB", 0x33, 0x8b, 0x9e, duration, max_responses)
bluez.hci_send_cmd(sock, bluez.OGF_LINK_CTL, bluez.OCF_INQUIRY, cmd_pkt)
results = []
done = False
while not done:
pkt = sock.recv(255)
ptype, event, plen = struct.unpack("BBB", pkt[:3])
if event == bluez.EVT_INQUIRY_RESULT_WITH_RSSI:
pkt = pkt[3:]
nrsp = struct.unpack("B", pkt[0])[0]
for i in range(nrsp):
addr = bluez.ba2str( pkt[1+6*i:1+6*i+6] )
rssi = struct.unpack("b", pkt[1+13*nrsp+i])[0]
results.append( ( addr, rssi ) )
print "[%s] RSSI: [%d]" % (addr, rssi)
elif event == bluez.EVT_INQUIRY_COMPLETE:
done = True
elif event == bluez.EVT_CMD_STATUS:
status, ncmd, opcode = struct.unpack("BBH", pkt[3:7])
if status != 0:
print "uh oh..."
printpacket(pkt[3:7])
done = True
elif event == bluez.EVT_INQUIRY_RESULT:
pkt = pkt[3:]
nrsp = struct.unpack("B", pkt[0])[0]
for i in range(nrsp):
addr = bluez.ba2str( pkt[1+6*i:1+6*i+6] )
results.append( ( addr, -1 ) )
print "[%s] (no RRSI)" % addr
else:
print "unrecognized packet type 0x%02x" % ptype
print "event ", event
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
return results
dev_id = 0
try:
sock = bluez.hci_open_dev(dev_id)
except:
print "error accessing bluetooth device..."
sys.exit(1)
try:
mode = read_inquiry_mode(sock)
except Exception, e:
print "error reading inquiry mode. "
print "Are you sure this a bluetooth 1.2 device?"
print e
sys.exit(1)
print "current inquiry mode is %d" % mode
if mode != 1:
print "writing inquiry mode..."
try:
result = write_inquiry_mode(sock, 1)
except Exception, e:
print "error writing inquiry mode. Are you sure you're root?"
print e
sys.exit(1)
if result != 0:
print "error while setting inquiry mode"
print "result: %d" % result
device_inquiry_with_rssi(sock)
|
jcfr/mystic
|
refs/heads/master
|
examples_UQ/TEST3b.py
|
1
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2009-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
#######################################################################
# scaling and mpi info; also optimizer configuration parameters
# hard-wired: use DE solver, don't use mpi, F-F' calculation
# (similar to concentration.in)
#######################################################################
scale = 1.0
#XXX: <mpi config goes here>
npop = 20
maxiter = 1000
maxfun = 1e+6
convergence_tol = 1e-4
crossover = 0.9
percent_change = 0.9
#######################################################################
# the model function
# (similar to Simulation.cpp)
#######################################################################
from mystic.models.poly import poly1d
from mystic.models.poly import chebyshev8coeffs as Chebyshev8
from math import sin
def function(x):
"""a 8th-order Chebyshev polynomial + sin + a constant
8 6 4 2
f = (128*x1 - 256*x1 + 160*x1 - 32*x1 + 1) * sin(x2) * x3
Input:
- x -- 1-d array of coefficients [x1,x2,x3]
Output:
- f -- the function result
"""
return poly1d(Chebyshev8)(x[0]) * sin(x[1]) * x[2]
#######################################################################
# the subdiameter calculation
# (similar to driver.sh)
#######################################################################
def costFactory(i):
"""a cost factory for the cost function"""
def cost(rv):
"""compute the diameter as a calculation of cost
Input:
- rv -- 1-d array of model parameters
Output:
- diameter -- scale * | F(x) - F(x')|**2
"""
# prepare x and xprime
params = rv[:-1] #XXX: assumes Xi' is at rv[-1]
params_prime = rv[:i]+rv[-1:]+rv[i+1:-1] #XXX: assumes Xi' is at rv[-1]
# get the F(x) response
Fx = function(params)
# get the F(x') response
Fxp = function(params_prime)
# compute diameter
return -scale * (Fx - Fxp)**2
return cost
#######################################################################
# the differential evolution optimizer
# (replaces the call to dakota)
#######################################################################
def dakota(cost,lb,ub):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
from mystic.strategy import Best1Exp
from mystic.monitors import VerboseMonitor, Monitor
from mystic.tools import getch, random_seed
random_seed(123)
#stepmon = VerboseMonitor(100)
stepmon = Monitor()
evalmon = Monitor()
ndim = len(lb) # [(1 + RVend) - RVstart] + 1
solver = DifferentialEvolutionSolver2(ndim,npop)
solver.SetRandomInitialPoints(min=lb,max=ub)
solver.SetStrictRanges(min=lb,max=ub)
solver.SetEvaluationLimits(maxiter,maxfun)
solver.SetEvaluationMonitor(evalmon)
solver.SetGenerationMonitor(stepmon)
tol = convergence_tol
solver.Solve(cost,termination=CRT(tol,tol),strategy=Best1Exp, \
CrossProbability=crossover,ScalingFactor=percent_change)
print solver.bestSolution
diameter = -solver.bestEnergy / scale
func_evals = solver.evaluations
return diameter, func_evals
#######################################################################
# loop over model parameters to calculate concentration of measure
# (similar to main.cc)
#######################################################################
def UQ(start,end,lower,upper):
diameters = []
function_evaluations = []
total_func_evals = 0
total_diameter = 0.0
for i in range(start,end+1):
lb = lower[start:end+1] + [lower[i]]
ub = upper[start:end+1] + [upper[i]]
#construct cost function and run optimizer
cost = costFactory(i)
subdiameter, func_evals = dakota(cost,lb,ub) #XXX: no initial conditions
function_evaluations.append(func_evals)
diameters.append(subdiameter)
total_func_evals += function_evaluations[-1]
total_diameter += diameters[-1]
print "subdiameters (squared): %s" % diameters
print "diameter (squared): %s" % total_diameter
print "func_evals: %s => %s" % (function_evaluations, total_func_evals)
return
#######################################################################
# rank, bounds, and restart information
# (similar to concentration.variables)
#######################################################################
if __name__ == '__main__':
RVstart = 0; RVend = 2
lower_bounds = [-2.0,-2.0,-2.0]
upper_bounds = [ 2.0, 2.0, 2.0]
print " function:\n %s * sin(x2) * x3" % poly1d(Chebyshev8)
print " parameters: ['x1', 'x2', 'x3']"
print " lower bounds: %s" % lower_bounds
print " upper bounds: %s" % upper_bounds
print " ..."
UQ(RVstart,RVend,lower_bounds,upper_bounds)
|
jpakkane/meson
|
refs/heads/master
|
test cases/windows/8 find program/test-script-ext.py
|
14
|
#!/usr/bin/env python3
print('ext/noext')
|
rdeheele/odoo
|
refs/heads/master
|
addons/web/controllers/__init__.py
|
1214
|
from . import main
|
OCA/reporting-engine
|
refs/heads/12.0
|
report_context/__manifest__.py
|
1
|
# Copyright 2019 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': 'Report Context',
'summary': """
Adding context to reports""",
'version': '12.0.1.0.0',
'license': 'AGPL-3',
'author': 'Creu Blanca,Odoo Community Association (OCA)',
'website': 'https://github.com/OCA/reporting-engine',
'depends': [
'web',
],
'data': [
'views/ir_actions_report.xml',
'data/config_parameter.xml',
],
}
|
nikste/tensorflow
|
refs/heads/master
|
tensorflow/contrib/rnn/python/ops/core_rnn_cell.py
|
44
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells.
## Base interface for all RNN Cells
@@RNNCell
## RNN Cells for use with TensorFlow's core RNN methods
@@BasicRNNCell
@@BasicLSTMCell
@@GRUCell
@@LSTMCell
## Classes storing split `RNNCell` state
@@LSTMStateTuple
## RNN Cell wrappers (RNNCells that wrap other RNNCells)
@@MultiRNNCell
@@DropoutWrapper
@@EmbeddingWrapper
@@InputProjectionWrapper
@@OutputProjectionWrapper
@@DeviceWrapper
@@ResidualWrapper
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
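# Hedged usage sketch (editor addition): composing the cells listed in the
# module docstring under the TF 1.x contrib API. The placeholder input shape
# and the tf.nn.dynamic_rnn call are assumptions for illustration only, not
# confirmed by this module.
#
#   import tensorflow as tf
#   from tensorflow.contrib.rnn.python.ops import core_rnn_cell
#
#   cell = core_rnn_cell.MultiRNNCell(
#       [core_rnn_cell.BasicLSTMCell(128) for _ in range(2)])
#   inputs = tf.placeholder(tf.float32, [None, 20, 64])  # batch x time x features
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)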
|
tjandy/work
|
refs/heads/master
|
robot/socket/google/protobuf/internal/more_extensions_pb2.py
|
4
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/internal/more_extensions.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/internal/more_extensions.proto',
package='google.protobuf.internal',
syntax='proto2',
serialized_pb=_b('\n.google/protobuf/internal/more_extensions.proto\x12\x18google.protobuf.internal\"P\n\x0fTopLevelMessage\x12=\n\nsubmessage\x18\x01 \x01(\x0b\x32).google.protobuf.internal.ExtendedMessage\"\x1b\n\x0f\x45xtendedMessage*\x08\x08\x01\x10\x80\x80\x80\x80\x02\"-\n\x0e\x46oreignMessage\x12\x1b\n\x13\x66oreign_message_int\x18\x01 \x01(\x05:I\n\x16optional_int_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x01 \x01(\x05:w\n\x1aoptional_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x02 \x01(\x0b\x32(.google.protobuf.internal.ForeignMessage:I\n\x16repeated_int_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x03 \x03(\x05:w\n\x1arepeated_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x04 \x03(\x0b\x32(.google.protobuf.internal.ForeignMessage')
)
OPTIONAL_INT_EXTENSION_FIELD_NUMBER = 1
optional_int_extension = _descriptor.FieldDescriptor(
name='optional_int_extension', full_name='google.protobuf.internal.optional_int_extension', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
OPTIONAL_MESSAGE_EXTENSION_FIELD_NUMBER = 2
optional_message_extension = _descriptor.FieldDescriptor(
name='optional_message_extension', full_name='google.protobuf.internal.optional_message_extension', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
REPEATED_INT_EXTENSION_FIELD_NUMBER = 3
repeated_int_extension = _descriptor.FieldDescriptor(
name='repeated_int_extension', full_name='google.protobuf.internal.repeated_int_extension', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
REPEATED_MESSAGE_EXTENSION_FIELD_NUMBER = 4
repeated_message_extension = _descriptor.FieldDescriptor(
name='repeated_message_extension', full_name='google.protobuf.internal.repeated_message_extension', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
_TOPLEVELMESSAGE = _descriptor.Descriptor(
name='TopLevelMessage',
full_name='google.protobuf.internal.TopLevelMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='submessage', full_name='google.protobuf.internal.TopLevelMessage.submessage', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=76,
serialized_end=156,
)
_EXTENDEDMESSAGE = _descriptor.Descriptor(
name='ExtendedMessage',
full_name='google.protobuf.internal.ExtendedMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1, 536870912), ],
oneofs=[
],
serialized_start=158,
serialized_end=185,
)
_FOREIGNMESSAGE = _descriptor.Descriptor(
name='ForeignMessage',
full_name='google.protobuf.internal.ForeignMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='foreign_message_int', full_name='google.protobuf.internal.ForeignMessage.foreign_message_int', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=187,
serialized_end=232,
)
_TOPLEVELMESSAGE.fields_by_name['submessage'].message_type = _EXTENDEDMESSAGE
DESCRIPTOR.message_types_by_name['TopLevelMessage'] = _TOPLEVELMESSAGE
DESCRIPTOR.message_types_by_name['ExtendedMessage'] = _EXTENDEDMESSAGE
DESCRIPTOR.message_types_by_name['ForeignMessage'] = _FOREIGNMESSAGE
DESCRIPTOR.extensions_by_name['optional_int_extension'] = optional_int_extension
DESCRIPTOR.extensions_by_name['optional_message_extension'] = optional_message_extension
DESCRIPTOR.extensions_by_name['repeated_int_extension'] = repeated_int_extension
DESCRIPTOR.extensions_by_name['repeated_message_extension'] = repeated_message_extension
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TopLevelMessage = _reflection.GeneratedProtocolMessageType('TopLevelMessage', (_message.Message,), dict(
DESCRIPTOR = _TOPLEVELMESSAGE,
__module__ = 'google.protobuf.internal.more_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TopLevelMessage)
))
_sym_db.RegisterMessage(TopLevelMessage)
ExtendedMessage = _reflection.GeneratedProtocolMessageType('ExtendedMessage', (_message.Message,), dict(
DESCRIPTOR = _EXTENDEDMESSAGE,
__module__ = 'google.protobuf.internal.more_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.ExtendedMessage)
))
_sym_db.RegisterMessage(ExtendedMessage)
ForeignMessage = _reflection.GeneratedProtocolMessageType('ForeignMessage', (_message.Message,), dict(
DESCRIPTOR = _FOREIGNMESSAGE,
__module__ = 'google.protobuf.internal.more_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.ForeignMessage)
))
_sym_db.RegisterMessage(ForeignMessage)
ExtendedMessage.RegisterExtension(optional_int_extension)
optional_message_extension.message_type = _FOREIGNMESSAGE
ExtendedMessage.RegisterExtension(optional_message_extension)
ExtendedMessage.RegisterExtension(repeated_int_extension)
repeated_message_extension.message_type = _FOREIGNMESSAGE
ExtendedMessage.RegisterExtension(repeated_message_extension)
# @@protoc_insertion_point(module_scope)
|
csmengwan/autorest
|
refs/heads/master
|
AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/BodyNumber/autorestnumbertestservice/models/error.py
|
104
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Error(Model):
"""Error
:param status:
:type status: int
:param message:
:type message: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'int'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, status=None, message=None):
self.status = status
self.message = message
class ErrorException(HttpOperationError):
"""Server responsed with exception of type: 'Error'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorException, self).__init__(deserialize, response, 'Error', *args)
|
mvaled/sentry
|
refs/heads/master
|
src/sentry/mediators/sentry_app_installation_tokens/destroyer.py
|
3
|
from __future__ import absolute_import
from sentry.utils.cache import memoize
from sentry.mediators import Mediator, Param
from sentry.models import AuditLogEntryEvent, SentryAppInstallationToken
class Destroyer(Mediator):
api_token = Param("sentry.models.ApiToken")
generate_audit = Param(bool, default=False)
user = Param("sentry.models.User")
request = Param("rest_framework.request.Request", required=False)
def call(self):
self._destroy_sentry_app_installation_token()
self._destroy_api_token()
return self.api_token
def _destroy_api_token(self):
self.api_token.delete()
def _destroy_sentry_app_installation_token(self):
install_token = SentryAppInstallationToken.objects.get(api_token=self.api_token)
self.sentry_app_installation = install_token.sentry_app_installation
install_token.delete()
def audit(self):
from sentry.utils.audit import create_audit_entry
if self.request and self.generate_audit:
create_audit_entry(
request=self.request,
organization=self.organization,
target_object=self.api_token.id,
event=AuditLogEntryEvent.INTERNAL_INTEGRATION_REMOVE_TOKEN,
data={"sentry_app": self.sentry_app.name},
)
def record_analytics(self):
from sentry import analytics
analytics.record(
"sentry_app_installation_token.deleted",
user_id=self.user.id,
organization_id=self.organization.id,
sentry_app_installation_id=self.sentry_app_installation.id,
sentry_app=self.sentry_app.slug,
)
@memoize
def sentry_app(self):
return self.sentry_app_installation.sentry_app
@memoize
def organization(self):
return self.sentry_app_installation.organization
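# Hedged usage sketch (editor addition): how this mediator is typically
# invoked. It assumes the Mediator base class exposes a run() classmethod that
# validates the declared Params and then executes call(); the api_token, user
# and request objects must be supplied by the caller.
def _example_destroy_installation_token(api_token, user, request=None):
    # Returns the deleted ApiToken; passing a request together with
    # generate_audit=True also records an audit log entry.
    return Destroyer.run(
        api_token=api_token,
        user=user,
        request=request,
        generate_audit=request is not None,
    )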
|
FireWRT/OpenWrt-Firefly-Libraries
|
refs/heads/master
|
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/asyncio/constants.py
|
127
|
"""Constants."""
# After the connection is lost, log warnings after this many write()s.
LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5
# Seconds to wait before retrying accept().
ACCEPT_RETRY_DELAY = 1
|
amoennin/elastalert
|
refs/heads/master
|
elastalert/kibana.py
|
13
|
# -*- coding: utf-8 -*-
import urllib
from util import EAException
dashboard_temp = {'editable': True,
u'failover': False,
u'index': {u'default': u'NO_TIME_FILTER_OR_INDEX_PATTERN_NOT_MATCHED',
u'interval': u'none',
u'pattern': u'',
u'warm_fields': True},
u'loader': {u'hide': False,
u'load_elasticsearch': True,
u'load_elasticsearch_size': 20,
u'load_gist': True,
u'load_local': True,
u'save_default': True,
u'save_elasticsearch': True,
u'save_gist': False,
u'save_local': True,
u'save_temp': True,
u'save_temp_ttl': u'30d',
u'save_temp_ttl_enable': True},
u'nav': [{u'collapse': False,
u'enable': True,
u'filter_id': 0,
u'notice': False,
u'now': False,
u'refresh_intervals': [u'5s',
u'10s',
u'30s',
u'1m',
u'5m',
u'15m',
u'30m',
u'1h',
u'2h',
u'1d'],
u'status': u'Stable',
u'time_options': [u'5m',
u'15m',
u'1h',
u'6h',
u'12h',
u'24h',
u'2d',
u'7d',
u'30d'],
u'timefield': u'@timestamp',
u'type': u'timepicker'}],
u'panel_hints': True,
u'pulldowns': [{u'collapse': False,
u'enable': True,
u'notice': True,
u'type': u'filtering'}],
u'refresh': False,
u'rows': [{u'collapsable': True,
u'collapse': False,
u'editable': True,
u'height': u'350px',
u'notice': False,
u'panels': [{u'annotate': {u'enable': False,
u'field': u'_type',
u'query': u'*',
u'size': 20,
u'sort': [u'_score', u'desc']},
u'auto_int': True,
u'bars': True,
u'derivative': False,
u'editable': True,
u'fill': 3,
u'grid': {u'max': None, u'min': 0},
u'group': [u'default'],
u'interactive': True,
u'interval': u'1m',
u'intervals': [u'auto',
u'1s',
u'1m',
u'5m',
u'10m',
u'30m',
u'1h',
u'3h',
u'12h',
u'1d',
u'1w',
u'1M',
u'1y'],
u'legend': True,
u'legend_counts': True,
u'lines': False,
u'linewidth': 3,
u'mode': u'count',
u'options': True,
u'percentage': False,
u'pointradius': 5,
u'points': False,
u'queries': {u'ids': [0], u'mode': u'all'},
u'resolution': 100,
u'scale': 1,
u'show_query': True,
u'span': 12,
u'spyable': True,
u'stack': True,
u'time_field': u'@timestamp',
u'timezone': u'browser',
u'title': u'Events over time',
u'tooltip': {u'query_as_alias': True,
u'value_type': u'cumulative'},
u'type': u'histogram',
u'value_field': None,
u'x-axis': True,
u'y-axis': True,
u'y_format': u'none',
u'zerofill': True,
u'zoomlinks': True}],
u'title': u'Graph'},
{u'collapsable': True,
u'collapse': False,
u'editable': True,
u'height': u'350px',
u'notice': False,
u'panels': [{u'all_fields': False,
u'editable': True,
u'error': False,
u'field_list': True,
u'fields': [],
u'group': [u'default'],
u'header': True,
u'highlight': [],
u'localTime': True,
u'normTimes': True,
u'offset': 0,
u'overflow': u'min-height',
u'pages': 5,
u'paging': True,
u'queries': {u'ids': [0], u'mode': u'all'},
u'size': 100,
u'sort': [u'@timestamp', u'desc'],
u'sortable': True,
u'span': 12,
u'spyable': True,
u'status': u'Stable',
u'style': {u'font-size': u'9pt'},
u'timeField': u'@timestamp',
u'title': u'All events',
u'trimFactor': 300,
u'type': u'table'}],
u'title': u'Events'}],
u'services': {u'filter': {u'ids': [0],
u'list': {u'0': {u'active': True,
u'alias': u'',
u'field': u'@timestamp',
u'from': u'now-24h',
u'id': 0,
u'mandate': u'must',
u'to': u'now',
u'type': u'time'}}},
u'query': {u'ids': [0],
u'list': {u'0': {u'alias': u'',
u'color': u'#7EB26D',
u'enable': True,
u'id': 0,
u'pin': False,
u'query': u'',
u'type': u'lucene'}}}},
u'style': u'dark',
u'title': u'ElastAlert Alert Dashboard'}
kibana4_time_temp = "(refreshInterval:(display:Off,section:0,value:0),time:(from:'%s',mode:absolute,to:'%s'))"
def set_time(dashboard, start, end):
dashboard['services']['filter']['list']['0']['from'] = start
dashboard['services']['filter']['list']['0']['to'] = end
def set_index_name(dashboard, name):
dashboard['index']['default'] = name
def add_filter(dashboard, es_filter):
next_id = max(dashboard['services']['filter']['ids']) + 1
kibana_filter = {'active': True,
'alias': '',
'id': next_id,
'mandate': 'must'}
if 'not' in es_filter:
es_filter = es_filter['not']
kibana_filter['mandate'] = 'mustNot'
if 'query' in es_filter:
es_filter = es_filter['query']
if 'query_string' in es_filter:
kibana_filter['type'] = 'querystring'
kibana_filter['query'] = es_filter['query_string']['query']
elif 'term' in es_filter:
kibana_filter['type'] = 'field'
f_field, f_query = es_filter['term'].items()[0]
# Wrap query in quotes, otherwise certain characters cause kibana to throw errors
if isinstance(f_query, basestring):
f_query = '"%s"' % (f_query.replace('"', '\\"'))
if isinstance(f_query, list):
# Escape quotes
f_query = [item.replace('"', '\\"') for item in f_query]
# Wrap in quotes
f_query = ['"%s"' % (item) for item in f_query]
# Convert into joined query
f_query = '(%s)' % (' AND '.join(f_query))
kibana_filter['field'] = f_field
kibana_filter['query'] = f_query
elif 'range' in es_filter:
kibana_filter['type'] = 'range'
f_field, f_range = es_filter['range'].items()[0]
kibana_filter['field'] = f_field
kibana_filter.update(f_range)
else:
raise EAException("Could not parse filter %s for Kibana" % (es_filter))
dashboard['services']['filter']['ids'].append(next_id)
dashboard['services']['filter']['list'][str(next_id)] = kibana_filter
def set_name(dashboard, name):
dashboard['title'] = name
def set_included_fields(dashboard, fields):
dashboard['rows'][1]['panels'][0]['fields'] = list(set(fields))
def filters_from_dashboard(db):
filters = db['services']['filter']['list']
config_filters = []
or_filters = []
for filter in filters.values():
filter_type = filter['type']
if filter_type == 'time':
continue
if filter_type == 'querystring':
config_filter = {'query': {'query_string': {'query': filter['query']}}}
if filter_type == 'field':
config_filter = {'term': {filter['field']: filter['query']}}
if filter_type == 'range':
config_filter = {'range': {filter['field']: {'from': filter['from'], 'to': filter['to']}}}
if filter['mandate'] == 'mustNot':
config_filter = {'not': config_filter}
if filter['mandate'] == 'either':
or_filters.append(config_filter)
else:
config_filters.append(config_filter)
if or_filters:
config_filters.append({'or': or_filters})
return config_filters
def kibana4_dashboard_link(dashboard, starttime, endtime):
time_settings = kibana4_time_temp % (starttime, endtime)
time_settings = urllib.quote(time_settings)
return "%s?_g=%s" % (dashboard, time_settings)
|
ctrlaltdel/neutrinator
|
refs/heads/master
|
vendor/openstack/tests/unit/database/v1/test_instance.py
|
2
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from openstack.tests.unit import base
from openstack.database.v1 import instance
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'flavor': '1',
'id': IDENTIFIER,
'links': '3',
'name': '4',
'status': '5',
'volume': '6',
'datastore': {'7': 'seven'},
'region': '8',
'hostname': '9',
'created': '10',
'updated': '11',
}
class TestInstance(base.TestCase):
def test_basic(self):
sot = instance.Instance()
self.assertEqual('instance', sot.resource_key)
self.assertEqual('instances', sot.resources_key)
self.assertEqual('/instances', sot.base_path)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertTrue(sot.allow_commit)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = instance.Instance(**EXAMPLE)
self.assertEqual(EXAMPLE['flavor'], sot.flavor)
self.assertEqual(EXAMPLE['id'], sot.id)
self.assertEqual(EXAMPLE['links'], sot.links)
self.assertEqual(EXAMPLE['name'], sot.name)
self.assertEqual(EXAMPLE['status'], sot.status)
self.assertEqual(EXAMPLE['volume'], sot.volume)
self.assertEqual(EXAMPLE['datastore'], sot.datastore)
self.assertEqual(EXAMPLE['region'], sot.region)
self.assertEqual(EXAMPLE['hostname'], sot.hostname)
self.assertEqual(EXAMPLE['created'], sot.created_at)
self.assertEqual(EXAMPLE['updated'], sot.updated_at)
def test_enable_root_user(self):
sot = instance.Instance(**EXAMPLE)
response = mock.Mock()
response.body = {'user': {'name': 'root', 'password': 'foo'}}
response.json = mock.Mock(return_value=response.body)
sess = mock.Mock()
sess.post = mock.Mock(return_value=response)
self.assertEqual(response.body['user'], sot.enable_root_user(sess))
url = ("instances/%s/root" % IDENTIFIER)
sess.post.assert_called_with(url,)
def test_is_root_enabled(self):
sot = instance.Instance(**EXAMPLE)
response = mock.Mock()
response.body = {'rootEnabled': True}
response.json = mock.Mock(return_value=response.body)
sess = mock.Mock()
sess.get = mock.Mock(return_value=response)
self.assertTrue(sot.is_root_enabled(sess))
url = ("instances/%s/root" % IDENTIFIER)
sess.get.assert_called_with(url,)
def test_action_restart(self):
sot = instance.Instance(**EXAMPLE)
response = mock.Mock()
response.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=response)
self.assertIsNone(sot.restart(sess))
url = ("instances/%s/action" % IDENTIFIER)
body = {'restart': {}}
sess.post.assert_called_with(url,
json=body)
def test_action_resize(self):
sot = instance.Instance(**EXAMPLE)
response = mock.Mock()
response.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=response)
flavor = 'http://flavor/flav'
self.assertIsNone(sot.resize(sess, flavor))
url = ("instances/%s/action" % IDENTIFIER)
body = {'resize': {'flavorRef': flavor}}
sess.post.assert_called_with(url,
json=body)
def test_action_resize_volume(self):
sot = instance.Instance(**EXAMPLE)
response = mock.Mock()
response.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=response)
size = 4
self.assertIsNone(sot.resize_volume(sess, size))
url = ("instances/%s/action" % IDENTIFIER)
body = {'resize': {'volume': size}}
sess.post.assert_called_with(url,
json=body)
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2017_06_01_preview/aio/_policy_client.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import PolicyClientConfiguration
from .operations import PolicyAssignmentsOperations
from .operations import PolicySetDefinitionsOperations
from .operations import PolicyDefinitionsOperations
from .. import models
class PolicyClient(object):
"""To manage and control access to your resources, you can define customized policies and assign them at a scope.
:ivar policy_assignments: PolicyAssignmentsOperations operations
:vartype policy_assignments: azure.mgmt.resource.policy.v2017_06_01_preview.aio.operations.PolicyAssignmentsOperations
:ivar policy_set_definitions: PolicySetDefinitionsOperations operations
:vartype policy_set_definitions: azure.mgmt.resource.policy.v2017_06_01_preview.aio.operations.PolicySetDefinitionsOperations
:ivar policy_definitions: PolicyDefinitionsOperations operations
:vartype policy_definitions: azure.mgmt.resource.policy.v2017_06_01_preview.aio.operations.PolicyDefinitionsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = PolicyClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.policy_assignments = PolicyAssignmentsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.policy_set_definitions = PolicySetDefinitionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.policy_definitions = PolicyDefinitionsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "PolicyClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
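# Hedged usage sketch (editor addition): instantiating the async client and
# walking one of the generated operation groups. The credential comes from the
# caller (for example azure.identity.aio.DefaultAzureCredential), and list()
# with no arguments is an assumption based on the generated operations surface.
async def _example_list_policy_definitions(credential: "AsyncTokenCredential", subscription_id: str) -> None:
    async with PolicyClient(credential, subscription_id) as client:
        # policy_definitions.list() is expected to return an async pager of definitions.
        async for definition in client.policy_definitions.list():
            print(definition.name)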
|
axelmagn/metrics
|
refs/heads/master
|
ai-metrics/aimetrics/metrics.py
|
1
|
import json
import numpy as np
from sklearn.cross_validation import (StratifiedShuffleSplit, StratifiedKFold,
KFold)
from sklearn.preprocessing import binarize, normalize
from sklearn.metrics import (accuracy_score, roc_curve, roc_auc_score,
f1_score, classification_report)
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError, HTTPClient
from urllib.parse import urljoin
from .conf import get_conf
from .estimator import RemoteBSTClassifier
_conf = get_conf()['aimetrics']['metrics']
@gen.coroutine
def fetch_data(base_url, client_id, project_id, **kwargs):
"""Fetch all labeled records from cloudant for a project
Arguments
---------
base_url : str
client_id : str
project_id : str
**kwargs : **dict
Any additional keyword arguments are passed to
tornado.httpclient.AsyncHTTPClient.fetch. This is where
authentication credentials can be specified.
"""
http_client = AsyncHTTPClient()
url_suffix = _conf['data']['url_suffix'].format(client_id=client_id,
project_id=project_id)
url = urljoin(base_url, url_suffix)
method = _conf['data']['method']
response = yield http_client.fetch(url, method=method, **kwargs)
data = json.loads(response.body.decode('utf-8'))
features = data[0]['input'].keys()
classes = data[0]['output'].keys()
# print("DATA: " + response.body.decode('utf-8'))
X = np.asarray([[row['input'][k] for k in features] for row in data])
y = np.asarray([[row['output'].get(k, 0) for k in classes] for row in data])
return {
"features": features,
"classes": classes,
"X": X,
'y': y,
}
@gen.coroutine
def remote_classifier_report(base_url, model_type, client_id, project_id,
model_params=None, auth_username=None, auth_password=None,
threshold=0.5, destroy_model=True, save_model=False):
"""Evaluate model performances on a specific BST project dataset.
Performs 5-fold cross-validation using all classified records from the
provided client and project ID, and returns a list of evaluation metrics
from each run.
Parameters
----------
base_url : str
the base URL of the remote API.
model_type : str
The model type to use on the remote API. Refer to the bst.ai project
for available options.
client_id : str
The client's BlackSage Tech ID
project_id : str
The BlackSage Tech ID of the client's project
auth_username : str (default: None)
The username to use for basic authentication.
auth_password : str (default: None)
The password to use for basic authentication.
model_params : dict (default: {})
Any model parameters for the remote classifier. Refer to the bst.ai
project for available options.
threshold : float (default: 0.5)
The threshold at which to consider a probability prediction a positive
classification for use in metrics which take binary input.
destroy_model : boolean (default: True)
If True, the trained remote model is destroyed after evaluation is
complete.
save_model : boolean (default: False)
If true, a serialization of the model is attached to the output
dictionary under the key `model`.
"""
data = yield fetch_data(base_url, client_id, project_id,
auth_username=auth_username, auth_password=auth_password)
X, y = normalize(data['X']), normalize(data['y'])
# import ipdb; ipdb.set_trace() # DEBUG
"""
tv_ind, test_ind = StratifiedShuffleSplit(y, 1, 0.2)[0]
X_tv, X_test = X[tv_ind], X[test_ind]
y_tv, y_test = y[tv_ind], y[test_ind]
"""
#skf = StratifiedKFold(y, 5, True)
skf = KFold(y.shape[0], 5, True)
cv_results = []
for train_ind, test_ind in skf:
X_train, X_test = X[train_ind], X[test_ind]
y_train, y_test = y[train_ind], y[test_ind]
result = yield remote_classifier_metrics(base_url, model_type, X_train,
y_train, X_test, y_test, data['classes'],
model_params=model_params, destroy_model=destroy_model,
threshold=threshold, save_model=save_model)
cv_results.append(result)
return {"cross_validation": cv_results}
@gen.coroutine
def remote_classifier_metrics(base_url, model_type, X_train, y_train, X_test,
y_test, data_classes, model_params=None, destroy_model=True,
threshold=0.5, save_model=False):
"""Train and evaluate a single model with the provided data.
Parameters
----------
base_url : str
the base URL of the remote API.
model_type : str
The model type to use on the remote API. Refer to the bst.ai project
for available options.
X_train : np.ndarray
Training feature vectors
y_train : np.ndarray
Training target vectors
X_test : np.ndarray
Testing feature vectors
y_test : np.ndarray
Testing target vectors
data_classes : [str..]
Class labels for y targets
model_params : dict (default: {})
Any model parameters for the remote classifier. Refer to the bst.ai
project for available options.
destroy_model : boolean (default: True)
If True, the trained remote model is destroyed after evaluation is
complete.
threshold : float (default: 0.5)
The threshold at which to consider a probability prediction a positive
classification for use in metrics which take binary input.
save_model : boolean (default: False)
If true, a serialization of the model is attached to the output
dictionary under the key `model`.
Returns: A dictionary of evaluation metrics for the trained model.
"""
# create a new classifier and object to store results
clf = RemoteBSTClassifier(base_url, model_type, model_params=model_params)
result = {}
try:
result['train_error'] = yield clf.async_fit(X_train, y_train)
y_pred_proba = yield clf.async_predict_proba(X_test)
if save_model:
result['model'] = yield clf.get_model()
y_pred = binarize(y_pred_proba, threshold)
result['acc'] = accuracy_score(y_test, y_pred)
result['f1_score'] = f1_score(y_test, y_pred)
result['classification_report'] = classification_report(y_test, y_pred)
roc= {}
for i, label in enumerate(data_classes):
y_test_i = y_test[:,i]
# skip tests with no actual values
if np.sum(y_test_i) == 0:
continue
fpr, tpr, thresh = roc_curve(y_test[:,i], y_pred_proba[:,i])
roc[label] = {
"fpr": list(fpr),
"tpr": list(tpr),
"threshold": list(thresh),
}
result['roc'] = roc
try:
result['roc_auc'] = roc_auc_score(y_test, y_pred_proba)
except ValueError:
# roc_auc_score raises ValueError when a class has no positive samples in y_test
result['roc_auc'] = None
finally:
if(destroy_model):
yield clf.destroy_model()
return result
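# Hedged usage sketch (editor addition): driving the report coroutine from a
# plain script with tornado's IOLoop. The base URL, model type and BST
# identifiers are placeholders.
def _example_report():
    from tornado.ioloop import IOLoop
    report = IOLoop.current().run_sync(lambda: remote_classifier_report(
        'https://bst.example.com/',   # placeholder API base URL
        'logistic_regression',        # placeholder model type
        'client-id', 'project-id',    # placeholder BST identifiers
        auth_username='user', auth_password='secret'))
    # Each cross-validation fold contributes one dict of metrics.
    return report['cross_validation']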
|
guptaadhip/fast
|
refs/heads/master
|
tests/unit/lib/mock_socket_test.py
|
45
|
#!/usr/bin/env python
#
# Copyright 2011-2012 Andreas Wundsam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os.path
from copy import copy
sys.path.append(os.path.dirname(__file__) + "/../../..")
from pox.lib.mock_socket import MockSocket
class MockSocketTest(unittest.TestCase):
def setUp(self):
pass
def test_simple_send(self):
(a, b) = MockSocket.pair()
a.send("Hallo")
self.assertEquals(b.recv(), "Hallo")
b.send("Servus")
self.assertEquals(a.recv(), "Servus")
def test_ready_to_recv(self):
(a, b) = MockSocket.pair()
a.send("Hallo")
self.assertFalse(a.ready_to_recv())
self.assertTrue(b.ready_to_recv())
self.assertEquals(b.recv(), "Hallo")
self.assertFalse(b.ready_to_recv())
self.assertFalse(a.ready_to_recv())
b.send("Servus")
self.assertTrue(a.ready_to_recv())
self.assertEquals(a.recv(), "Servus")
self.assertFalse(a.ready_to_recv())
def test_on_ready_to_recv(self):
self.seen_size = -1
self.called = 0
def ready(socket, size):
self.called += 1
self.seen_size = size
(a, b) = MockSocket.pair()
b.set_on_ready_to_recv(ready)
self.assertEquals(self.called, 0)
a.send("Hallo")
self.assertEquals(self.called, 1)
self.assertEquals(self.seen_size, 5)
# check that it doesn't get called on the other sockets data
b.send("Huhu")
self.assertEquals(self.called, 1)
def test_empty_recv(self):
""" test_empty_recv: Check that empty reads on socket return ""
Note that this is actually non-sockety behavior and should probably be changed. This
test documents it as intended for now, though
"""
(a, b) = MockSocket.pair()
self.assertEquals(a.recv(), "")
if __name__ == '__main__':
unittest.main()
|
ridfrustum/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/django/template/loaders/filesystem.py
|
229
|
"""
Wrapper for loading templates from the filesystem.
"""
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
class Loader(BaseLoader):
is_usable = True
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = settings.TEMPLATE_DIRS
for template_dir in template_dirs:
try:
yield safe_join(template_dir, template_name)
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of this particular
# template_dir (it might be inside another one, so this isn't
# fatal).
pass
def load_template_source(self, template_name, template_dirs=None):
tried = []
for filepath in self.get_template_sources(template_name, template_dirs):
try:
file = open(filepath)
try:
return (file.read().decode(settings.FILE_CHARSET), filepath)
finally:
file.close()
except IOError:
tried.append(filepath)
if tried:
error_msg = "Tried %s" % tried
else:
error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
raise TemplateDoesNotExist(error_msg)
load_template_source.is_usable = True
_loader = Loader()
def load_template_source(template_name, template_dirs=None):
# For backwards compatibility
import warnings
warnings.warn(
"'django.template.loaders.filesystem.load_template_source' is deprecated; use 'django.template.loaders.filesystem.Loader' instead.",
DeprecationWarning
)
return _loader.load_template_source(template_name, template_dirs)
load_template_source.is_usable = True
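# Hedged usage sketch (editor addition): resolving a template by hand with the
# loader above. settings.TEMPLATE_DIRS must already be configured; the
# template name is a placeholder.
def _example_find_template(name='index.html'):
    loader = Loader()
    # Returns the template source and the absolute path it was loaded from,
    # or raises TemplateDoesNotExist if no directory contains it.
    source, origin = loader.load_template_source(name)
    return origin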
|
ppanczyk/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/netcfg.py
|
42
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import hashlib
from ansible.module_utils.six.moves import zip
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.network_common import to_list
DEFAULT_COMMENT_TOKENS = ['#', '!', '/*', '*/', 'echo']
DEFAULT_IGNORE_LINES_RE = set([
re.compile("Using \d+ out of \d+ bytes"),
re.compile("Building configuration"),
re.compile("Current configuration : \d+ bytes")
])
class ConfigLine(object):
def __init__(self, raw):
self.text = str(raw).strip()
self.raw = raw
self._children = list()
self._parents = list()
def __str__(self):
return self.raw
def __eq__(self, other):
return self.line == other.line
def __ne__(self, other):
return not self.__eq__(other)
def __getitem__(self, key):
for item in self._children:
if item.text == key:
return item
raise KeyError(key)
@property
def line(self):
line = self.parents
line.append(self.text)
return ' '.join(line)
@property
def children(self):
return _obj_to_text(self._children)
@property
def child_objs(self):
return self._children
@property
def parents(self):
return _obj_to_text(self._parents)
@property
def path(self):
config = _obj_to_raw(self._parents)
config.append(self.raw)
return '\n'.join(config)
@property
def has_children(self):
return len(self._children) > 0
@property
def has_parents(self):
return len(self._parents) > 0
def add_child(self, obj):
assert isinstance(obj, ConfigLine), 'child must be of type `ConfigLine`'
self._children.append(obj)
def ignore_line(text, tokens=None):
for item in (tokens or DEFAULT_COMMENT_TOKENS):
if text.startswith(item):
return True
for regex in DEFAULT_IGNORE_LINES_RE:
if regex.match(text):
return True
def _obj_to_text(x):
return [o.text for o in x]
def _obj_to_raw(x):
return [o.raw for o in x]
def _obj_to_block(objects, visited=None):
items = list()
for o in objects:
if o not in items:
items.append(o)
for child in o._children:
if child not in items:
items.append(child)
return _obj_to_raw(items)
def dumps(objects, output='block', comments=False):
if output == 'block':
items = _obj_to_block(objects)
elif output == 'commands':
items = _obj_to_text(objects)
else:
raise TypeError('unknown value supplied for keyword output')
if output != 'commands':
if comments:
for index, item in enumerate(items):
nextitem = index + 1
if nextitem < len(items) and not item.startswith(' ') and items[nextitem].startswith(' '):
item = '!\n%s' % item
items[index] = item
items.append('!')
items.append('end')
return '\n'.join(items)
class NetworkConfig(object):
def __init__(self, indent=1, contents=None, ignore_lines=None):
self._indent = indent
self._items = list()
self._config_text = None
if ignore_lines:
for item in ignore_lines:
if not isinstance(item, re._pattern_type):
item = re.compile(item)
DEFAULT_IGNORE_LINES_RE.add(item)
if contents:
self.load(contents)
@property
def items(self):
return self._items
@property
def config_text(self):
return self._config_text
@property
def sha1(self):
sha1 = hashlib.sha1()
sha1.update(to_bytes(str(self), errors='surrogate_or_strict'))
return sha1.digest()
def __getitem__(self, key):
for line in self:
if line.text == key:
return line
raise KeyError(key)
def __iter__(self):
return iter(self._items)
def __str__(self):
return '\n'.join([c.raw for c in self.items])
def __len__(self):
return len(self._items)
def load(self, s):
self._config_text = s
self._items = self.parse(s)
def loadfp(self, fp):
return self.load(open(fp).read())
def parse(self, lines, comment_tokens=None):
toplevel = re.compile(r'\S')
childline = re.compile(r'^\s*(.+)$')
entry_reg = re.compile(r'([{};])')
ancestors = list()
config = list()
curlevel = 0
prevlevel = 0
for linenum, line in enumerate(to_native(lines, errors='surrogate_or_strict').split('\n')):
text = entry_reg.sub('', line).strip()
cfg = ConfigLine(line)
if not text or ignore_line(text, comment_tokens):
continue
# handle top level commands
if toplevel.match(line):
ancestors = [cfg]
prevlevel = curlevel
curlevel = 0
# handle sub level commands
else:
match = childline.match(line)
line_indent = match.start(1)
prevlevel = curlevel
curlevel = int(line_indent / self._indent)
if (curlevel - 1) > prevlevel:
curlevel = prevlevel + 1
parent_level = curlevel - 1
cfg._parents = ancestors[:curlevel]
if curlevel > len(ancestors):
config.append(cfg)
continue
for i in range(curlevel, len(ancestors)):
ancestors.pop()
ancestors.append(cfg)
ancestors[parent_level].add_child(cfg)
config.append(cfg)
return config
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
if item.parents == path[:-1]:
return item
def get_block(self, path):
assert isinstance(path, list), 'path argument must be a list object'
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self._expand_block(obj)
def get_block_config(self, path):
block = self.get_block(path)
return dumps(block, 'block')
def _expand_block(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj._children:
if child in S:
continue
self._expand_block(child, S)
return S
def _diff_line(self, other):
updates = list()
for item in self.items:
if item not in other:
updates.append(item)
return updates
def _diff_strict(self, other):
updates = list()
for index, line in enumerate(self.items):
try:
if str(line).strip() != str(other[index]).strip():
updates.append(line)
except (AttributeError, IndexError):
updates.append(line)
return updates
def _diff_exact(self, other):
updates = list()
if len(other) != len(self.items):
updates.extend(self.items)
else:
for ours, theirs in zip(self.items, other):
if ours != theirs:
updates.extend(self.items)
break
return updates
def difference(self, other, match='line', path=None, replace=None):
"""Perform a config diff against the another network config
:param other: instance of NetworkConfig to diff against
:param match: type of diff to perform. valid values are 'line',
'strict', 'exact'
:param path: context in the network config to filter the diff
:param replace: the method used to generate the replacement lines.
valid values are 'block', 'line'
:returns: a list of ConfigLine objects that differ
"""
if path and match != 'line':
try:
other = other.get_block(path)
except ValueError:
other = list()
else:
other = other.items
# generate a list of ConfigLines that aren't in other
meth = getattr(self, '_diff_%s' % match)
updates = meth(other)
if replace == 'block':
parents = list()
for item in updates:
if not item.has_parents:
parents.append(item)
else:
for p in item._parents:
if p not in parents:
parents.append(p)
updates = list()
for item in parents:
updates.extend(self._expand_block(item))
visited = set()
expanded = list()
for item in updates:
for p in item._parents:
if p.line not in visited:
visited.add(p.line)
expanded.append(p)
expanded.append(item)
visited.add(item.line)
return expanded
def add(self, lines, parents=None):
ancestors = list()
offset = 0
obj = None
# global config command
if not parents:
for line in lines:
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_block(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self._indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj._parents = list(ancestors)
ancestors[-1]._children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in lines:
# check if child already exists
for child in ancestors[-1]._children:
if child.text == line:
break
else:
offset = len(parents) * self._indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item._parents = ancestors
ancestors[-1]._children.append(item)
self.items.append(item)
class CustomNetworkConfig(NetworkConfig):
def items_text(self):
return [item.text for item in self.items]
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.child_objs:
if child in S:
continue
self.expand_section(child, S)
return S
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
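# Hedged usage sketch (editor addition): diffing a candidate configuration
# against a running configuration and rendering the changed lines. The sample
# CLI text is illustrative only.
def _example_diff():
    running = NetworkConfig(indent=1, contents='interface Eth1\n description old\n')
    candidate = NetworkConfig(indent=1, contents='interface Eth1\n description new\n')
    # 'line' match keeps every candidate line whose full path is absent from
    # the running config; dumps() renders the result back to text.
    updates = candidate.difference(running, match='line')
    return dumps(updates, output='commands')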
|
stuartsale/iso_lib
|
refs/heads/master
|
isolib/__init__.py
|
1
|
import math
import numpy as np
import scipy.interpolate as si
from iso_grid import iso_grid
from iso_grid_tg import iso_grid_tefflogg
from padova_isomake import iso_interp, padova_interpolated_isomake
|
OpenDataNode/odn-ckancommons
|
refs/heads/master
|
odn_ckancommons/ckan_helper.py
|
1
|
'''
@author: jmc, mvi
'''
import json
import urllib2
import urllib
import requests
import ssl
import sys
def is_python_in_2_7_9():
# comparing sys.version as a string breaks on e.g. 2.7.10; compare the tuple instead
return sys.version_info >= (2, 7, 9)
if is_python_in_2_7_9():
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ssl_ctx.verify_mode = ssl.CERT_REQUIRED
CERTS="/etc/ssl/certs/ca-certificates.crt"
ssl_ctx.load_verify_locations(CERTS)
class CkanAPIWrapper():
'''
This class uses the CKAN action API to retrieve, create, update, delete and search for datasets
or dataset resources
'''
def __init__(self, url, api_key):
assert url
self.url = url
self.api_key = api_key
def _send_request(self, data_string, url):
assert url
if data_string:
data_string = urllib.quote(data_string)
request = urllib2.Request(url)
# Creating a dataset requires an authorization header.
if self.api_key:
request.add_header('Authorization', self.api_key)
response = None
try:
# Make the HTTP request.
if is_python_in_2_7_9():
response = urllib2.urlopen(request, data_string, context=ssl_ctx)
else:
response = urllib2.urlopen(request, data_string)
assert response.code == 200
# Use the json module to load CKAN's response into a dictionary.
response_dict = json.loads(response.read())
if response.url != url:
response_dict['_redirected_to'] = response.url
return response_dict
finally:
if response:
response.close()
def send_request(self, data_string, url):
assert url
response_dict = self._send_request(data_string, url)
assert response_dict['success']
# package_create returns the created package as its result.
return response_dict['result']
def site_read(self):
'''
Checks whether the configured url points to a CKAN site.
:return: tuple (result, redirected_url); result is truthy for a CKAN site,
redirected_url is the base url after a redirect, otherwise None
'''
url = self.url + "/api/action/site_read"
try:
resp = self._send_request('', url)
redirected = None
if resp.has_key('_redirected_to'):
redirected = resp['_redirected_to'].split("/api/action/site_read")[0]
return resp['result'], redirected
except (urllib2.HTTPError, urllib2.URLError):
return False, None
def has_edit_rights(self, organization_id_or_name):
'''
Checks if the user (api_key in constructor) has edit rights
in the organization given (to create and edit datasets)
:param organization_id_or_name: id or name of organization
:type organization_id_or_name: string
:return: True if has edit rights, False otherwise
'''
url = self.url + "/api/action/organization_list_for_user"
data_string = json.dumps({u"permission":u"create_dataset"})
resp = self.send_request(data_string, url)
for organization in resp:
if organization['id'] == organization_id_or_name or \
organization['name'] == organization_id_or_name:
return True
return False
def package_create(self, dataset):
assert dataset
dataset_dict = dataset.tojson_without_resource()
data_string = json.dumps(dataset_dict)
url = self.url + "/api/action/package_create"
return self.send_request(data_string, url)
def package_update(self, dataset):
'''
Updates an existing dataset; any missing parameters will be nulled,
except automatically set parameters like 'revision_id'.
To update only the given parameters use:
package_update_data
'''
assert dataset
dataset_dict = dataset.tojson_without_resource()
data_string = json.dumps(dataset_dict)
url = self.url + "/api/action/package_update"
return self.send_request(data_string, url)
def package_update_data(self, package_id, data_to_change):
'''
Changes data of existing (!) package. Can be used to 'reactivate' deleted
datasets (not permanently deleted) by:
package_update_data('package_id_or_name', {'state':'active'})
!!! this MUST NOT be used to update resources with url_type == 'upload'
For updating resource data use resource_update_data instead
!!! changes ONLY provided parameters
:param package_id: package id or name
:param data_to_change: [dictionary] data we want to change for the package
'''
assert package_id
assert data_to_change
package = self.get_package(package_id)
assert package
package.update(data_to_change)
data_string = json.dumps(package)
url = self.url + '/api/action/package_update'
return self.send_request(data_string, url)
def resource_create(self, resource):
assert resource
url = self.url + "/api/action/resource_create"
if u'url_type' in resource and resource[u'url_type'] == u'upload':
# needs to do multipart-form-data request
return self.resource_create_update_with_upload(url, resource)
data_string = json.dumps(resource)
return self.send_request(data_string, url)
def resource_update(self, resource):
'''
Updates an existing resource; any missing parameters will be nulled,
except automatically set parameters like 'revision_id'.
To update only the given parameters use:
resource_update_data
'''
assert resource
url = self.url + "/api/action/resource_update"
if u'url_type' in resource and resource['url_type'] == 'upload':
# needs to do multipart-form-data request
return self.resource_create_update_with_upload(url, resource)
data_string = json.dumps(resource)
return self.send_request(data_string, url)
def resource_delete(self, resource_id):
'''Deletes existing resource
:param resource_id: resource id
:type resource_id: string
'''
assert resource_id
data_string = json.dumps({'id':resource_id})
url = self.url + "/api/action/resource_delete"
return self.send_request(data_string, url)
def resource_update_data(self, resource_id, data_to_change):
'''
Used for updating resources
:param resource_id: resource id, name can't be used like with package
:param data_to_change: [dictionary] data to be changed
!!! changes ONLY provided parameters
::usage::
resource_update_data('resource_id', {'name':'new name'})
'''
assert resource_id
assert data_to_change
resource = self.get_resource(resource_id)
assert resource
resource.update(data_to_change)
data_string = json.dumps(resource)
url = self.url + '/api/action/resource_update'
return self.send_request(data_string, url)
def resource_create_update_with_upload(self, url, data):
"""
Uploads resource file
:param url: create or update url for creating resource
:param data: [dictionary] resource data like 'url', 'name', ...
::usage::
url = self.url + '/api/action/resource_update'
data = {'id':'resource_id', 'url_type':'upload', 'name':'file_name.xml'}
self.resource_create_update_with_upload(url, data)
"""
assert url
assert data
file = None
try:
resource_file_url = data.pop('url')
data['url'] = ''
# retrieving file from source
if is_python_in_2_7_9():
file = urllib2.urlopen(resource_file_url,context=ssl_ctx)
else:
file = urllib2.urlopen(resource_file_url)
file.name = resource_file_url.split('/')[-1]
# uploading file
response = requests.post(url,
data=data,
headers={'X-CKAN-API-Key':self.api_key}, verify=False,
files=[('upload', file)])
response = json.loads(response.content)
if response['success'] == False:
raise Exception(response.get('error', {}).values())
return response['result']
finally:
if file:
file.close()
def resource_search_by_name(self, name):
assert name
dataset_dict = {}
dataset_dict['query'] = 'name:' + name
data_string = json.dumps(dataset_dict)
url = self.url + "/api/action/resource_search"
result_ckan = self.send_request(data_string, url)
id_resource = None
if result_ckan['count'] == 1:
results = result_ckan['results']
result = results[0]
id_resource = result['id']
found = True
else:
found = False
return found, id_resource
def _is_datastore_resource(self, resource):
return resource.get('url_type', False) and resource.get('url_type', '') == 'datastore'
def datastore_search(self, search_parameters_dict):
data_string = json.dumps(search_parameters_dict)
url = self.url + "/api/action/datastore_search"
return self.send_request(data_string, url)
def datastore_create(self, data_dict):
"""Create datastore resource
for structure of data_dict:
see http://docs.ckan.org/en/ckan-2.2/datastore.html#ckanext.datastore.logic.action.datastore_create
"""
data_string = json.dumps(data_dict)
url = self.url + "/api/action/datastore_create"
return self.send_request(data_string, url)
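# Hedged example of a datastore_create payload (field and record values are
# illustrative; the CKAN docs linked in the docstring describe the full schema):
#
#     {
#         'resource_id': '<existing resource id>',
#         'force': True,
#         'fields': [{'id': 'year', 'type': 'int'}, {'id': 'city', 'type': 'text'}],
#         'records': [{'year': 2014, 'city': 'Bratislava'}],
#     }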
def datastore_upsert(self, data_dict):
data_string = json.dumps(data_dict)
url = self.url + "/api/action/datastore_upsert"
return self.send_request(data_string, url)
def datastore_delete(self, resource_id):
data_dict = {
'resource_id': resource_id,
'force': True
}
data_string = json.dumps(data_dict)
url = self.url + "/api/action/datastore_delete"
return self.send_request(data_string, url)
def get_package_resource_by_name(self, name, package_id):
assert name
assert package_id
package = self.get_package(package_id)
for resource in package['resources']:
if resource['name'] == name:
return True, resource['id']
return False, None
def resource_search_by_url(self, url, package_id):
assert url
package = self.get_package(package_id)
for resource in package['resources']:
if resource['url'] == url:
return True, resource['id']
return False, None
def package_search_by_name(self, dataset):
assert dataset
dataset_dict = {}
dataset_dict['q'] = 'name:' + dataset.name
data_string = json.dumps(dataset_dict)
url = self.url + "/api/action/package_search"
result = self.send_request(data_string, url)
id_package = None
if result['count'] == 1:
id_package = result['results'][0]['id']
found = True
else:
found = False
return found, id_package
def package_delete_all(self):
'''
Doesn't delete datasets permanently, it only changes their state
to 'deleted'; a 'deleted' dataset can no longer be retrieved
through the search api
'''
ids = self.get_all_package_ids()
dataset_num = len(ids)
for i, dataset_id in enumerate(ids, start=1):
print '[%d / %d] deleting dataset with id %s' % (i, dataset_num, dataset_id,)
try:
self.package_delete(dataset_id)
print 'dataset deleted: %s' % (dataset_id,)
except urllib2.HTTPError, e:
print "error: " + str(e)
def package_delete(self, package_id):
assert package_id
url = self.url + '/api/action/package_delete'
data_string = json.dumps({'id': package_id})
self.send_request(data_string, url)
def get_all_package_ids(self, limit=None, offset=None):
dataset_dict = {}
if limit:
dataset_dict['limit'] = limit
if offset:
dataset_dict['offset'] = offset
data_string = json.dumps(dataset_dict)
url = self.url + '/api/action/package_list'
return self.send_request(data_string, url)
def get_package(self, package_id):
assert package_id
dataset_dict = {
'id': package_id,
'use_default_schema':True,
}
data_string = json.dumps(dataset_dict)
url = self.url + '/api/action/package_show'
try:
return self.send_request(data_string, url)
except urllib2.HTTPError, e:
if e.code == 404: # Not Found
return None
else:
raise e
def get_resource(self, resource_id):
assert resource_id
res_dict = {'id': resource_id}
data_string = json.dumps(res_dict)
url = self.url + '/api/action/resource_show'
try:
return self.send_request(data_string, url)
except urllib2.HTTPError, e:
if e.code == 404: # Not Found
return None
else:
raise e
def get_changed_packages_since(self, since_date_iso_format):
'''
Find ids of packages that were changed since provided date
:param since_date_iso_format: date since when we are looking for changes [ISO_8601]
::usage::
_get_changed_packages_since('2014-10-29T09:50:50+00:00')
'''
assert since_date_iso_format
url = self.url + '/api/search/revision?since_time=%s' % since_date_iso_format
package_ids = []
try:
revision_ids = self._send_request(None, url)
if len(revision_ids):
for i, revision_id in enumerate(revision_ids, start=1):
url = self.url + '/api/action/revision_show'
print '[%d / %d] %s' % (i, len(revision_ids), revision_id,)
try:
data_string = json.dumps({'id': revision_id})
revision = self.send_request(data_string, url)
except Exception, e:
print 'Unable to get content for URL: %s: %s' % (url, str(e),)
continue
for package_id in revision['packages']:
if not package_id in package_ids:
package_ids.append(package_id)
else:
print 'No packages have been updated on the remote CKAN instance since the last harvest job'
return None
except urllib2.HTTPError,e:
print "error gathering changes: %s" % (url, str(e),)
return None
return package_ids
def get_modified_field(self, dataset):
modified = None
for e in dataset.extras:
if e['key'] == 'modified' :
modified = e['value']
break
return modified
def get_dataset_field(self, dataset):
result = None
for e in dataset.extras:
if e['key'] == 'dataset' :
result = e['value']
break
return result
def compare_dataset_by_modified(self, dataset):
dataset_dict = {}
modified = self.get_modified_field(dataset)
query = 'name:' + dataset.name
if modified is not None:
    modified = modified.replace(":", "\\:")
    query += " modified:" + modified
dataset_dict['q'] = query
data_string = json.dumps(dataset_dict)
url = self.url + "/api/action/package_search"
result = self.send_request(data_string, url)
if result['count'] > 0:
found = False
else:
found = True
return found
def organization_create(self, organization):
assert organization
organizations = {'name': organization}
data_string = json.dumps(organizations)
url = self.url + "/api/action/organization_create"
return self.send_request(data_string, url)
def organization_list(self):
url = self.url + "/api/3/action/organization_list"
return self.send_request('', url)
def organization_show(self, id):
dataset_dict = {
'id': id
}
data_string = json.dumps(dataset_dict)
url = self.url + '/api/action/organization_show'
return self.send_request(data_string, url)
# redirection safe
def organization_show2(self, package_id):
assert package_id
url = self.url + '/api/action/organization_show?id={0}'.format(package_id)
try:
return self.send_request('', url)
except urllib2.HTTPError, e:
if e.code == 404: # Not Found
return None
else:
raise e
def organization_update(self, organization):
assert organization
data_string = json.dumps(organization)
url = self.url + "/api/action/organization_update"
return self.send_request(data_string, url)
def organization_delete(self, org_id):
assert org_id
url = self.url + '/api/action/organization_delete'
data_string = json.dumps({'id': org_id})
self.send_request(data_string, url)
url = self.url + '/api/action/organization_purge'
data_string = json.dumps({'id': org_id})
self.send_request(data_string, url)
def find_organization(self, organization_name):
found_organization = False
result = None
try:
result = self.organization_show(organization_name)
found_organization = True
except Exception as __:
found_organization = False
return found_organization, result
def delete_resources_not_with_name_in(self, names, package_id):
''' Deletes resources that DO NOT have their name in names list
:param names: names of resources NOT to be deleted
:type names: list of strings
:param package_id: package id or name
:type package_id: string
'''
assert package_id
dataset = self.get_package(package_id)
resources = dataset['resources']
errors = []
for resource in resources:
if resource['name'] not in names:
if self._is_datastore_resource(resource):
try:
self.datastore_delete(resource['id'])
except urllib2.HTTPError, e:
msg = 'Failed to delete datastore resource [{0}]: {1}'\
.format(resource['id'], str(e))
errors.append(msg)
try:
self.resource_delete(resource['id'])
except urllib2.HTTPError, e:
msg = 'Failed to delete resource [{0}]: {1}'\
.format(resource['id'], str(e))
errors.append(msg)
return errors
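# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# The URL, API key and dataset object below are placeholders; the calls are
# commented out so that importing this file stays free of network side effects.
#
#     ckan = CkanAPIWrapper('https://ckan.example.org', 'my-api-key')
#     ok, redirected = ckan.site_read()                  # is this really CKAN?
#     found, package_id = ckan.package_search_by_name(dataset)
#     if found:
#         ckan.package_update_data(package_id, {'state': 'active'})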
|
Wesalius/EloBot
|
refs/heads/master
|
pywikibot/tools/chars.py
|
2
|
# -*- coding: utf-8 -*-
"""Character based helper functions(not wiki-dependent)."""
#
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import sys
from pywikibot.tools import LazyRegex
if sys.version_info[0] > 2:
unicode = str
# All characters in the Cf category in a static list. When testing each Unicode
# codepoint it takes longer especially when working with UCS2. The lists also
# differ between Python versions which can be avoided by this static list.
_category_cf = frozenset([
'\U000000ad', '\U00000600', '\U00000601', '\U00000602', '\U00000603',
'\U00000604', '\U0000061c', '\U000006dd', '\U0000070f', '\U0000180e',
'\U0000200b', '\U0000200c', '\U0000200d', '\U0000200e', '\U0000200f',
'\U0000202a', '\U0000202b', '\U0000202c', '\U0000202d', '\U0000202e',
'\U00002060', '\U00002061', '\U00002062', '\U00002063', '\U00002064',
'\U00002066', '\U00002067', '\U00002068', '\U00002069', '\U0000206a',
'\U0000206b', '\U0000206c', '\U0000206d', '\U0000206e', '\U0000206f',
'\U0000feff', '\U0000fff9', '\U0000fffa', '\U0000fffb', '\U000110bd',
'\U0001d173', '\U0001d174', '\U0001d175', '\U0001d176', '\U0001d177',
'\U0001d178', '\U0001d179', '\U0001d17a', '\U000e0001', '\U000e0020',
'\U000e0021', '\U000e0022', '\U000e0023', '\U000e0024', '\U000e0025',
'\U000e0026', '\U000e0027', '\U000e0028', '\U000e0029', '\U000e002a',
'\U000e002b', '\U000e002c', '\U000e002d', '\U000e002e', '\U000e002f',
'\U000e0030', '\U000e0031', '\U000e0032', '\U000e0033', '\U000e0034',
'\U000e0035', '\U000e0036', '\U000e0037', '\U000e0038', '\U000e0039',
'\U000e003a', '\U000e003b', '\U000e003c', '\U000e003d', '\U000e003e',
'\U000e003f', '\U000e0040', '\U000e0041', '\U000e0042', '\U000e0043',
'\U000e0044', '\U000e0045', '\U000e0046', '\U000e0047', '\U000e0048',
'\U000e0049', '\U000e004a', '\U000e004b', '\U000e004c', '\U000e004d',
'\U000e004e', '\U000e004f', '\U000e0050', '\U000e0051', '\U000e0052',
'\U000e0053', '\U000e0054', '\U000e0055', '\U000e0056', '\U000e0057',
'\U000e0058', '\U000e0059', '\U000e005a', '\U000e005b', '\U000e005c',
'\U000e005d', '\U000e005e', '\U000e005f', '\U000e0060', '\U000e0061',
'\U000e0062', '\U000e0063', '\U000e0064', '\U000e0065', '\U000e0066',
'\U000e0067', '\U000e0068', '\U000e0069', '\U000e006a', '\U000e006b',
'\U000e006c', '\U000e006d', '\U000e006e', '\U000e006f', '\U000e0070',
'\U000e0071', '\U000e0072', '\U000e0073', '\U000e0074', '\U000e0075',
'\U000e0076', '\U000e0077', '\U000e0078', '\U000e0079', '\U000e007a',
'\U000e007b', '\U000e007c', '\U000e007d', '\U000e007e', '\U000e007f',
])
# This is a set of all invisible characters
# At the moment we've only added the characters from the Cf category
_invisible_chars = frozenset(_category_cf)
invisible_regex = LazyRegex(
lambda: '[' + ''.join(_invisible_chars) + ']'
)
def contains_invisible(text):
"""Return True if the text contain any of the invisible characters."""
return any(char in _invisible_chars for char in text)
def replace_invisible(text):
"""Replace invisible characters by '<codepoint>'."""
def replace(match):
match = match.group()
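# On narrow (UCS-2) Python builds a supplementary character arrives here
# as a surrogate pair, so the two halves are recombined into one codepoint.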
if sys.maxunicode < 0x10ffff and len(match) == 2:
mask = (1 << 10) - 1
assert(ord(match[0]) & ~mask == 0xd800)
assert(ord(match[1]) & ~mask == 0xdc00)
codepoint = (ord(match[0]) & mask) << 10 | (ord(match[1]) & mask)
else:
codepoint = ord(match)
return '<{0:x}>'.format(codepoint)
return invisible_regex.sub(replace, text)
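# --- Hedged usage sketch (editorial addition, not part of the upstream file) ---
# Demonstrates both helpers on a string containing a zero-width space
# (U+200B), one of the Cf codepoints listed in _category_cf above.
if __name__ == '__main__':
    sample = 'foo\u200bbar'
    print(contains_invisible(sample))   # True
    print(replace_invisible(sample))    # foo<200b>bar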
|
pointhi/kicad-footprint-generator
|
refs/heads/master
|
scripts/Connector/Connector_Hirose/conn_hirose_df12e_dp_smd_top.py
|
1
|
#!/usr/bin/env python3
'''
kicad-footprint-generator is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
kicad-footprint-generator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
'''
import sys
import os
#sys.path.append(os.path.join(sys.path[0],"..","..","kicad_mod")) # load kicad_mod path
# export PYTHONPATH="${PYTHONPATH}<path to kicad-footprint-generator directory>"
sys.path.append(os.path.join(sys.path[0], "..", "..", "..")) # load parent path of KicadModTree
from math import sqrt
import argparse
import yaml
from helpers import *
from KicadModTree import *
sys.path.append(os.path.join(sys.path[0], "..", "..", "tools")) # load parent path of tools
from footprint_text_fields import addTextFields
series = 'DF12'
series_long = 'DF12E SMD'
manufacturer = 'Hirose'
orientation = 'V'
number_of_rows = 2
datasheet = 'https://www.hirose.com/product/document?clcode=CL0537-0834-6-81&productname=DF12E(3.0)-50DP-0.5V(81)&series=DF12&documenttype=2DDrawing&lang=en&documentid=0000992393'
#Hirose part number
part_code = "DF12E3.0-{n:02}DP-0.5V"
pitch = 0.5
pad_size = [0.3, 1.6]
pad_size_paste = [0.28,1.2]
pins_per_row_range = [10,20,30,40,50,60,80,14,32,36]
def generate_one_footprint(idx, pins, configuration):
mpn = part_code.format(n=pins)
# handle arguments
orientation_str = configuration['orientation_options'][orientation]
footprint_name = configuration['fp_name_format_string'].format(man=manufacturer,
series=series,
mpn=mpn, num_rows=number_of_rows, pins_per_row=int(pins/2), mounting_pad = "",
pitch=pitch, orientation=orientation_str)
kicad_mod = Footprint(footprint_name)
kicad_mod.setAttribute('smd')
kicad_mod.setDescription("{:s} {:s}, {:s}, {:d} Pins per row ({:s}), generated with kicad-footprint-generator".format(manufacturer, series_long, mpn, pins, datasheet))
kicad_mod.setTags(configuration['keyword_fp_string'].format(series=series,
orientation=orientation_str, man=manufacturer,
entry=configuration['entry_direction'][orientation]))
########################## Dimensions ##############################
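# A is the overall body width and B the span between the first and the last
# pad of a row, i.e. (pins/2 - 1) * pitch; in every branch below A = B + 2.7mm.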
if(idx == 6): #80 Pins
A = 22.2
B = 19.5
elif(idx == 7): #14 Pins
A = 5.7
B = 3.0
elif (idx == 8): #32 Pins
A = 10.2
B = 7.5
elif (idx == 9): #36 Pins
A = 11.2
B = 8.5
else:
A = 4.7 + (idx * 2.5)
B = A - 2.7
body_edge_out={
'left': round(-A/2 ,2),
'right': round(A/2 ,2),
'top': -1.9,
'bottom': 1.9
}
D = A - 1.5
body_edge_in={
'left': round(-D/2 ,2),
'right': round(D/2 ,2),
'top': -1.225,
'bottom': 1.225
}
############################# Pads ##################################
#
# Add pads
#
#Pad only with F.Cu and F.Mask
CPins=int(pins / 2)
kicad_mod.append(PadArray(start=[-B/2, 1.8], initial=1,
pincount=CPins, increment=1, x_spacing=pitch, size=pad_size,
type=Pad.TYPE_SMT, shape=Pad.SHAPE_RECT, layers=["F.Cu", "F.Mask"]))
kicad_mod.append(PadArray(start=[-B/2, -1.8], initial=CPins + 1,
pincount=CPins, increment=1, x_spacing=pitch, size=pad_size,
type=Pad.TYPE_SMT, shape=Pad.SHAPE_RECT, layers=["F.Cu", "F.Mask"]))
#F.Paste
kicad_mod.append(PadArray(start=[-B/2, 2], initial='', increment=0,
pincount=CPins, x_spacing=pitch, size=pad_size_paste,
type=Pad.TYPE_SMT, shape=Pad.SHAPE_RECT, layers=["F.Paste"]))
kicad_mod.append(PadArray(start=[-B/2, -2], initial='', increment=0,
pincount=CPins, x_spacing=pitch, size=pad_size_paste,
type=Pad.TYPE_SMT, shape=Pad.SHAPE_RECT, layers=["F.Paste"]))
######################## Fabrication Layer ###########################
main_body_out_poly= [
{'x': body_edge_out['left'], 'y': body_edge_out['bottom']},
{'x': body_edge_out['left'], 'y': body_edge_out['top']},
{'x': body_edge_out['right'], 'y': body_edge_out['top']},
{'x': body_edge_out['right'], 'y': body_edge_out['bottom']},
{'x': body_edge_out['left'], 'y': body_edge_out['bottom']}
]
kicad_mod.append(PolygoneLine(polygone=main_body_out_poly,
width=configuration['fab_line_width'], layer="F.Fab"))
main_body_in_poly= [
{'x': body_edge_in['left'], 'y': body_edge_in['bottom']},
{'x': body_edge_in['left'], 'y': body_edge_in['top']},
{'x': body_edge_in['right'], 'y': body_edge_in['top']},
{'x': body_edge_in['right'], 'y': body_edge_in['bottom']},
{'x': body_edge_in['left'], 'y': body_edge_in['bottom']}
]
kicad_mod.append(PolygoneLine(polygone=main_body_in_poly,
width=configuration['fab_line_width'], layer="F.Fab"))
main_arrow_poly= [
{'x': (-B/2)-0.2, 'y': body_edge_out['bottom'] + 0.75},
{'x': -B/2, 'y': 1.8},
{'x': (-B/2)+0.2, 'y': body_edge_out['bottom'] + 0.75},
{'x': (-B/2)-0.2, 'y': body_edge_out['bottom'] + 0.75}
]
kicad_mod.append(PolygoneLine(polygone=main_arrow_poly,
width=configuration['fab_line_width'], layer="F.Fab"))
######################## SilkS Layer ###########################
offset = (pad_size[0]/2)+0.2+.06
poly_left= [
{'x': -(B/2) - offset, 'y': body_edge_out['bottom'] + (pad_size[1]/3)},
{'x': -(B/2) - offset, 'y': body_edge_out['bottom'] + configuration['silk_fab_offset']},
{'x': -(A/2) - configuration['silk_fab_offset'], 'y': body_edge_out['bottom'] + configuration['silk_fab_offset']},
{'x': body_edge_out['left'] - configuration['silk_fab_offset'], 'y': body_edge_out['top'] - configuration['silk_fab_offset']},
{'x': -(B/2) - offset, 'y': body_edge_out['top'] - configuration['silk_fab_offset']}
]
kicad_mod.append(PolygoneLine(polygone=poly_left,
width=configuration['silk_line_width'], layer="F.SilkS"))
poly_right= [
{'x': (B/2) + offset, 'y': body_edge_out['bottom'] + configuration['silk_fab_offset']},
{'x': (A/2) + configuration['silk_fab_offset'], 'y': body_edge_out['bottom'] + configuration['silk_fab_offset']},
{'x': body_edge_out['right'] + configuration['silk_fab_offset'], 'y': body_edge_out['top'] - configuration['silk_fab_offset']},
{'x': (B/2) + offset, 'y': body_edge_out['top'] - configuration['silk_fab_offset']}
]
kicad_mod.append(PolygoneLine(polygone=poly_right,
width=configuration['silk_line_width'], layer="F.SilkS"))
######################## CrtYd Layer ###########################
CrtYd_offset = configuration['courtyard_offset']['connector']
CrtYd_grid = configuration['courtyard_grid']
poly_yd = [
{'x': roundToBase(body_edge_out['left'] - CrtYd_offset, CrtYd_grid), 'y': roundToBase(-2.6 - CrtYd_offset, CrtYd_grid)},
{'x': roundToBase(body_edge_out['left'] - CrtYd_offset, CrtYd_grid), 'y': roundToBase(2.6 + CrtYd_offset, CrtYd_grid)},
{'x': roundToBase(body_edge_out['right'] + CrtYd_offset, CrtYd_grid), 'y': roundToBase(2.6 + CrtYd_offset, CrtYd_grid)},
{'x': roundToBase(body_edge_out['right'] + CrtYd_offset, CrtYd_grid), 'y': roundToBase(-2.6 - CrtYd_offset, CrtYd_grid)},
{'x': roundToBase(body_edge_out['left'] - CrtYd_offset, CrtYd_grid), 'y': roundToBase(-2.6 - CrtYd_offset, CrtYd_grid)}
]
kicad_mod.append(PolygoneLine(polygone=poly_yd,
layer='F.CrtYd', width=configuration['courtyard_line_width']))
######################### Text Fields ###############################
cy1 = roundToBase(body_edge_out['top'] -1 - configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
cy2 = roundToBase(body_edge_out['bottom'] + 1 + configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
addTextFields(kicad_mod=kicad_mod, configuration=configuration, body_edges=body_edge_out,
courtyard={'top':cy1, 'bottom':cy2}, fp_name=footprint_name, text_y_inside_position='top')
##################### Write to File and 3D ############################
model3d_path_prefix = configuration.get('3d_model_prefix','${KISYS3DMOD}/')
lib_name = configuration['lib_name_format_string'].format(series=series, man=manufacturer)
model_name = '{model3d_path_prefix:s}{lib_name:s}.3dshapes/{fp_name:s}.wrl'.format(
model3d_path_prefix=model3d_path_prefix, lib_name=lib_name, fp_name=footprint_name)
kicad_mod.append(Model(filename=model_name))
output_dir = '{lib_name:s}.pretty/'.format(lib_name=lib_name)
if not os.path.isdir(output_dir): #returns false if path does not yet exist!! (Does not check path validity)
os.makedirs(output_dir)
filename = '{outdir:s}{fp_name:s}.kicad_mod'.format(outdir=output_dir, fp_name=footprint_name)
file_handler = KicadFileHandler(kicad_mod)
file_handler.writeFile(filename)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='use config .yaml files to create footprints.')
parser.add_argument('--global_config', type=str, nargs='?', help='the config file defining how the footprint will look (KLC).', default='../../tools/global_config_files/config_KLCv3.0.yaml')
parser.add_argument('--series_config', type=str, nargs='?', help='the config file defining series parameters.', default='../conn_config_KLCv3.yaml')
args = parser.parse_args()
with open(args.global_config, 'r') as config_stream:
try:
configuration = yaml.safe_load(config_stream)
except yaml.YAMLError as exc:
print(exc)
with open(args.series_config, 'r') as config_stream:
try:
configuration.update(yaml.safe_load(config_stream))
except yaml.YAMLError as exc:
print(exc)
idx = 0
for pincount in pins_per_row_range:
generate_one_footprint(idx, pincount, configuration)
idx += 1
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-3.5.0/Lib/test/test_contains.py
|
8
|
from collections import deque
import unittest
class base_set:
def __init__(self, el):
self.el = el
class myset(base_set):
def __contains__(self, el):
return self.el == el
class seq(base_set):
def __getitem__(self, n):
return [self.el][n]
class TestContains(unittest.TestCase):
def test_common_tests(self):
a = base_set(1)
b = myset(1)
c = seq(1)
self.assertIn(1, b)
self.assertNotIn(0, b)
self.assertIn(1, c)
self.assertNotIn(0, c)
self.assertRaises(TypeError, lambda: 1 in a)
self.assertRaises(TypeError, lambda: 1 not in a)
# test char in string
self.assertIn('c', 'abc')
self.assertNotIn('d', 'abc')
self.assertIn('', '')
self.assertIn('', 'abc')
self.assertRaises(TypeError, lambda: None in 'abc')
def test_builtin_sequence_types(self):
# a collection of tests on builtin sequence types
a = range(10)
for i in a:
self.assertIn(i, a)
self.assertNotIn(16, a)
self.assertNotIn(a, a)
a = tuple(a)
for i in a:
self.assertIn(i, a)
self.assertNotIn(16, a)
self.assertNotIn(a, a)
class Deviant1:
"""Behaves strangely when compared
This class is designed to make sure that the contains code
works when the list is modified during the check.
"""
aList = list(range(15))
def __eq__(self, other):
if other == 12:
self.aList.remove(12)
self.aList.remove(13)
self.aList.remove(14)
return 0
self.assertNotIn(Deviant1(), Deviant1.aList)
def test_nonreflexive(self):
# containment and equality tests involving elements that are
# not necessarily equal to themselves
class MyNonReflexive(object):
def __eq__(self, other):
return False
def __hash__(self):
return 28
values = float('nan'), 1, None, 'abc', MyNonReflexive()
constructors = list, tuple, dict.fromkeys, set, frozenset, deque
for constructor in constructors:
container = constructor(values)
for elem in container:
self.assertIn(elem, container)
self.assertTrue(container == constructor(values))
self.assertTrue(container == container)
if __name__ == '__main__':
unittest.main()
|
harveyxia/luigi
|
refs/heads/master
|
test/worker_parallel_scheduling_test.py
|
7
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pickle
import time
from helpers import unittest
import luigi
import mock
from luigi.worker import Worker
class SlowCompleteWrapper(luigi.WrapperTask):
def requires(self):
return [SlowCompleteTask(i) for i in range(4)]
class SlowCompleteTask(luigi.Task):
n = luigi.IntParameter()
def complete(self):
time.sleep(0.1)
return True
class OverlappingSelfDependenciesTask(luigi.Task):
n = luigi.IntParameter()
k = luigi.IntParameter()
def complete(self):
return self.n < self.k or self.k == 0
def requires(self):
return [OverlappingSelfDependenciesTask(self.n - 1, k) for k in range(self.k + 1)]
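# A task above counts as complete when n < k or k == 0, so scheduling the
# incomplete root spawns an overlapping dependency graph that mixes PENDING
# and DONE nodes - exactly what the parallel-scheduling test below asserts on.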
class ExceptionCompleteTask(luigi.Task):
def complete(self):
assert False
class ExceptionRequiresTask(luigi.Task):
def requires(self):
assert False
class UnpicklableExceptionTask(luigi.Task):
def complete(self):
class UnpicklableException(Exception):
pass
raise UnpicklableException()
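# The exception class above is defined inside complete(), so the pickle
# module cannot import it by name - that is what makes it unpicklable when
# the multiprocess worker tries to send it back to the parent process.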
class ParallelSchedulingTest(unittest.TestCase):
def setUp(self):
self.sch = mock.Mock()
self.w = Worker(scheduler=self.sch, worker_id='x')
def added_tasks(self, status):
return [kw['task_id'] for args, kw in self.sch.add_task.call_args_list if kw['status'] == status]
def test_multiprocess_scheduling_with_overlapping_dependencies(self):
self.w.add(OverlappingSelfDependenciesTask(5, 2), True)
self.assertEqual(15, self.sch.add_task.call_count)
self.assertEqual(set((
'OverlappingSelfDependenciesTask(n=1, k=1)',
'OverlappingSelfDependenciesTask(n=2, k=1)',
'OverlappingSelfDependenciesTask(n=2, k=2)',
'OverlappingSelfDependenciesTask(n=3, k=1)',
'OverlappingSelfDependenciesTask(n=3, k=2)',
'OverlappingSelfDependenciesTask(n=4, k=1)',
'OverlappingSelfDependenciesTask(n=4, k=2)',
'OverlappingSelfDependenciesTask(n=5, k=2)',
)), set(self.added_tasks('PENDING')))
self.assertEqual(set((
'OverlappingSelfDependenciesTask(n=0, k=0)',
'OverlappingSelfDependenciesTask(n=0, k=1)',
'OverlappingSelfDependenciesTask(n=1, k=0)',
'OverlappingSelfDependenciesTask(n=1, k=2)',
'OverlappingSelfDependenciesTask(n=2, k=0)',
'OverlappingSelfDependenciesTask(n=3, k=0)',
'OverlappingSelfDependenciesTask(n=4, k=0)',
)), set(self.added_tasks('DONE')))
@mock.patch('luigi.notifications.send_error_email')
def test_raise_exception_in_complete(self, send):
self.w.add(ExceptionCompleteTask(), multiprocess=True)
self.assertTrue(send.called)
self.assertEqual(0, self.sch.add_task.call_count)
self.assertTrue('assert False' in send.call_args[0][1])
@mock.patch('luigi.notifications.send_error_email')
def test_raise_unpicklable_exception_in_complete(self, send):
# verify exception can't be pickled
self.assertRaises(Exception, UnpicklableExceptionTask().complete)
try:
UnpicklableExceptionTask().complete()
except Exception as e:
ex = e
self.assertRaises(pickle.PicklingError, pickle.dumps, ex)
# verify this can run async
self.w.add(UnpicklableExceptionTask(), multiprocess=True)
self.assertTrue(send.called)
self.assertEqual(0, self.sch.add_task.call_count)
self.assertTrue('raise UnpicklableException()' in send.call_args[0][1])
@mock.patch('luigi.notifications.send_error_email')
def test_raise_exception_in_requires(self, send):
self.w.add(ExceptionRequiresTask(), multiprocess=True)
self.assertTrue(send.called)
self.assertEqual(0, self.sch.add_task.call_count)
if __name__ == '__main__':
unittest.main()
|
111pontes/ydk-py
|
refs/heads/master
|
cisco-ios-xr/ydk/models/__init__.py
|
179
|
import pkg_resources
pkg_resources.declare_namespace(__name__)
|
zhoffice/minos
|
refs/heads/master
|
supervisor/supervisor/tests/test_process.py
|
5
|
import os
import signal
import time
import unittest
import sys
import errno
from mock import Mock, patch, sentinel
from supervisor.tests.base import DummyOptions
from supervisor.tests.base import DummyPConfig
from supervisor.tests.base import DummyProcess
from supervisor.tests.base import DummyPGroupConfig
from supervisor.tests.base import DummyDispatcher
from supervisor.tests.base import DummyEvent
from supervisor.tests.base import DummyFCGIGroupConfig
from supervisor.tests.base import DummySocketConfig
from supervisor.tests.base import DummyProcessGroup
from supervisor.tests.base import DummyFCGIProcessGroup
from supervisor.tests.base import DummySocketManager
from supervisor.process import Subprocess
class SubprocessTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.process import Subprocess
return Subprocess
def _makeOne(self, *arg, **kw):
return self._getTargetClass()(*arg, **kw)
def tearDown(self):
from supervisor.events import clear
clear()
def test_getProcessStateDescription(self):
from supervisor.states import ProcessStates
from supervisor.process import getProcessStateDescription
for statename, code in ProcessStates.__dict__.items():
self.assertEqual(getProcessStateDescription(code), statename)
def test_ctor(self):
options = DummyOptions()
config = DummyPConfig(options, 'cat', 'bin/cat',
stdout_logfile='/tmp/temp123.log',
stderr_logfile='/tmp/temp456.log')
instance = self._makeOne(config)
self.assertEqual(instance.config, config)
self.assertEqual(instance.config.options, options)
self.assertEqual(instance.laststart, 0)
self.assertEqual(instance.pid, 0)
self.assertEqual(instance.laststart, 0)
self.assertEqual(instance.laststop, 0)
self.assertEqual(instance.delay, 0)
self.assertEqual(instance.administrative_stop, 0)
self.assertEqual(instance.killing, 0)
self.assertEqual(instance.backoff, 0)
self.assertEqual(instance.pipes, {})
self.assertEqual(instance.dispatchers, {})
self.assertEqual(instance.spawnerr, None)
def test_repr(self):
options = DummyOptions()
config = DummyPConfig(options, 'cat', 'bin/cat')
instance = self._makeOne(config)
s = repr(instance)
self.assertTrue(s.startswith('<Subprocess at'))
self.assertTrue(s.endswith('with name cat in state STOPPED>'))
def test_reopenlogs(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test')
instance = self._makeOne(config)
instance.dispatchers = {0:DummyDispatcher(readable=True),
1:DummyDispatcher(writable=True)}
instance.reopenlogs()
self.assertEqual(instance.dispatchers[0].logs_reopened, True)
self.assertEqual(instance.dispatchers[1].logs_reopened, False)
def test_removelogs(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test')
instance = self._makeOne(config)
instance.dispatchers = {0:DummyDispatcher(readable=True),
1:DummyDispatcher(writable=True)}
instance.removelogs()
self.assertEqual(instance.dispatchers[0].logs_removed, True)
self.assertEqual(instance.dispatchers[1].logs_removed, False)
def test_drain(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test',
stdout_logfile='/tmp/foo',
stderr_logfile='/tmp/bar')
instance = self._makeOne(config)
instance.dispatchers = {0:DummyDispatcher(readable=True),
1:DummyDispatcher(writable=True)}
instance.drain()
self.assertTrue(instance.dispatchers[0].read_event_handled)
self.assertTrue(instance.dispatchers[1].write_event_handled)
def test_get_execv_args_abs_missing(self):
options = DummyOptions()
config = DummyPConfig(options, 'notthere', '/notthere')
instance = self._makeOne(config)
args = instance.get_execv_args()
self.assertEqual(args, ('/notthere', ['/notthere']))
def test_get_execv_args_abs_withquotes_missing(self):
options = DummyOptions()
config = DummyPConfig(options, 'notthere', '/notthere "an argument"')
instance = self._makeOne(config)
args = instance.get_execv_args()
self.assertEqual(args, ('/notthere', ['/notthere', 'an argument']))
def test_get_execv_args_rel_missing(self):
options = DummyOptions()
config = DummyPConfig(options, 'notthere', 'notthere')
instance = self._makeOne(config)
args = instance.get_execv_args()
self.assertEqual(args, ('notthere', ['notthere']))
def test_get_execv_args_rel_withquotes_missing(self):
options = DummyOptions()
config = DummyPConfig(options, 'notthere', 'notthere "an argument"')
instance = self._makeOne(config)
args = instance.get_execv_args()
self.assertEqual(args, ('notthere', ['notthere', 'an argument']))
def test_get_execv_args_abs(self):
executable = '/bin/sh foo'
options = DummyOptions()
config = DummyPConfig(options, 'sh', executable)
instance = self._makeOne(config)
args = instance.get_execv_args()
self.assertEqual(len(args), 2)
self.assertEqual(args[0], '/bin/sh')
self.assertEqual(args[1], ['/bin/sh', 'foo'])
def test_get_execv_args_rel(self):
executable = 'sh foo'
options = DummyOptions()
config = DummyPConfig(options, 'sh', executable)
instance = self._makeOne(config)
args = instance.get_execv_args()
self.assertEqual(len(args), 2)
self.assertEqual(args[0], '/bin/sh')
self.assertEqual(args[1], ['sh', 'foo'])
def test_record_spawnerr(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test')
instance = self._makeOne(config)
instance.record_spawnerr('foo')
self.assertEqual(instance.spawnerr, 'foo')
self.assertEqual(options.logger.data[0], 'spawnerr: foo')
def test_spawn_already_running(self):
options = DummyOptions()
config = DummyPConfig(options, 'sh', '/bin/sh')
instance = self._makeOne(config)
instance.pid = True
from supervisor.states import ProcessStates
instance.state = ProcessStates.RUNNING
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(options.logger.data[0], "process 'sh' already running")
self.assertEqual(instance.state, ProcessStates.RUNNING)
def test_spawn_fail_check_execv_args(self):
options = DummyOptions()
config = DummyPConfig(options, 'bad', '/bad/filename')
instance = self._makeOne(config)
from supervisor.states import ProcessStates
instance.state = ProcessStates.BACKOFF
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(instance.spawnerr, 'bad filename')
self.assertEqual(options.logger.data[0], "spawnerr: bad filename")
self.failUnless(instance.delay)
self.failUnless(instance.backoff)
from supervisor.states import ProcessStates
self.assertEqual(instance.state, ProcessStates.BACKOFF)
self.assertEqual(len(L), 2)
event1 = L[0]
event2 = L[1]
self.assertEqual(event1.__class__, events.ProcessStateStartingEvent)
self.assertEqual(event2.__class__, events.ProcessStateBackoffEvent)
def test_spawn_fail_make_pipes_emfile(self):
options = DummyOptions()
import errno
options.make_pipes_error = errno.EMFILE
config = DummyPConfig(options, 'good', '/good/filename')
instance = self._makeOne(config)
from supervisor.states import ProcessStates
instance.state = ProcessStates.BACKOFF
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(instance.spawnerr,
"too many open files to spawn 'good'")
self.assertEqual(options.logger.data[0],
"spawnerr: too many open files to spawn 'good'")
self.failUnless(instance.delay)
self.failUnless(instance.backoff)
from supervisor.states import ProcessStates
self.assertEqual(instance.state, ProcessStates.BACKOFF)
self.assertEqual(len(L), 2)
event1, event2 = L
self.assertEqual(event1.__class__, events.ProcessStateStartingEvent)
self.assertEqual(event2.__class__, events.ProcessStateBackoffEvent)
def test_spawn_fail_make_pipes_other(self):
options = DummyOptions()
options.make_pipes_error = 1
config = DummyPConfig(options, 'good', '/good/filename')
instance = self._makeOne(config)
from supervisor.states import ProcessStates
instance.state = ProcessStates.BACKOFF
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(instance.spawnerr, 'unknown error: EPERM')
self.assertEqual(options.logger.data[0],
"spawnerr: unknown error: EPERM")
self.failUnless(instance.delay)
self.failUnless(instance.backoff)
from supervisor.states import ProcessStates
self.assertEqual(instance.state, ProcessStates.BACKOFF)
self.assertEqual(len(L), 2)
event1, event2 = L
self.assertEqual(event1.__class__, events.ProcessStateStartingEvent)
self.assertEqual(event2.__class__, events.ProcessStateBackoffEvent)
def test_spawn_fork_fail_eagain(self):
options = DummyOptions()
import errno
options.fork_error = errno.EAGAIN
config = DummyPConfig(options, 'good', '/good/filename')
instance = self._makeOne(config)
from supervisor.states import ProcessStates
instance.state = ProcessStates.BACKOFF
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(instance.spawnerr,
"Too many processes in process table to spawn 'good'")
self.assertEqual(options.logger.data[0],
"spawnerr: Too many processes in process table to spawn 'good'")
self.assertEqual(len(options.parent_pipes_closed), 6)
self.assertEqual(len(options.child_pipes_closed), 6)
self.failUnless(instance.delay)
self.failUnless(instance.backoff)
from supervisor.states import ProcessStates
self.assertEqual(instance.state, ProcessStates.BACKOFF)
self.assertEqual(len(L), 2)
event1, event2 = L
self.assertEqual(event1.__class__, events.ProcessStateStartingEvent)
self.assertEqual(event2.__class__, events.ProcessStateBackoffEvent)
def test_spawn_fork_fail_other(self):
options = DummyOptions()
options.fork_error = 1
config = DummyPConfig(options, 'good', '/good/filename')
instance = self._makeOne(config)
from supervisor.states import ProcessStates
instance.state = ProcessStates.BACKOFF
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(instance.spawnerr, 'unknown error: EPERM')
self.assertEqual(options.logger.data[0],
"spawnerr: unknown error: EPERM")
self.assertEqual(len(options.parent_pipes_closed), 6)
self.assertEqual(len(options.child_pipes_closed), 6)
self.failUnless(instance.delay)
self.failUnless(instance.backoff)
from supervisor.states import ProcessStates
self.assertEqual(instance.state, ProcessStates.BACKOFF)
self.assertEqual(len(L), 2)
event1, event2 = L
self.assertEqual(event1.__class__, events.ProcessStateStartingEvent)
self.assertEqual(event2.__class__, events.ProcessStateBackoffEvent)
def test_spawn_as_child_setuid_ok(self):
options = DummyOptions()
options.forkpid = 0
config = DummyPConfig(options, 'good', '/good/filename', uid=1)
instance = self._makeOne(config)
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(options.parent_pipes_closed, None)
self.assertEqual(options.child_pipes_closed, None)
self.assertEqual(options.pgrp_set, True)
self.assertEqual(len(options.duped), 3)
self.assertEqual(len(options.fds_closed), options.minfds - 3)
self.assertEqual(options.written, {})
self.assertEqual(options.privsdropped, 1)
self.assertEqual(options.execv_args,
('/good/filename', ['/good/filename']) )
self.assertEqual(options._exitcode, 127)
def test_spawn_as_child_setuid_fail(self):
options = DummyOptions()
options.forkpid = 0
options.setuid_msg = 'screwed'
config = DummyPConfig(options, 'good', '/good/filename', uid=1)
instance = self._makeOne(config)
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(options.parent_pipes_closed, None)
self.assertEqual(options.child_pipes_closed, None)
self.assertEqual(options.pgrp_set, True)
self.assertEqual(len(options.duped), 3)
self.assertEqual(len(options.fds_closed), options.minfds - 3)
self.assertEqual(options.written,
{2: 'supervisor: error trying to setuid to 1 (screwed)\n'})
self.assertEqual(options.privsdropped, None)
self.assertEqual(options.execv_args,
('/good/filename', ['/good/filename']) )
self.assertEqual(options._exitcode, 127)
def test_spawn_as_child_cwd_ok(self):
options = DummyOptions()
options.forkpid = 0
config = DummyPConfig(options, 'good', '/good/filename',
directory='/tmp')
instance = self._makeOne(config)
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(options.parent_pipes_closed, None)
self.assertEqual(options.child_pipes_closed, None)
self.assertEqual(options.pgrp_set, True)
self.assertEqual(len(options.duped), 3)
self.assertEqual(len(options.fds_closed), options.minfds - 3)
self.assertEqual(options.written, {})
self.assertEqual(options.execv_args,
('/good/filename', ['/good/filename']) )
self.assertEqual(options._exitcode, 127)
self.assertEqual(options.changed_directory, True)
def test_spawn_as_child_sets_umask(self):
options = DummyOptions()
options.forkpid = 0
config = DummyPConfig(options, 'good', '/good/filename', umask=002)
instance = self._makeOne(config)
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(options.written, {})
self.assertEqual(options.execv_args,
('/good/filename', ['/good/filename']) )
self.assertEqual(options._exitcode, 127)
self.assertEqual(options.umaskset, 002)
def test_spawn_as_child_cwd_fail(self):
options = DummyOptions()
options.forkpid = 0
options.chdir_error = 2
config = DummyPConfig(options, 'good', '/good/filename',
directory='/tmp')
instance = self._makeOne(config)
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(options.parent_pipes_closed, None)
self.assertEqual(options.child_pipes_closed, None)
self.assertEqual(options.pgrp_set, True)
self.assertEqual(len(options.duped), 3)
self.assertEqual(len(options.fds_closed), options.minfds - 3)
self.assertEqual(options.execv_args, None)
self.assertEqual(options.written,
{2: "couldn't chdir to /tmp: ENOENT\n"})
self.assertEqual(options._exitcode, 127)
self.assertEqual(options.changed_directory, False)
def test_spawn_as_child_execv_fail_oserror(self):
options = DummyOptions()
options.forkpid = 0
options.execv_error = 1
config = DummyPConfig(options, 'good', '/good/filename')
instance = self._makeOne(config)
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(options.parent_pipes_closed, None)
self.assertEqual(options.child_pipes_closed, None)
self.assertEqual(options.pgrp_set, True)
self.assertEqual(len(options.duped), 3)
self.assertEqual(len(options.fds_closed), options.minfds - 3)
self.assertEqual(options.written,
{2: "couldn't exec /good/filename: EPERM\n"})
self.assertEqual(options.privsdropped, None)
self.assertEqual(options._exitcode, 127)
def test_spawn_as_child_execv_fail_runtime_error(self):
options = DummyOptions()
options.forkpid = 0
options.execv_error = 2
config = DummyPConfig(options, 'good', '/good/filename')
instance = self._makeOne(config)
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(options.parent_pipes_closed, None)
self.assertEqual(options.child_pipes_closed, None)
self.assertEqual(options.pgrp_set, True)
self.assertEqual(len(options.duped), 3)
self.assertEqual(len(options.fds_closed), options.minfds - 3)
msg = options.written[2] # dict, 2 is fd #
self.failUnless(msg.startswith("couldn't exec /good/filename:"))
self.failUnless("exceptions.RuntimeError" in msg)
self.assertEqual(options.privsdropped, None)
self.assertEqual(options._exitcode, 127)
def test_spawn_as_child_uses_pconfig_environment(self):
options = DummyOptions()
options.forkpid = 0
config = DummyPConfig(options, 'cat', '/bin/cat',
environment={'_TEST_':'1'})
instance = self._makeOne(config)
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(options.execv_args, ('/bin/cat', ['/bin/cat']) )
self.assertEqual(options.execv_environment['_TEST_'], '1')
def test_spawn_as_child_environment_supervisor_envvars(self):
options = DummyOptions()
options.forkpid = 0
config = DummyPConfig(options, 'cat', '/bin/cat')
instance = self._makeOne(config)
class Dummy:
name = 'dummy'
instance.group = Dummy()
instance.group.config = Dummy()
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(options.execv_args, ('/bin/cat', ['/bin/cat']) )
self.assertEqual(
options.execv_environment['SUPERVISOR_ENABLED'], '1')
self.assertEqual(
options.execv_environment['SUPERVISOR_PROCESS_NAME'], 'cat')
self.assertEqual(
options.execv_environment['SUPERVISOR_GROUP_NAME'], 'dummy')
self.assertEqual(
options.execv_environment['SUPERVISOR_SERVER_URL'],
'http://localhost:9001')
def test_spawn_as_child_stderr_redirected(self):
options = DummyOptions()
options.forkpid = 0
config = DummyPConfig(options, 'good', '/good/filename', uid=1)
config.redirect_stderr = True
instance = self._makeOne(config)
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(options.parent_pipes_closed, None)
self.assertEqual(options.child_pipes_closed, None)
self.assertEqual(options.pgrp_set, True)
self.assertEqual(len(options.duped), 2)
self.assertEqual(len(options.fds_closed), options.minfds - 3)
self.assertEqual(options.written, {})
self.assertEqual(options.privsdropped, 1)
self.assertEqual(options.execv_args,
('/good/filename', ['/good/filename']) )
self.assertEqual(options._exitcode, 127)
def test_spawn_as_parent(self):
options = DummyOptions()
options.forkpid = 10
config = DummyPConfig(options, 'good', '/good/filename')
instance = self._makeOne(config)
result = instance.spawn()
self.assertEqual(result, 10)
self.assertEqual(instance.dispatchers[4].__class__, DummyDispatcher)
self.assertEqual(instance.dispatchers[5].__class__, DummyDispatcher)
self.assertEqual(instance.dispatchers[7].__class__, DummyDispatcher)
self.assertEqual(instance.pipes['stdin'], 4)
self.assertEqual(instance.pipes['stdout'], 5)
self.assertEqual(instance.pipes['stderr'], 7)
self.assertEqual(options.parent_pipes_closed, None)
self.assertEqual(len(options.child_pipes_closed), 6)
self.assertEqual(options.logger.data[0], "spawned: 'good' with pid 10")
self.assertEqual(instance.spawnerr, None)
self.failUnless(instance.delay)
self.assertEqual(instance.config.options.pidhistory[10], instance)
from supervisor.states import ProcessStates
self.assertEqual(instance.state, ProcessStates.STARTING)
def test_spawn_redirect_stderr(self):
options = DummyOptions()
options.forkpid = 10
config = DummyPConfig(options, 'good', '/good/filename',
redirect_stderr=True)
instance = self._makeOne(config)
result = instance.spawn()
self.assertEqual(result, 10)
self.assertEqual(instance.dispatchers[4].__class__, DummyDispatcher)
self.assertEqual(instance.dispatchers[5].__class__, DummyDispatcher)
self.assertEqual(instance.pipes['stdin'], 4)
self.assertEqual(instance.pipes['stdout'], 5)
self.assertEqual(instance.pipes['stderr'], None)
def test_write(self):
executable = '/bin/cat'
options = DummyOptions()
config = DummyPConfig(options, 'output', executable)
instance = self._makeOne(config)
sent = 'a' * (1 << 13)
self.assertRaises(OSError, instance.write, sent)
options.forkpid = 1
result = instance.spawn()
instance.write(sent)
stdin_fd = instance.pipes['stdin']
self.assertEqual(sent, instance.dispatchers[stdin_fd].input_buffer)
instance.killing = True
self.assertRaises(OSError, instance.write, sent)
def test_write_dispatcher_closed(self):
executable = '/bin/cat'
options = DummyOptions()
config = DummyPConfig(options, 'output', executable)
instance = self._makeOne(config)
sent = 'a' * (1 << 13)
self.assertRaises(OSError, instance.write, sent)
options.forkpid = 1
result = instance.spawn()
stdin_fd = instance.pipes['stdin']
instance.dispatchers[stdin_fd].close()
self.assertRaises(OSError, instance.write, sent)
def test_write_dispatcher_flush_raises_epipe(self):
executable = '/bin/cat'
options = DummyOptions()
config = DummyPConfig(options, 'output', executable)
instance = self._makeOne(config)
sent = 'a' * (1 << 13)
self.assertRaises(OSError, instance.write, sent)
options.forkpid = 1
result = instance.spawn()
stdin_fd = instance.pipes['stdin']
instance.dispatchers[stdin_fd].flush_error = errno.EPIPE
self.assertRaises(OSError, instance.write, sent)
def dont_test_spawn_and_kill(self):
# this is a functional test
from supervisor.tests.base import makeSpew
try:
called = 0
def foo(*args):
called = 1
signal.signal(signal.SIGCHLD, foo)
executable = makeSpew()
options = DummyOptions()
config = DummyPConfig(options, 'spew', executable)
instance = self._makeOne(config)
result = instance.spawn()
msg = options.logger.data[0]
self.failUnless(msg.startswith("spawned: 'spew' with pid"))
self.assertEqual(len(instance.pipes), 6)
self.failUnless(instance.pid)
self.failUnlessEqual(instance.pid, result)
origpid = instance.pid
import errno
while 1:
try:
data = os.popen('ps').read()
break
except IOError, why:
if why[0] != errno.EINTR:
raise
# try again ;-)
time.sleep(0.1) # arbitrary, race condition possible
self.failUnless(data.find(`origpid`) != -1 )
msg = instance.kill(signal.SIGTERM)
time.sleep(0.1) # arbitrary, race condition possible
self.assertEqual(msg, None)
pid, sts = os.waitpid(-1, os.WNOHANG)
data = os.popen('ps').read()
self.assertEqual(data.find(`origpid`), -1) # dubious
finally:
try:
os.remove(executable)
except:
pass
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def test_stop(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test')
instance = self._makeOne(config)
instance.pid = 11
dispatcher = DummyDispatcher(writable=True)
instance.dispatchers = {'foo':dispatcher}
from supervisor.states import ProcessStates
instance.state = ProcessStates.RUNNING
instance.stop()
self.assertEqual(instance.administrative_stop, 1)
self.failUnless(instance.delay)
self.assertEqual(options.logger.data[0], 'killing test (pid 11) with '
'signal SIGTERM')
self.assertEqual(instance.killing, 1)
self.assertEqual(options.kills[11], signal.SIGTERM)
def test_give_up(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test')
instance = self._makeOne(config)
L = []
from supervisor.states import ProcessStates
from supervisor import events
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
instance.state = ProcessStates.BACKOFF
instance.give_up()
self.assertEqual(instance.system_stop, 1)
self.assertFalse(instance.delay)
self.assertFalse(instance.backoff)
self.assertEqual(instance.state, ProcessStates.FATAL)
self.assertEqual(len(L), 1)
event = L[0]
self.assertEqual(event.__class__, events.ProcessStateFatalEvent)
def test_kill_nopid(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test')
instance = self._makeOne(config)
instance.kill(signal.SIGTERM)
self.assertEqual(options.logger.data[0],
'attempted to kill test with sig SIGTERM but it wasn\'t running')
self.assertEqual(instance.killing, 0)
def test_kill_error(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test')
options.kill_error = 1
instance = self._makeOne(config)
L = []
from supervisor.states import ProcessStates
from supervisor import events
events.subscribe(events.ProcessStateEvent,
lambda x: L.append(x))
instance.pid = 11
instance.state = ProcessStates.RUNNING
instance.kill(signal.SIGTERM)
self.assertEqual(options.logger.data[0], 'killing test (pid 11) with '
'signal SIGTERM')
self.failUnless(options.logger.data[1].startswith(
'unknown problem killing test'))
self.assertEqual(instance.killing, 0)
self.assertEqual(len(L), 2)
event1 = L[0]
event2 = L[1]
self.assertEqual(event1.__class__, events.ProcessStateStoppingEvent)
self.assertEqual(event2.__class__, events.ProcessStateUnknownEvent)
def test_kill_from_starting(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test')
instance = self._makeOne(config)
instance.pid = 11
L = []
from supervisor.states import ProcessStates
from supervisor import events
events.subscribe(events.ProcessStateEvent,lambda x: L.append(x))
instance.state = ProcessStates.STARTING
instance.kill(signal.SIGTERM)
self.assertEqual(options.logger.data[0], 'killing test (pid 11) with '
'signal SIGTERM')
self.assertEqual(instance.killing, 1)
self.assertEqual(options.kills[11], signal.SIGTERM)
self.assertEqual(len(L), 1)
event = L[0]
self.assertEqual(event.__class__, events.ProcessStateStoppingEvent)
def test_kill_from_running(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test')
instance = self._makeOne(config)
instance.pid = 11
L = []
from supervisor.states import ProcessStates
from supervisor import events
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
instance.state = ProcessStates.RUNNING
instance.kill(signal.SIGTERM)
self.assertEqual(options.logger.data[0], 'killing test (pid 11) with '
'signal SIGTERM')
self.assertEqual(instance.killing, 1)
self.assertEqual(options.kills[11], signal.SIGTERM)
self.assertEqual(len(L), 1)
event = L[0]
self.assertEqual(event.__class__, events.ProcessStateStoppingEvent)
def test_kill_from_stopping(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test')
instance = self._makeOne(config)
instance.pid = 11
L = []
from supervisor.states import ProcessStates
from supervisor import events
events.subscribe(events.Event,lambda x: L.append(x))
instance.state = ProcessStates.STOPPING
instance.kill(signal.SIGKILL)
self.assertEqual(options.logger.data[0], 'killing test (pid 11) with '
'signal SIGKILL')
self.assertEqual(instance.killing, 1)
self.assertEqual(options.kills[11], signal.SIGKILL)
self.assertEqual(L, []) # no event because we didn't change state
def test_kill_from_stopping_w_killasgroup(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test', killasgroup=True)
instance = self._makeOne(config)
instance.pid = 11
L = []
from supervisor.states import ProcessStates
from supervisor import events
events.subscribe(events.Event,lambda x: L.append(x))
instance.state = ProcessStates.STOPPING
instance.kill(signal.SIGKILL)
self.assertEqual(options.logger.data[0], 'killing test (pid 11) '
'process group with signal SIGKILL')
self.assertEqual(instance.killing, 1)
self.assertEqual(options.kills[-11], signal.SIGKILL)
self.assertEqual(L, []) # no event because we didn't change state
def test_stopasgroup(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test', stopasgroup=True)
instance = self._makeOne(config)
instance.pid = 11
L = []
from supervisor.states import ProcessStates
from supervisor import events
events.subscribe(events.Event,lambda x: L.append(x))
instance.state = ProcessStates.RUNNING
instance.kill(signal.SIGTERM)
self.assertEqual(options.logger.data[0], 'killing test (pid 11) '
'process group with signal SIGTERM')
self.assertEqual(instance.killing, 1)
self.assertEqual(options.kills[-11], signal.SIGTERM)
self.assertEqual(len(L), 1)
event = L[0]
self.assertEqual(event.__class__, events.ProcessStateStoppingEvent)
self.assertEqual(event.extra_values, [('pid', 11)])
self.assertEqual(event.from_state, ProcessStates.RUNNING)
def test_finish(self):
options = DummyOptions()
config = DummyPConfig(options, 'notthere', '/notthere',
stdout_logfile='/tmp/foo')
instance = self._makeOne(config)
instance.waitstatus = (123, 1) # pid, waitstatus
instance.config.options.pidhistory[123] = instance
instance.killing = 1
pipes = {'stdout':'','stderr':''}
instance.pipes = pipes
from supervisor.states import ProcessStates
from supervisor import events
instance.state = ProcessStates.STOPPING
L = []
events.subscribe(events.ProcessStateStoppedEvent, lambda x: L.append(x))
instance.pid = 123
instance.finish(123, 1)
self.assertEqual(instance.killing, 0)
self.assertEqual(instance.pid, 0)
self.assertEqual(options.parent_pipes_closed, pipes)
self.assertEqual(instance.pipes, {})
self.assertEqual(instance.dispatchers, {})
self.assertEqual(options.logger.data[0], 'stopped: notthere '
'(terminated by SIGHUP)')
self.assertEqual(instance.exitstatus, -1)
self.assertEqual(len(L), 1)
event = L[0]
self.assertEqual(event.__class__, events.ProcessStateStoppedEvent)
self.assertEqual(event.extra_values, [('pid', 123)])
self.assertEqual(event.from_state, ProcessStates.STOPPING)
def test_finish_expected(self):
options = DummyOptions()
config = DummyPConfig(options, 'notthere', '/notthere',
stdout_logfile='/tmp/foo')
instance = self._makeOne(config)
instance.config.options.pidhistory[123] = instance
pipes = {'stdout':'','stderr':''}
instance.pipes = pipes
instance.config.exitcodes =[-1]
from supervisor.states import ProcessStates
from supervisor import events
instance.state = ProcessStates.RUNNING
L = []
events.subscribe(events.ProcessStateExitedEvent, lambda x: L.append(x))
instance.pid = 123
instance.finish(123, 1)
self.assertEqual(instance.killing, 0)
self.assertEqual(instance.pid, 0)
self.assertEqual(options.parent_pipes_closed, pipes)
self.assertEqual(instance.pipes, {})
self.assertEqual(instance.dispatchers, {})
self.assertEqual(options.logger.data[0],
'exited: notthere (terminated by SIGHUP; expected)')
self.assertEqual(instance.exitstatus, -1)
self.assertEqual(len(L), 1)
event = L[0]
self.assertEqual(event.__class__,
events.ProcessStateExitedEvent)
self.assertEqual(event.expected, True)
self.assertEqual(event.extra_values, [('expected', True), ('pid', 123)])
self.assertEqual(event.from_state, ProcessStates.RUNNING)
def test_finish_tooquickly(self):
options = DummyOptions()
config = DummyPConfig(options, 'notthere', '/notthere',
stdout_logfile='/tmp/foo', startsecs=10)
instance = self._makeOne(config)
instance.config.options.pidhistory[123] = instance
pipes = {'stdout':'','stderr':''}
instance.pipes = pipes
instance.config.exitcodes =[-1]
import time
instance.laststart = time.time()
from supervisor.states import ProcessStates
from supervisor import events
instance.state = ProcessStates.STARTING
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
instance.pid = 123
instance.finish(123, 1)
self.assertEqual(instance.killing, 0)
self.assertEqual(instance.pid, 0)
self.assertEqual(options.parent_pipes_closed, pipes)
self.assertEqual(instance.pipes, {})
self.assertEqual(instance.dispatchers, {})
self.assertEqual(options.logger.data[0],
'exited: notthere (terminated by SIGHUP; not expected)')
self.assertEqual(instance.exitstatus, None)
self.assertEqual(len(L), 1)
event = L[0]
self.assertEqual(event.__class__, events.ProcessStateBackoffEvent)
self.assertEqual(event.from_state, ProcessStates.STARTING)
def test_finish_with_current_event_sends_rejected(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
events.subscribe(events.EventRejectedEvent, lambda x: L.append(x))
options = DummyOptions()
config = DummyPConfig(options, 'notthere', '/notthere',
stdout_logfile='/tmp/foo', startsecs=10)
instance = self._makeOne(config)
from supervisor.states import ProcessStates
instance.state = ProcessStates.RUNNING
event = DummyEvent()
instance.event = event
instance.finish(123, 1)
self.assertEqual(len(L), 2)
event1, event2 = L
self.assertEqual(event1.__class__,
events.ProcessStateExitedEvent)
self.assertEqual(event2.__class__, events.EventRejectedEvent)
self.assertEqual(event2.process, instance)
self.assertEqual(event2.event, event)
self.assertEqual(instance.event, None)
def test_set_uid_no_uid(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test')
instance = self._makeOne(config)
instance.set_uid()
self.assertEqual(options.privsdropped, None)
def test_set_uid(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test', uid=1)
instance = self._makeOne(config)
msg = instance.set_uid()
self.assertEqual(options.privsdropped, 1)
self.assertEqual(msg, None)
def test_cmp_bypriority(self):
options = DummyOptions()
config = DummyPConfig(options, 'notthere', '/notthere',
stdout_logfile='/tmp/foo',
priority=1)
instance = self._makeOne(config)
config = DummyPConfig(options, 'notthere1', '/notthere',
stdout_logfile='/tmp/foo',
priority=2)
instance1 = self._makeOne(config)
config = DummyPConfig(options, 'notthere2', '/notthere',
stdout_logfile='/tmp/foo',
priority=3)
instance2 = self._makeOne(config)
L = [instance2, instance, instance1]
L.sort()
self.assertEqual(L, [instance, instance1, instance2])
def test_transition_stopped_to_starting_supervisor_stopping(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
from supervisor.states import ProcessStates, SupervisorStates
options = DummyOptions()
options.mood = SupervisorStates.SHUTDOWN
# this should not be spawned, as supervisor is shutting down
pconfig = DummyPConfig(options, 'process', 'process','/bin/process')
process = self._makeOne(pconfig)
process.laststart = 0
process.state = ProcessStates.STOPPED
process.transition()
self.assertEqual(process.state, ProcessStates.STOPPED)
self.assertEqual(L, [])
def test_transition_stopped_to_starting_supervisor_running(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
from supervisor.states import ProcessStates, SupervisorStates
options = DummyOptions()
options.mood = SupervisorStates.RUNNING
pconfig = DummyPConfig(options, 'process', 'process','/bin/process')
process = self._makeOne(pconfig)
process.laststart = 0
process.state = ProcessStates.STOPPED
process.transition()
self.assertEqual(process.state, ProcessStates.STARTING)
self.assertEqual(len(L), 1)
event = L[0]
self.assertEqual(event.__class__, events.ProcessStateStartingEvent)
def test_transition_exited_to_starting_supervisor_stopping(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
from supervisor.states import ProcessStates, SupervisorStates
options = DummyOptions()
options.mood = SupervisorStates.SHUTDOWN
# this should not be spawned, as supervisor is shutting down
pconfig = DummyPConfig(options, 'process', 'process','/bin/process')
from supervisor.datatypes import RestartUnconditionally
pconfig.autorestart = RestartUnconditionally
process = self._makeOne(pconfig)
process.laststart = 1
process.system_stop = 1
process.state = ProcessStates.EXITED
process.transition()
self.assertEqual(process.state, ProcessStates.EXITED)
self.assertEqual(process.system_stop, 1)
self.assertEqual(L, [])
def test_transition_exited_to_starting_uncond_supervisor_running(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
from supervisor.states import ProcessStates
options = DummyOptions()
pconfig = DummyPConfig(options, 'process', 'process','/bin/process')
from supervisor.datatypes import RestartUnconditionally
pconfig.autorestart = RestartUnconditionally
process = self._makeOne(pconfig)
process.laststart = 1
process.state = ProcessStates.EXITED
process.transition()
self.assertEqual(process.state, ProcessStates.STARTING)
self.assertEqual(len(L), 1)
event = L[0]
self.assertEqual(event.__class__, events.ProcessStateStartingEvent)
def test_transition_exited_to_starting_condit_supervisor_running(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
from supervisor.states import ProcessStates
options = DummyOptions()
pconfig = DummyPConfig(options, 'process', 'process','/bin/process')
from supervisor.datatypes import RestartWhenExitUnexpected
pconfig.autorestart = RestartWhenExitUnexpected
process = self._makeOne(pconfig)
process.laststart = 1
process.state = ProcessStates.EXITED
process.exitstatus = 'bogus'
process.transition()
self.assertEqual(process.state, ProcessStates.STARTING)
self.assertEqual(len(L), 1)
event = L[0]
self.assertEqual(event.__class__, events.ProcessStateStartingEvent)
def test_transition_exited_to_starting_condit_fls_supervisor_running(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
from supervisor.states import ProcessStates
options = DummyOptions()
pconfig = DummyPConfig(options, 'process', 'process','/bin/process')
from supervisor.datatypes import RestartWhenExitUnexpected
pconfig.autorestart = RestartWhenExitUnexpected
process = self._makeOne(pconfig)
process.laststart = 1
process.state = ProcessStates.EXITED
process.exitstatus = 0
process.transition()
self.assertEqual(process.state, ProcessStates.EXITED)
self.assertEqual(L, [])
def test_transition_backoff_to_starting_supervisor_stopping(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
from supervisor.states import ProcessStates, SupervisorStates
options = DummyOptions()
options.mood = SupervisorStates.SHUTDOWN
pconfig = DummyPConfig(options, 'process', 'process','/bin/process')
process = self._makeOne(pconfig)
process.laststart = 1
process.delay = 0
process.backoff = 0
process.state = ProcessStates.BACKOFF
process.transition()
self.assertEqual(process.state, ProcessStates.BACKOFF)
self.assertEqual(L, [])
def test_transition_backoff_to_starting_supervisor_running(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
from supervisor.states import ProcessStates, SupervisorStates
options = DummyOptions()
options.mood = SupervisorStates.RUNNING
pconfig = DummyPConfig(options, 'process', 'process','/bin/process')
process = self._makeOne(pconfig)
process.laststart = 1
process.delay = 0
process.backoff = 0
process.state = ProcessStates.BACKOFF
process.transition()
self.assertEqual(process.state, ProcessStates.STARTING)
self.assertEqual(len(L), 1)
self.assertEqual(L[0].__class__, events.ProcessStateStartingEvent)
def test_transition_backoff_to_starting_supervisor_running_notyet(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
from supervisor.states import ProcessStates, SupervisorStates
options = DummyOptions()
options.mood = SupervisorStates.RUNNING
pconfig = DummyPConfig(options, 'process', 'process','/bin/process')
process = self._makeOne(pconfig)
process.laststart = 1
process.delay = sys.maxint
process.backoff = 0
process.state = ProcessStates.BACKOFF
process.transition()
self.assertEqual(process.state, ProcessStates.BACKOFF)
self.assertEqual(L, [])
def test_transition_starting_to_running(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
from supervisor.states import ProcessStates
options = DummyOptions()
# this should go from STARTING to RUNNING via transition()
pconfig = DummyPConfig(options, 'process', 'process','/bin/process')
process = self._makeOne(pconfig)
process.backoff = 1
process.delay = 1
process.system_stop = 0
process.laststart = 1
process.pid = 1
process.stdout_buffer = 'abc'
process.stderr_buffer = 'def'
process.state = ProcessStates.STARTING
process.transition()
# this implies RUNNING
self.assertEqual(process.backoff, 0)
self.assertEqual(process.delay, 0)
self.assertEqual(process.system_stop, 0)
self.assertEqual(options.logger.data[0],
'success: process entered RUNNING state, process has '
'stayed up for > than 10 seconds (startsecs)')
self.assertEqual(len(L), 1)
event = L[0]
self.assertEqual(event.__class__, events.ProcessStateRunningEvent)
def test_transition_backoff_to_fatal(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
from supervisor.states import ProcessStates
options = DummyOptions()
# this should go from BACKOFF to FATAL via transition()
pconfig = DummyPConfig(options, 'process', 'process','/bin/process')
process = self._makeOne(pconfig)
process.laststart = 1
process.backoff = 10000
process.delay = 1
process.system_stop = 0
process.stdout_buffer = 'abc'
process.stderr_buffer = 'def'
process.state = ProcessStates.BACKOFF
process.transition()
# this implies FATAL
self.assertEqual(process.backoff, 0)
self.assertEqual(process.delay, 0)
self.assertEqual(process.system_stop, 1)
self.assertEqual(options.logger.data[0],
'gave up: process entered FATAL state, too many start'
' retries too quickly')
self.assertEqual(len(L), 1)
event = L[0]
self.assertEqual(event.__class__, events.ProcessStateFatalEvent)
def test_transition_stops_unkillable_notyet(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
from supervisor.states import ProcessStates
options = DummyOptions()
pconfig = DummyPConfig(options, 'process', 'process','/bin/process')
process = self._makeOne(pconfig)
process.delay = sys.maxint
process.state = ProcessStates.STOPPING
process.transition()
self.assertEqual(process.state, ProcessStates.STOPPING)
self.assertEqual(L, [])
def test_transition_stops_unkillable(self):
from supervisor import events
L = []
events.subscribe(events.ProcessStateEvent, lambda x: L.append(x))
from supervisor.states import ProcessStates
options = DummyOptions()
pconfig = DummyPConfig(options, 'process', 'process','/bin/process')
process = self._makeOne(pconfig)
process.delay = 0
process.pid = 1
process.killing = 0
process.state = ProcessStates.STOPPING
process.transition()
self.assertEqual(process.killing, 1)
self.assertNotEqual(process.delay, 0)
self.assertEqual(process.state, ProcessStates.STOPPING)
self.assertEqual(options.logger.data[0],
"killing 'process' (1) with SIGKILL")
import signal
self.assertEqual(options.kills[1], signal.SIGKILL)
self.assertEqual(L, [])
def test_change_state_doesnt_notify_if_no_state_change(self):
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test')
instance = self._makeOne(config)
instance.state = 10
self.assertEqual(instance.change_state(10), False)
def test_change_state_sets_backoff_and_delay(self):
from supervisor.states import ProcessStates
options = DummyOptions()
config = DummyPConfig(options, 'test', '/test')
instance = self._makeOne(config)
instance.state = 10
instance.change_state(ProcessStates.BACKOFF)
self.assertEqual(instance.backoff, 1)
self.failUnless(instance.delay > 0)
class FastCGISubprocessTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.process import FastCGISubprocess
return FastCGISubprocess
def _makeOne(self, *arg, **kw):
return self._getTargetClass()(*arg, **kw)
def tearDown(self):
from supervisor.events import clear
clear()
def test_no_group(self):
options = DummyOptions()
options.forkpid = 0
config = DummyPConfig(options, 'good', '/good/filename', uid=1)
instance = self._makeOne(config)
self.assertRaises(NotImplementedError, instance.spawn)
def test_no_socket_manager(self):
options = DummyOptions()
options.forkpid = 0
config = DummyPConfig(options, 'good', '/good/filename', uid=1)
instance = self._makeOne(config)
instance.group = DummyProcessGroup(DummyPGroupConfig(options))
self.assertRaises(NotImplementedError, instance.spawn)
def test_prepare_child_fds(self):
options = DummyOptions()
options.forkpid = 0
config = DummyPConfig(options, 'good', '/good/filename', uid=1)
instance = self._makeOne(config)
sock_config = DummySocketConfig(7)
gconfig = DummyFCGIGroupConfig(options, 'whatever', 999, None,
sock_config)
instance.group = DummyFCGIProcessGroup(gconfig)
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(len(options.duped), 3)
self.assertEqual(options.duped[7], 0)
self.assertEqual(options.duped[instance.pipes['child_stdout']], 1)
self.assertEqual(options.duped[instance.pipes['child_stderr']], 2)
self.assertEqual(len(options.fds_closed), options.minfds - 3)
def test_prepare_child_fds_stderr_redirected(self):
options = DummyOptions()
options.forkpid = 0
config = DummyPConfig(options, 'good', '/good/filename', uid=1)
config.redirect_stderr = True
instance = self._makeOne(config)
sock_config = DummySocketConfig(13)
gconfig = DummyFCGIGroupConfig(options, 'whatever', 999, None,
sock_config)
instance.group = DummyFCGIProcessGroup(gconfig)
result = instance.spawn()
self.assertEqual(result, None)
self.assertEqual(len(options.duped), 2)
self.assertEqual(options.duped[13], 0)
self.assertEqual(len(options.fds_closed), options.minfds - 3)
def test_before_spawn_gets_socket_ref(self):
options = DummyOptions()
config = DummyPConfig(options, 'good', '/good/filename', uid=1)
instance = self._makeOne(config)
sock_config = DummySocketConfig(7)
gconfig = DummyFCGIGroupConfig(options, 'whatever', 999, None,
sock_config)
instance.group = DummyFCGIProcessGroup(gconfig)
self.assertTrue(instance.fcgi_sock is None)
instance.before_spawn()
self.assertFalse(instance.fcgi_sock is None)
def test_after_finish_removes_socket_ref(self):
options = DummyOptions()
config = DummyPConfig(options, 'good', '/good/filename', uid=1)
instance = self._makeOne(config)
instance.fcgi_sock = 'hello'
instance.after_finish()
self.assertTrue(instance.fcgi_sock is None)
#Patch Subprocess.finish() method for this test to verify override
@patch.object(Subprocess, 'finish', Mock(return_value=sentinel.finish_result))
def test_finish_override(self):
options = DummyOptions()
config = DummyPConfig(options, 'good', '/good/filename', uid=1)
instance = self._makeOne(config)
instance.after_finish = Mock()
result = instance.finish(sentinel.pid, sentinel.sts)
self.assertEqual(sentinel.finish_result, result,
'FastCGISubprocess.finish() did not pass thru result')
self.assertEqual(1, instance.after_finish.call_count,
'FastCGISubprocess.after_finish() not called once')
finish_mock = Subprocess.finish
self.assertEqual(1, finish_mock.call_count,
'Subprocess.finish() not called once')
pid_arg = finish_mock.call_args[0][1]
sts_arg = finish_mock.call_args[0][2]
self.assertEqual(sentinel.pid, pid_arg,
'Subprocess.finish() pid arg was not passed')
self.assertEqual(sentinel.sts, sts_arg,
'Subprocess.finish() sts arg was not passed')
#Patch Subprocess.spawn() method for this test to verify override
@patch.object(Subprocess, 'spawn', Mock(return_value=sentinel.ppid))
def test_spawn_override_success(self):
options = DummyOptions()
config = DummyPConfig(options, 'good', '/good/filename', uid=1)
instance = self._makeOne(config)
instance.before_spawn = Mock()
result = instance.spawn()
self.assertEqual(sentinel.ppid, result,
'FastCGISubprocess.spawn() did not pass thru result')
self.assertEqual(1, instance.before_spawn.call_count,
'FastCGISubprocess.before_spawn() not called once')
spawn_mock = Subprocess.spawn
self.assertEqual(1, spawn_mock.call_count,
'Subprocess.spawn() not called once')
#Patch Subprocess.spawn() method for this test to verify error handling
@patch.object(Subprocess, 'spawn', Mock(return_value=None))
def test_spawn_error(self):
options = DummyOptions()
config = DummyPConfig(options, 'good', '/good/filename', uid=1)
instance = self._makeOne(config)
instance.before_spawn = Mock()
instance.fcgi_sock = 'nuke me on error'
result = instance.spawn()
self.assertEqual(None, result,
'FastCGISubprocess.spawn() did not return None on error')
self.assertEqual(1, instance.before_spawn.call_count,
'FastCGISubprocess.before_spawn() not called once')
self.assertEqual(None, instance.fcgi_sock,
'FastCGISubprocess.spawn() did not remove sock ref on error')
class ProcessGroupBaseTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.process import ProcessGroupBase
return ProcessGroupBase
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_get_unstopped_processes(self):
options = DummyOptions()
from supervisor.states import ProcessStates
pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
process1 = DummyProcess(pconfig1, state=ProcessStates.STOPPING)
gconfig = DummyPGroupConfig(options, pconfigs=[pconfig1])
group = self._makeOne(gconfig)
group.processes = { 'process1': process1 }
unstopped = group.get_unstopped_processes()
self.assertEqual(unstopped, [process1])
def test_stop_all(self):
from supervisor.states import ProcessStates
options = DummyOptions()
pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
process1 = DummyProcess(pconfig1, state=ProcessStates.STOPPED)
pconfig2 = DummyPConfig(options, 'process2', 'process2','/bin/process2')
process2 = DummyProcess(pconfig2, state=ProcessStates.RUNNING)
pconfig3 = DummyPConfig(options, 'process3', 'process3','/bin/process3')
process3 = DummyProcess(pconfig3, state=ProcessStates.STARTING)
pconfig4 = DummyPConfig(options, 'process4', 'process4','/bin/process4')
process4 = DummyProcess(pconfig4, state=ProcessStates.BACKOFF)
process4.delay = 1000
process4.backoff = 10
gconfig = DummyPGroupConfig(
options,
pconfigs=[pconfig1, pconfig2, pconfig3, pconfig4])
group = self._makeOne(gconfig)
group.processes = {'process1': process1, 'process2': process2,
'process3':process3, 'process4':process4}
group.stop_all()
self.assertEqual(process1.stop_called, False)
self.assertEqual(process2.stop_called, True)
self.assertEqual(process3.stop_called, True)
self.assertEqual(process4.stop_called, False)
self.assertEqual(process4.state, ProcessStates.FATAL)
def test_get_dispatchers(self):
options = DummyOptions()
from supervisor.states import ProcessStates
pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
process1 = DummyProcess(pconfig1, state=ProcessStates.STOPPING)
process1.dispatchers = {4:None}
pconfig2 = DummyPConfig(options, 'process2', 'process2','/bin/process2')
process2 = DummyProcess(pconfig2, state=ProcessStates.STOPPING)
process2.dispatchers = {5:None}
gconfig = DummyPGroupConfig(options, pconfigs=[pconfig1, pconfig2])
group = self._makeOne(gconfig)
group.processes = { 'process1': process1, 'process2': process2 }
result= group.get_dispatchers()
self.assertEqual(result, {4:None, 5:None})
def test_reopenlogs(self):
options = DummyOptions()
from supervisor.states import ProcessStates
pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
process1 = DummyProcess(pconfig1, state=ProcessStates.STOPPING)
gconfig = DummyPGroupConfig(options, pconfigs=[pconfig1])
group = self._makeOne(gconfig)
group.processes = {'process1': process1}
group.reopenlogs()
self.assertEqual(process1.logs_reopened, True)
def test_removelogs(self):
options = DummyOptions()
from supervisor.states import ProcessStates
pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
process1 = DummyProcess(pconfig1, state=ProcessStates.STOPPING)
gconfig = DummyPGroupConfig(options, pconfigs=[pconfig1])
group = self._makeOne(gconfig)
group.processes = {'process1': process1}
group.removelogs()
self.assertEqual(process1.logsremoved, True)
def test_cmp(self):
options = DummyOptions()
gconfig1 = DummyPGroupConfig(options)
group1 = self._makeOne(gconfig1)
gconfig2 = DummyPGroupConfig(options)
group2 = self._makeOne(gconfig2)
group1.priority = 5
group2.priority = 1
L = [group1, group2]
L.sort()
self.assertEqual(L, [group2, group1])
class ProcessGroupTests(ProcessGroupBaseTests):
def _getTargetClass(self):
from supervisor.process import ProcessGroup
return ProcessGroup
def test_repr(self):
options = DummyOptions()
gconfig = DummyPGroupConfig(options)
group = self._makeOne(gconfig)
s = repr(group)
self.assertTrue(s.startswith(
'<supervisor.process.ProcessGroup instance at'), s)
self.assertTrue(s.endswith('named whatever>'), s)
def test_transition(self):
options = DummyOptions()
from supervisor.states import ProcessStates
pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
process1 = DummyProcess(pconfig1, state=ProcessStates.STOPPING)
gconfig = DummyPGroupConfig(options, pconfigs=[pconfig1])
group = self._makeOne(gconfig)
group.processes = {'process1': process1}
group.transition()
self.assertEqual(process1.transitioned, True)
class EventListenerPoolTests(ProcessGroupBaseTests):
def setUp(self):
from supervisor.events import clear
clear()
def tearDown(self):
from supervisor.events import clear
clear()
def _getTargetClass(self):
from supervisor.process import EventListenerPool
return EventListenerPool
def test_ctor(self):
options = DummyOptions()
gconfig = DummyPGroupConfig(options)
class EventType:
pass
gconfig.pool_events = (EventType,)
pool = self._makeOne(gconfig)
from supervisor import events
self.assertEqual(len(events.callbacks), 2)
self.assertEqual(events.callbacks[0],
(EventType, pool._acceptEvent))
self.assertEqual(events.callbacks[1],
(events.EventRejectedEvent, pool.handle_rejected))
self.assertEqual(pool.serial, -1)
def test__eventEnvelope(self):
options = DummyOptions()
options.identifier = 'thesupervisorname'
gconfig = DummyPGroupConfig(options)
gconfig.name = 'thepoolname'
pool = self._makeOne(gconfig)
from supervisor import events
result = pool._eventEnvelope(
events.EventTypes.PROCESS_COMMUNICATION_STDOUT, 80, 20, 'payload\n')
header, payload = result.split('\n', 1)
headers = header.split()
self.assertEqual(headers[0], 'ver:3.0')
self.assertEqual(headers[1], 'server:thesupervisorname')
self.assertEqual(headers[2], 'serial:80')
self.assertEqual(headers[3], 'pool:thepoolname')
self.assertEqual(headers[4], 'poolserial:20')
self.assertEqual(headers[5], 'eventname:PROCESS_COMMUNICATION_STDOUT')
self.assertEqual(headers[6], 'len:8')
self.assertEqual(payload, 'payload\n')
def test_handle_rejected_no_overflow(self):
options = DummyOptions()
gconfig = DummyPGroupConfig(options)
pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
process1 = DummyProcess(pconfig1)
gconfig = DummyPGroupConfig(options, pconfigs=[pconfig1])
pool = self._makeOne(gconfig)
pool.processes = {'process1': process1}
pool.event_buffer = [None, None]
class DummyEvent1:
serial = 'abc'
class DummyEvent2:
process = process1
event = DummyEvent1()
dummyevent = DummyEvent2()
dummyevent.serial = 1
pool.handle_rejected(dummyevent)
self.assertEqual(pool.event_buffer, [dummyevent.event, None, None])
def test_handle_rejected_event_buffer_overflowed(self):
options = DummyOptions()
gconfig = DummyPGroupConfig(options)
pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
process1 = DummyProcess(pconfig1)
gconfig = DummyPGroupConfig(options, pconfigs=[pconfig1])
gconfig.buffer_size = 3
pool = self._makeOne(gconfig)
pool.processes = {'process1': process1}
class DummyEvent:
def __init__(self, serial):
self.serial = serial
class DummyRejectedEvent:
def __init__(self, serial):
self.process = process1
self.event = DummyEvent(serial)
event_a = DummyEvent('a')
event_b = DummyEvent('b')
event_c = DummyEvent('c')
rej_event = DummyRejectedEvent('rejected')
pool.event_buffer = [event_a, event_b, event_c]
pool.handle_rejected(rej_event)
serials = [ x.serial for x in pool.event_buffer ]
# we popped a, and we inserted the rejected event into the 1st pos
self.assertEqual(serials, ['rejected', 'b', 'c'])
self.assertEqual(pool.config.options.logger.data[0],
'pool whatever event buffer overflowed, discarding event a')
def test_dispatch_pipe_error(self):
options = DummyOptions()
gconfig = DummyPGroupConfig(options)
pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
from supervisor.states import EventListenerStates
gconfig = DummyPGroupConfig(options, pconfigs=[pconfig1])
pool = self._makeOne(gconfig)
process1 = pool.processes['process1']
import errno
process1.write_error = errno.EPIPE
process1.listener_state = EventListenerStates.READY
event = DummyEvent()
pool._acceptEvent(event)
pool.dispatch()
self.assertEqual(process1.listener_state, EventListenerStates.READY)
self.assertEqual(pool.event_buffer, [event])
self.assertEqual(options.logger.data[0],
'rebuffering event abc for pool whatever (bufsize 0)')
def test__acceptEvent_attaches_pool_serial_and_serial(self):
from supervisor.process import GlobalSerial
options = DummyOptions()
gconfig = DummyPGroupConfig(options)
pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
gconfig = DummyPGroupConfig(options, pconfigs=[pconfig1])
pool = self._makeOne(gconfig)
process1 = pool.processes['process1']
from supervisor.states import EventListenerStates
process1.listener_state = EventListenerStates.READY
event = DummyEvent(None)
pool._acceptEvent(event)
self.assertEqual(event.serial, GlobalSerial.serial)
self.assertEqual(event.pool_serials['whatever'], pool.serial)
def test_repr(self):
options = DummyOptions()
gconfig = DummyPGroupConfig(options)
pool = self._makeOne(gconfig)
s = repr(pool)
self.assertTrue(s.startswith(
'<supervisor.process.EventListenerPool instance at'))
self.assertTrue(s.endswith('named whatever>'))
def test_transition_nobody_ready(self):
options = DummyOptions()
from supervisor.states import ProcessStates
pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
process1 = DummyProcess(pconfig1, state=ProcessStates.STARTING)
gconfig = DummyPGroupConfig(options, pconfigs=[pconfig1])
pool = self._makeOne(gconfig)
pool.processes = {'process1': process1}
event = DummyEvent()
event.serial = 'a'
from supervisor.states import EventListenerStates
process1.listener_state = EventListenerStates.BUSY
pool._acceptEvent(event)
pool.transition()
self.assertEqual(process1.transitioned, True)
self.assertEqual(pool.event_buffer, [event])
data = pool.config.options.logger.data
def test_transition_event_proc_not_running(self):
options = DummyOptions()
from supervisor.states import ProcessStates
pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
process1 = DummyProcess(pconfig1, state=ProcessStates.STARTING)
gconfig = DummyPGroupConfig(options, pconfigs=[pconfig1])
pool = self._makeOne(gconfig)
pool.processes = {'process1': process1}
event = DummyEvent()
from supervisor.states import EventListenerStates
event.serial = 1
process1.listener_state = EventListenerStates.READY
pool._acceptEvent(event)
pool.transition()
self.assertEqual(process1.transitioned, True)
self.assertEqual(pool.event_buffer, [event])
self.assertEqual(process1.stdin_buffer, '')
self.assertEqual(process1.listener_state, EventListenerStates.READY)
def test_transition_event_proc_running(self):
options = DummyOptions()
from supervisor.states import ProcessStates
pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
process1 = DummyProcess(pconfig1, state=ProcessStates.RUNNING)
gconfig = DummyPGroupConfig(options, pconfigs=[pconfig1])
pool = self._makeOne(gconfig)
pool.processes = {'process1': process1}
event = DummyEvent()
from supervisor.states import EventListenerStates
process1.listener_state = EventListenerStates.READY
class DummyGroup:
config = gconfig
process1.group = DummyGroup
pool._acceptEvent(event)
pool.transition()
self.assertEqual(process1.transitioned, True)
self.assertEqual(pool.event_buffer, [])
header, payload = process1.stdin_buffer.split('\n', 1)
self.assertEquals(payload, 'dummy event', payload)
self.assertEqual(process1.listener_state, EventListenerStates.BUSY)
self.assertEqual(process1.event, event)
def test_suite():
return unittest.findTestCases(sys.modules[__name__])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
pmoravec/sos
|
refs/heads/master
|
tests/report_tests/plugin_tests/networking.py
|
2
|
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos_tests import StageOneReportTest
class NetworkingPluginTest(StageOneReportTest):
"""
Basic tests to ensure proper collection from the networking plugins
:avocado: tags=stageone
"""
sos_cmd = '-o networking'
def test_common_files_collected(self):
self.assertFileCollected('/etc/resolv.conf')
self.assertFileCollected('/etc/hosts')
def test_ip_addr_symlink_created(self):
self.assertFileCollected('ip_addr')
def test_forbidden_globs_skipped(self):
self.assertFileGlobNotInArchive('/proc/net/rpc/*/channel')
self.assertFileGlobNotInArchive('/proc/net/rpc/*/flush')
|
cjcjameson/gpdb
|
refs/heads/master
|
src/test/tinc/tincrepo/mpp/gpdb/tests/package/oid_inconsistency/test_oid_inconsistency.py
|
9
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os, re
import tinctest
import unittest2 as unittest
from tinctest.lib import Gpdiff
from tinctest.lib import local_path, run_shell_command
from mpp.models import MPPTestCase
from mpp.lib.gppkg.gppkg import Gppkg
from mpp.lib.PSQL import PSQL
GPHOME = os.environ.get("GPHOME")
LIBDIR = '%s/lib/postgresql' % (GPHOME)
class OidInconsistencyMPPTestCase(MPPTestCase):
def __init__(self, methodName):
self.dbname = os.environ.get('PGDATABASE')
super(OidInconsistencyMPPTestCase, self).__init__(methodName)
@classmethod
def setUpClass(cls):
"""
Check if the plperl package is installed, otherwise install the package
"""
super(OidInconsistencyMPPTestCase, cls).setUpClass()
cmd = 'gpssh --version'
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command (cmd, 'check product version', res)
gppkg = Gppkg()
product_version = res['stdout']
gppkg.gppkg_install(product_version, 'plperl')
def compare_oids(self, out_file):
'''Get the oids from the out file and check that they are all the same'''
myfile = open(out_file, 'r')
oid_ls = []
for line in myfile:
oid = re.match(r'\s*\d+', line)
if oid :
oid_ls.append (oid.group(0))
if len(set(oid_ls)) == 1:
tinctest.logger.info("Success: Oids are consistent")
return True
else:
tinctest.logger.info("Failure: Oids are not consistent, please refer %s " % out_file)
return False
def doTest(self, sql_filename):
'''Run the sql file and compare the oids in the out file'''
sql_file = local_path(sql_filename)
out_file = local_path(sql_filename.split('.sql')[0] + '.out')
PSQL.run_sql_file(sql_file = sql_file, out_file = out_file)
isOk = self.compare_oids(out_file)
self.assertTrue(isOk)
def checkAPPHOMEandLIB(self, libso, apphome=''):
if apphome == '':
""" check application library """
#print "%s/%s.so" % (LIBDIR, libso)
return os.path.isfile("%s/%s.so" % (LIBDIR, libso))
else:
""" check application home and library """
return os.environ.get(apphome) and os.path.isfile("%s/%s.so" % (LIBDIR, libso))
def test_01_pg_language_oid(self):
'''Oid_inconsistency : pg_language -oid '''
if self.checkAPPHOMEandLIB("plperl"):
self.doTest('pg_language_oid.sql')
else:
self.skipTest('skipping test: no plperl.so found in $GPHOME/lib/postgresql')
def test_02_pg_language_lanvalidator(self):
'''Oid_inconsistency : pg_language -lanvalidator '''
if self.checkAPPHOMEandLIB("plperl"):
self.doTest('pg_language_lanvalidator.sql')
else:
self.skipTest('skipping test: no plperl.so found in $GPHOME/lib/postgresql')
def test_03_pg_language_lanplcallfoid(self):
'''Oid_inconsistency : pg_language -lanplcallfoid '''
if self.checkAPPHOMEandLIB("plperl"):
self.doTest('pg_language_lanplcallfoid.sql')
else:
self.skipTest('skipping test: no plperl.so found in $GPHOME/lib/postgresql')
def test_04_pg_proc_oid(self):
'''Oid_inconsistency : pg_proc -oid '''
if self.checkAPPHOMEandLIB("plperl"):
self.doTest('pg_proc_oid_1.sql')
self.doTest('pg_proc_oid_2.sql')
else:
self.skipTest('skipping test: no plperl.so found in $GPHOME/lib/postgresql')
def test_05_pg_rewrite_view(self):
'''Oid_inconsistency : pg_rewrite create view -oid '''
self.doTest('pg_rewrite_view.sql')
def test_06_pg_rewrite_rule_select(self):
'''Oid_inconsistency : pg_rewrite_rule select event -oid '''
self.doTest('pg_rewrite_rule_select.sql')
def test_07_pg_rewrite_rule_update(self):
'''Oid_inconsistency : pg_rewrite_rule update event -oid '''
self.doTest('pg_rewrite_rule_update.sql')
def test_08_pg_rewrite_rule_delete(self):
'''Oid_inconsistency : pg_rewrite_rule delete event -oid '''
self.doTest('pg_rewrite_rule_delete.sql')
def test_09_pg_rewrite_rule_insert(self):
'''Oid_inconsistency : pg_rewrite_rule insert event -oid '''
self.doTest('pg_rewrite_rule_insert.sql')
def test_10_pg_trigger_oid(self):
'''Oid_inconsistency : pg_trigger -oid '''
self.doTest('pg_trigger_oid.sql')
def test_11_pg_constraint_create_table_column_unique(self):
'''Oid_inconsistency : pg_constraint_create table column constraint_unique -oid,conrelid '''
self.doTest('pg_constraint_create_table_column_unique.sql')
self.doTest('pg_constraint_create_table_column_unique_2.sql')
def test_12_pg_constraint_create_table_column_primary_key(self):
'''Oid_inconsistency : pg_constraint_create table column constraint primary_key -oid,conrelid '''
self.doTest('pg_constraint_create_table_column_primary_key.sql')
self.doTest('pg_constraint_create_table_column_primary_key_2.sql')
def test_13_pg_constraint_create_table_column_check(self):
'''Oid_inconsistency : pg_constraint_create table column constraint check -oid,conrelid '''
self.doTest('pg_constraint_create_table_column_check.sql')
self.doTest('pg_constraint_create_table_column_check_2.sql')
def test_14_pg_constraint_create_table_unique(self):
'''Oid_inconsistency : pg_constraint_create table_unique -oid,conrelid '''
self.doTest('pg_constraint_create_table_unique.sql')
self.doTest('pg_constraint_create_table_unique_2.sql')
def test_15_pg_constraint_create_table_primary_key(self):
'''Oid_inconsistency : pg_constraint_create table_primary_key -oid,conrelid '''
self.doTest('pg_constraint_create_table_primary_key.sql')
self.doTest('pg_constraint_create_table_primary_key_2.sql')
def test_16_pg_constraint_create_table_check(self):
'''Oid_inconsistency : pg_constraint_create table_check -oid,conrelid '''
self.doTest('pg_constraint_create_table_check.sql')
self.doTest('pg_constraint_create_table_check_2.sql')
def test_17_pg_constraint_create_table_like(self):
'''Oid_inconsistency : pg_constraint_create table like -oid,conrelid '''
self.doTest('pg_constraint_create_table_like.sql')
self.doTest('pg_constraint_create_table_like_2.sql')
def test_18_pg_constraint_create_table_inherit(self):
'''Oid_inconsistency : pg_constraint_create table inherit -oid,conrelid '''
self.doTest('pg_constraint_create_table_inherit.sql')
self.doTest('pg_constraint_create_table_inherit_2.sql')
def test_19_pg_constraint_create_table_partition(self):
'''Oid_inconsistency : pg_constraint create partition table -oid'''
self.doTest('pg_constraint_create_table_partition.sql')
self.doTest('pg_constraint_create_table_partition_2.sql')
self.doTest('pg_constraint_create_table_partition_3.sql')
self.doTest('pg_constraint_create_table_partition_4.sql')
def test_20_pg_constraint_create_table_partition_unique(self):
'''Oid_inconsistency : pg_constraint create partition table with unique -oid,conrelid'''
self.doTest('pg_constraint_create_table_partition_unique.sql')
self.doTest('pg_constraint_create_table_partition_unique_2.sql')
def test_21_pg_constraint_alter_table_add_primary_key(self):
'''Oid_inconsistency : pg_constraint_alter_table_add_primary_key -oid,conrelid '''
self.doTest('pg_constraint_alter_table_add_primary_key.sql')
self.doTest('pg_constraint_alter_table_add_primary_key_2.sql')
def test_22_pg_constraint_alter_table_add_unique(self):
'''Oid_inconsistency : pg_constraint_alter_table_add_unique -oid,conrelid '''
self.doTest('pg_constraint_alter_table_add_unique.sql')
self.doTest('pg_constraint_alter_table_add_unique_2.sql')
def test_23_pg_constraint_alter_table_add_check(self):
'''Oid_inconsistency : pg_constraint_alter_table_add_check -oid,conrelid '''
self.doTest('pg_constraint_alter_table_add_check.sql')
self.doTest('pg_constraint_alter_table_add_check_2.sql')
def test_24_pg_constraint_alter_table_add_column_constraint(self):
'''Oid_inconsistency : pg_constraint_alter_table_add_column_with_constraint -oid,conrelid '''
self.doTest('pg_constraint_alter_table_add_column_constraint.sql')
self.doTest('pg_constraint_alter_table_add_column_constraint_2.sql')
def test_25_pg_constraint_alter_part_table_add_column_constraint(self):
'''Oid_inconsistency : pg_constraint_alter_part_table_add_column_with_constraint -oid,conrelid '''
self.doTest('pg_constraint_alter_part_table_add_column_constraint.sql')
self.doTest('pg_constraint_alter_part_table_add_column_constraint_2.sql')
self.doTest('pg_constraint_alter_part_table_add_column_constraint_3.sql')
def test_26_pg_constraint_alter_table_add_partition(self):
'''Oid_inconsistency : pg_constraint_alter_table_add_partition -oid'''
self.doTest('pg_constraint_alter_table_add_partition.sql')
# Commenting due to MPP-13685
#def test_27_pg_constraint_alter_table_exchange_partition(self):
# '''Oid_inconsistency : pg_constraint_alter_table_exchange_partition -oid'''
# self.doTest('pg_constraint_alter_table_exchange_partition.sql')
def test_28_pg_constraint_alter_table_split_partition(self):
'''Oid_inconsistency : pg_constraint_alter_table_split_partition -oid'''
self.doTest('pg_constraint_alter_table_split_partition.sql')
self.doTest('pg_constraint_alter_table_split_partition_2.sql')
def test_29_pg_constraint_create_domain(self):
'''Oid_inconsistency : pg_constraint_create_domain -oid'''
self.doTest('pg_constraint_create_domain.sql')
def test_30_pg_constraint_alter_domain(self):
'''Oid_inconsistency : pg_constraint_alter_domain -oid'''
self.doTest('pg_constraint_alter_domain.sql')
def test_31_pg_constraint_create_table_foreign_key(self):
'''Oid_inconsistency : pg_constraint create_table_foreign_key -oid'''
self.doTest('pg_constraint_create_table_foreign_key.sql')
self.doTest('pg_constraint_create_table_foreign_key_2.sql')
def test_32_pg_constraint_alter_table_foreign_key(self):
'''Oid_inconsistency : pg_constraint alter_table_foreign_key -oid'''
self.doTest('pg_constraint_alter_table_foreign_key.sql')
self.doTest('pg_constraint_alter_table_foreign_key_2.sql')
def test_33_pg_constraint_alter_table_inherits_add_constraint(self):
'''Oid_inconsistency : pg_constraint_alter_table_inherits_add_constraint -oid'''
self.doTest('pg_constraint_alter_table_inherits_add_constraint.sql')
self.doTest('pg_constraint_alter_table_inherits_add_constraint_2.sql')
def test_34_pg_constraint_alter_table_alter_type(self):
'''Oid_inconsistency : pg_constraint_alter_table_alter data type -oid,conrelid '''
self.doTest('pg_constraint_alter_table_alter_type.sql')
self.doTest('pg_constraint_alter_table_alter_type_2.sql')
def test_35_pg_constraint_create_table_partition_primary(self):
'''Oid_inconsistency : pg_constraint create partition table with primary key -oid,conrelid'''
self.doTest('pg_constraint_create_table_partition_primary.sql')
self.doTest('pg_constraint_create_table_partition_primary_2.sql')
# Commenting due to MPP-14089
#def test_36_pg_constraint_create_part_table_foreign_key(self):
# '''Oid_inconsistency : pg_constraint create_part_table_foreign_key -oid'''
# self.doTest('pg_constraint_create_part_table_foreign_key.sql')
# self.doTest('pg_constraint_create_part_table_foreign_key_2.sql')
# self.doTest('pg_constraint_create_part_table_foreign_key_3.sql')
# self.doTest('pg_constraint_create_part_table_foreign_key_4.sql')
#def test_37_pg_constraint_alter_part_table_add_foreign_key(self):
# '''Oid_inconsistency : pg_constraint alter_part_table_add_foreign_key -oid'''
# self.doTest('pg_constraint_alter_part_table_add_foreign_key.sql')
# self.doTest('pg_constraint_alter_part_table_add_foreign_key_2.sql')
# self.doTest('pg_constraint_alter_part_table_add_foreign_key_3.sql')
# self.doTest('pg_constraint_alter_part_table_add_foreign_key_4.sql')
def test_38_pg_constraint_alter_part_table_alter_type(self):
'''Oid_inconsistency : pg_constraint_alter part_table alter data type -oid'''
self.doTest('pg_constraint_alter_part_table_alter_type.sql')
self.doTest('pg_constraint_alter_part_table_alter_type_2.sql')
def test_39_pg_constraint_alter_table_add_default_part(self):
'''Oid_inconsistency : pg_constraint_alter_table_add_default_part -oid'''
self.doTest('pg_constraint_alter_table_add_default_part.sql')
def test_40_pg_constraint_create_part_table_check(self):
'''Oid_inconsistency : pg_constraint_create_part_table_check -oid'''
self.doTest('pg_constraint_create_part_table_check.sql')
self.doTest('pg_constraint_create_part_table_check_2.sql')
def test_41_pg_constraint_alter_part_table_add_unique(self):
'''Oid_inconsistency : pg_constraint alter_part_table_add_unique -oid'''
self.doTest('pg_constraint_alter_part_table_add_unique.sql')
self.doTest('pg_constraint_alter_part_table_add_unique_2.sql')
def test_42_pg_constraint_alter_part_table_add_primary(self):
'''Oid_inconsistency : pg_constraint alter_part_table_add_primary -oid'''
self.doTest('pg_constraint_alter_part_table_add_primary.sql')
self.doTest('pg_constraint_alter_part_table_add_primary_2.sql')
def test_43_alter_table_with_oid(self):
'''MPP-13870: Alter table Set Without Oids fails in case of inheritance'''
sql_file = local_path('alter_table_with_oid.sql')
out_file = local_path('alter_table_with_oid.out')
ans_file = local_path('alter_table_with_oid.ans')
PSQL.run_sql_file(sql_file = sql_file, out_file = out_file)
self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
|
Kryz/sentry
|
refs/heads/master
|
src/sentry/models/groupassignee.py
|
23
|
"""
sentry.models.groupassignee
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf import settings
from django.db import models
from django.utils import timezone
from sentry.db.models import FlexibleForeignKey, Model, sane_repr
class GroupAssignee(Model):
"""
Identifies an assignment relationship between a user and an
aggregated event (Group).
"""
__core__ = False
project = FlexibleForeignKey('sentry.Project', related_name="assignee_set")
group = FlexibleForeignKey('sentry.Group', related_name="assignee_set", unique=True)
user = FlexibleForeignKey(settings.AUTH_USER_MODEL, related_name="sentry_assignee_set")
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = 'sentry'
db_table = 'sentry_groupasignee'
__repr__ = sane_repr('group_id', 'user_id')
|
cortedeltimo/SickRage
|
refs/heads/master
|
lib/hachoir_parser/archive/bzip2_parser.py
|
74
|
"""
BZIP2 archive file
Author: Victor Stinner, Robert Xiao
"""
from hachoir_parser import Parser
from hachoir_core.tools import paddingSize
from hachoir_core.field import (Field, FieldSet, GenericVector,
ParserError, String,
PaddingBits, Bit, Bits, Character,
UInt32, Enum, CompressedField)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_parser.archive.zlib import build_tree, HuffmanCode
try:
from bz2 import BZ2Decompressor
class Bunzip2:
def __init__(self, stream):
self.bzip2 = BZ2Decompressor()
def __call__(self, size, data=''):
try:
return self.bzip2.decompress(data)
except EOFError:
return ''
has_deflate = True
except ImportError:
has_deflate = False
class ZeroTerminatedNumber(Field):
"""Zero (bit) terminated number: e.g. 11110 is 4."""
def __init__(self, parent, name, description=None):
Field.__init__(self, parent, name, 0, description)
endian = self.parent.endian
stream = self.parent.stream
addr = self.absolute_address
value = 0
while True:
bit = stream.readBits(addr, 1, endian)
addr += 1
self._size += 1
if not bit:
break
value += 1
self._value = value
def createValue(self):
return self._value
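# move_to_front() performs the move-to-front (MTF) step used when decoding
# bzip2 selectors and literals: the element at index c is moved to the head of
# the list in place, e.g. move_to_front(['a', 'b', 'c', 'd'], 2) leaves the
# list as ['c', 'a', 'b', 'd'].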
def move_to_front(l, c):
l[:] = l[c:c+1] + l[0:c] + l[c+1:]
class Bzip2Bitmap(FieldSet):
def __init__(self, parent, name, nb_items, start_index, *args, **kwargs):
FieldSet.__init__(self, parent, name, *args, **kwargs)
self.nb_items = nb_items
self.start_index = start_index
def createFields(self):
for i in xrange(self.start_index, self.start_index+self.nb_items):
yield Bit(self, "symbol_used[%i]"%i, "Is the symbol %i (%r) used?"%(i, chr(i)))
class Bzip2Lengths(FieldSet):
def __init__(self, parent, name, symbols, *args, **kwargs):
FieldSet.__init__(self, parent, name, *args, **kwargs)
self.symbols = symbols
def createFields(self):
yield Bits(self, "start_length", 5)
length = self["start_length"].value
lengths = []
for i in xrange(self.symbols):
while True:
bit = Bit(self, "change_length[%i][]"%i, "Should the length be changed for symbol %i?"%i)
yield bit
if not bit.value:
break
else:
bit = Enum(Bit(self, "length_decrement[%i][]"%i, "Decrement the value?"), {True: "Decrement", False: "Increment"})
yield bit
if bit.value:
length -= 1
else:
length += 1
lengths.append(length)
self.final_length = length
self.tree = build_tree(lengths)
class Bzip2Selectors(FieldSet):
def __init__(self, parent, name, ngroups, *args, **kwargs):
FieldSet.__init__(self, parent, name, *args, **kwargs)
self.groups = range(ngroups)
def createFields(self):
for i in xrange(self["../selectors_used"].value):
field = ZeroTerminatedNumber(self, "selector_list[]")
move_to_front(self.groups, field.value)
field.realvalue = self.groups[0]
field._description = "MTF'ed selector index: raw value %i, real value %i"%(field.value, field.realvalue)
yield field
class Bzip2Block(FieldSet):
def createFields(self):
yield textHandler(Bits(self, "blockheader", 48, "Block header"), hexadecimal)
if self["blockheader"].value != 0x314159265359: # pi
raise ParserError("Invalid block header!")
yield textHandler(UInt32(self, "crc32", "CRC32 for this block"), hexadecimal)
yield Bit(self, "randomized", "Is this block randomized?")
yield Bits(self, "orig_bwt_pointer", 24, "Starting pointer into BWT after untransform")
yield GenericVector(self, "huffman_used_map", 16, Bit, 'block_used', "Bitmap showing which blocks (representing 16 literals each) are in use")
symbols_used = []
for index, block_used in enumerate(self["huffman_used_map"].array('block_used')):
if block_used.value:
start_index = index*16
field = Bzip2Bitmap(self, "huffman_used_bitmap[%i]"%index, 16, start_index, "Bitmap for block %i (literals %i to %i) showing which symbols are in use"%(index, start_index, start_index + 15))
yield field
for i, used in enumerate(field):
if used.value:
symbols_used.append(start_index + i)
yield Bits(self, "huffman_groups", 3, "Number of different Huffman tables in use")
yield Bits(self, "selectors_used", 15, "Number of times the Huffman tables are switched")
yield Bzip2Selectors(self, "selectors_list", self["huffman_groups"].value)
trees = []
for group in xrange(self["huffman_groups"].value):
field = Bzip2Lengths(self, "huffman_lengths[]", len(symbols_used)+2)
yield field
trees.append(field.tree)
counter = 0
rle_run = 0
selector_tree = None
while True:
if counter%50 == 0:
select_id = self["selectors_list"].array("selector_list")[counter//50].realvalue
selector_tree = trees[select_id]
field = HuffmanCode(self, "huffman_code[]", selector_tree)
if field.realvalue in [0, 1]:
# RLE codes
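# bzip2 encodes runs of the symbol currently at the front of the move-to-front
# list with the two run codes RUNA (0) and RUNB (1) in bijective base 2: the
# k-th run code contributes (value + 1) * 2**k to the run length, which is
# what the accumulation below computes.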
if rle_run == 0:
rle_power = 1
rle_run += (field.realvalue + 1) * rle_power
rle_power <<= 1
field._description = "RLE Run Code %i (for %r); Total accumulated run %i (Huffman Code %i)" % (field.realvalue, chr(symbols_used[0]), rle_run, field.value)
elif field.realvalue == len(symbols_used)+1:
field._description = "Block Terminator (%i) (Huffman Code %i)"%(field.realvalue, field.value)
yield field
break
else:
rle_run = 0
move_to_front(symbols_used, field.realvalue-1)
field._description = "Literal %r (value %i) (Huffman Code %i)"%(chr(symbols_used[0]), field.realvalue, field.value)
yield field
if field.realvalue == len(symbols_used)+1:
break
counter += 1
class Bzip2Stream(FieldSet):
START_BLOCK = 0x314159265359 # pi
END_STREAM = 0x177245385090 # sqrt(pi)
def createFields(self):
end = False
while not end:
marker = self.stream.readBits(self.absolute_address + self.current_size, 48, self.endian)
if marker == self.START_BLOCK:
yield Bzip2Block(self, "block[]")
elif marker == self.END_STREAM:
yield textHandler(Bits(self, "stream_end", 48, "End-of-stream marker"), hexadecimal)
yield textHandler(UInt32(self, "crc32", "CRC32 for entire stream"), hexadecimal)
padding = paddingSize(self.current_size, 8)
if padding:
yield PaddingBits(self, "padding[]", padding)
end = True
else:
raise ParserError("Invalid marker 0x%02X!"%marker)
class Bzip2Parser(Parser):
PARSER_TAGS = {
"id": "bzip2",
"category": "archive",
"file_ext": ("bz2",),
"mime": (u"application/x-bzip2",),
"min_size": 10*8,
"magic": (('BZh', 0),),
"description": "bzip2 archive"
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, 3) != 'BZh':
return "Wrong file signature"
if not("1" <= self["blocksize"].value <= "9"):
return "Wrong blocksize"
return True
def createFields(self):
yield String(self, "id", 3, "Identifier (BZh)", charset="ASCII")
yield Character(self, "blocksize", "Block size (KB of memory needed to uncompress)")
if self._size is None: # TODO: is it possible to handle piped input?
raise NotImplementedError
size = (self._size - self.current_size)/8
if size:
for tag, filename in self.stream.tags:
if tag == "filename" and filename.endswith(".bz2"):
filename = filename[:-4]
break
else:
filename = None
data = Bzip2Stream(self, "file", size=size*8)
if has_deflate:
CompressedField(self, Bunzip2)
def createInputStream(**args):
if filename:
args.setdefault("tags",[]).append(("filename", filename))
return self._createInputStream(**args)
data._createInputStream = createInputStream
yield data
|
fengbaicanhe/intellij-community
|
refs/heads/master
|
plugins/hg4idea/testData/bin/mercurial/mpatch.py
|
96
|
# mpatch.py - Python implementation of mpatch.c
#
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import struct
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# This attempts to apply a series of patches in time proportional to
# the total size of the patches, rather than patches * len(text). This
# means rather than shuffling strings around, we shuffle around
# pointers to fragments with fragment lists.
#
# When the fragment lists get too long, we collapse them. To do this
# efficiently, we do all our operations inside a buffer created by
# mmap and simply use memmove. This avoids creating a bunch of large
# temporary string buffers.
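#
# A patch ("bin") is a sequence of hunks, each a 12-byte big-endian header of
# three int32s (start, end, length) followed by `length` bytes of replacement
# data: bytes [start:end) of the current text are replaced by that data.  A
# minimal sketch (not part of the original module) of applying one such hunk
# to "hello world":
#
#   import struct
#   delta = struct.pack(">lll", 0, 5, 7) + "goodbye"
#   patches("hello world", [delta])         # -> "goodbye world"
#   patchedsize(len("hello world"), delta)  # -> 13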
def patches(a, bins):
if not bins:
return a
plens = [len(x) for x in bins]
pl = sum(plens)
bl = len(a) + pl
tl = bl + bl + pl # enough for the patches and two working texts
b1, b2 = 0, bl
if not tl:
return a
m = StringIO()
def move(dest, src, count):
"""move count bytes from src to dest
The file pointer is left at the end of dest.
"""
m.seek(src)
buf = m.read(count)
m.seek(dest)
m.write(buf)
# load our original text
m.write(a)
frags = [(len(a), b1)]
# copy all the patches into our segment so we can memmove from them
pos = b2 + bl
m.seek(pos)
for p in bins: m.write(p)
def pull(dst, src, l): # pull l bytes from src
while l:
f = src.pop()
if f[0] > l: # do we need to split?
src.append((f[0] - l, f[1] + l))
dst.append((l, f[1]))
return
dst.append(f)
l -= f[0]
def collect(buf, list):
start = buf
for l, p in reversed(list):
move(buf, p, l)
buf += l
return (buf - start, start)
for plen in plens:
# if our list gets too long, execute it
if len(frags) > 128:
b2, b1 = b1, b2
frags = [collect(b1, frags)]
new = []
end = pos + plen
last = 0
while pos < end:
m.seek(pos)
p1, p2, l = struct.unpack(">lll", m.read(12))
pull(new, frags, p1 - last) # what didn't change
pull([], frags, p2 - p1) # what got deleted
new.append((l, pos + 12)) # what got added
pos += l + 12
last = p2
frags.extend(reversed(new)) # what was left at the end
t = collect(b2, frags)
m.seek(t[1])
return m.read(t[0])
def patchedsize(orig, delta):
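    # Compute the length a text of size `orig` would have after applying the
    # single patch `delta`, by walking its hunk headers without materialising
    # the patched text.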
outlen, last, bin = 0, 0, 0
binend = len(delta)
data = 12
while data <= binend:
decode = delta[bin:bin + 12]
start, end, length = struct.unpack(">lll", decode)
if start > end:
break
bin = data + length
data = bin + 12
outlen += start - last
last = end
outlen += length
if bin != binend:
raise ValueError("patch cannot be decoded")
outlen += orig - last
return outlen
|
mzdaniel/oh-mainline
|
refs/heads/master
|
vendor/packages/typecheck/tests/test_bwcompat_1-6.py
|
16
|
import support
from support import TODO, TestCase
if __name__ == '__main__':
support.adjust_path()
### /Bookkeeping ###
import types
import typecheck.doctest_support
from typecheck import typecheck, TypeCheckException, Any
class _TestSuite(TestCase):
def testCreateTypeCheckedMethod(self):
@typecheck(int)
def f(a):
return 1
self.assertEquals(1, f(5))
try:
f('a')
self.fail()
except TypeCheckException:
pass
def testCreateTypeCheckedMethodPositional(self):
@typecheck(int, int, str)
def f(a, b, c):
return 1
self.assertEquals(1, f(5, 6, '7'))
for a, b, c in [(5, 6, 7), ('5', 6, '7'), (8, '9', 10), (8, '9', '10')]:
try:
f(a, b, c)
self.fail('Failed with values (%s, %s, %s)' % (a, b, c))
except TypeCheckException:
pass
def testCreateTypeCheckedMethodKeyword(self):
# The original did not supply a type for b
@typecheck(a=int, b=Any(), c=str)
def f(a=None, b=None, c=None):
return 1
self.assertEquals(1, f(5, 6, '7'))
self.assertEquals(1, f(5, [], '7'))
for a, b, c in [(5, 6, 7), ('11', 12, '13'), (8, '9', 10)]:
try:
self.assertEquals(1, f(a=a, b=b, c=c))
self.fail('Failed with values (%s, %s, %s)' % (a, b, c))
except TypeCheckException:
pass
def testCreateTypeCheckedMethodCombined(self):
@typecheck(int, b=int, c=str)
def f(a, b=None, c=None):
return 1
self.assertEquals(1, f(5, 6, '7'))
self.assertEquals(1, f(5, 13, 'hello'))
for a, b, c in [(5, 6, 7), ('11', 12, '13'), (8, '9', 10)]:
try:
self.assertEquals(1, f(a, b=b, c=c))
self.fail('Failed with values (%s, %s, %s)' % (a, b, c))
except TypeCheckException:
pass
def testTypeCheckedMethodRetainsName(self):
@typecheck(int)
def f(a):
pass
self.assertEquals('f', f.__name__)
def testTypeCheckedMethodRetainsDocstring(self):
@typecheck(int)
def f(a):
'docstring'
pass
self.assertEquals('docstring', f.__doc__)
def testTypeCheckedDocstringGetsFoundByDoctest(self):
import doctest
import doctests
finder = doctest.DocTestFinder(verbose=True)
tests = finder.find(doctests)
self.assertEquals(3, len(tests))
runner = doctest.DocTestRunner(doctest.OutputChecker())
for test in tests:
runner.run(test)
self.assertEquals(7, runner.summarize()[1])
self.assertEquals(0, runner.summarize()[0])
def testOldStyleClassesAcceptedAsPatterns(self):
class T:
pass
@typecheck(T)
def f(t_instance):
pass
##########################################################################
def a_testTypeCheckMatchesKwToPnIfNoCorrespondingKw(self):
@typecheck(b=str)
def my_func(a, b):
pass
def a_testTypeCheckMatchesKeywordsToPositionalNames(self):
@typecheck(a=str)
def my_func(a):
pass
try:
my_func(4, 7)
self.fail('Should have raised a TypeCheckException')
except TypeCheckException:
pass
except:
self.fail('Should have raised a TypeCheckException')
@typecheck(a=str, b=int)
def my_func(a, b, c):
pass
try:
my_func(4, 7, 7)
self.fail('Should have raised a TypeCheckException')
except TypeCheckException:
pass
except:
self.fail('Should have raised a TypeCheckException')
try:
my_func('4', 7, 7)
self.fail('Should have raised a TypeCheckException')
except TypeCheckException:
pass
except:
self.fail('Should have raised a TypeCheckException')
try:
my_func(4, '7', 7)
self.fail('Should have raised a TypeCheckException')
except TypeCheckException:
pass
except:
self.fail('Should have raised a TypeCheckException')
### Bookkeeping ###
if __name__ == '__main__':
import __main__
support.run_all_tests(__main__)
|
suninsky/ReceiptOCR
|
refs/heads/master
|
Python/server/lib/python2.7/site-packages/PIL/EpsImagePlugin.py
|
21
|
#
# The Python Imaging Library.
# $Id$
#
# EPS file handling
#
# History:
# 1995-09-01 fl Created (0.1)
# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
# 1996-08-22 fl Don't choke on floating point BoundingBox values
# 1996-08-23 fl Handle files from Macintosh (0.3)
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
# 2014-05-07 e Handling of EPS with binary preview and fixed resolution
# resizing
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import re
import io
import sys
from PIL import Image, ImageFile, _binary
__version__ = "0.5"
#
# --------------------------------------------------------------------
i32 = _binary.i32le
o32 = _binary.o32le
split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")
gs_windows_binary = None
if sys.platform.startswith('win'):
import shutil
if hasattr(shutil, 'which'):
which = shutil.which
else:
# Python < 3.3
import distutils.spawn
which = distutils.spawn.find_executable
for binary in ('gswin32c', 'gswin64c', 'gs'):
if which(binary) is not None:
gs_windows_binary = binary
break
else:
gs_windows_binary = False
def has_ghostscript():
if gs_windows_binary:
return True
if not sys.platform.startswith('win'):
import subprocess
try:
gs = subprocess.Popen(['gs', '--version'], stdout=subprocess.PIPE)
gs.stdout.read()
return True
except OSError:
# no ghostscript
pass
return False
def Ghostscript(tile, size, fp, scale=1):
"""Render an image using Ghostscript"""
# Unpack decoder tile
decoder, tile, offset, data = tile[0]
length, bbox = data
# Hack to support hi-res rendering
scale = int(scale) or 1
# orig_size = size
# orig_bbox = bbox
size = (size[0] * scale, size[1] * scale)
# resolution is dependent on bbox and size
res = (float((72.0 * size[0]) / (bbox[2]-bbox[0])),
float((72.0 * size[1]) / (bbox[3]-bbox[1])))
# print("Ghostscript", scale, size, orig_size, bbox, orig_bbox, res)
import os
import subprocess
import tempfile
out_fd, outfile = tempfile.mkstemp()
os.close(out_fd)
infile_temp = None
if hasattr(fp, 'name') and os.path.exists(fp.name):
infile = fp.name
else:
in_fd, infile_temp = tempfile.mkstemp()
os.close(in_fd)
infile = infile_temp
# ignore length and offset!
# ghostscript can read it
# copy whole file to read in ghostscript
with open(infile_temp, 'wb') as f:
# fetch length of fp
fp.seek(0, 2)
fsize = fp.tell()
# ensure start position
# go back
fp.seek(0)
lengthfile = fsize
while lengthfile > 0:
s = fp.read(min(lengthfile, 100*1024))
if not s:
break
lengthfile -= len(s)
f.write(s)
# Build ghostscript command
command = ["gs",
"-q", # quiet mode
"-g%dx%d" % size, # set output geometry (pixels)
"-r%fx%f" % res, # set input DPI (dots per inch)
"-dNOPAUSE", # don't pause between pages,
"-dSAFER", # safe mode
"-sDEVICE=ppmraw", # ppm driver
"-sOutputFile=%s" % outfile, # output file
"-c", "%d %d translate" % (-bbox[0], -bbox[1]),
# adjust for image origin
"-f", infile, # input file
]
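    # For example (a sketch; actual values depend on the input file), a
    # 100x100 point bounding box rendered at scale=1 corresponds roughly to:
    #   gs -q -g100x100 -r72.000000x72.000000 -dNOPAUSE -dSAFER \
    #      -sDEVICE=ppmraw -sOutputFile=/tmp/tmpXXXXXX -c "0 0 translate" -f input.eps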
if gs_windows_binary is not None:
if not gs_windows_binary:
raise WindowsError('Unable to locate Ghostscript on paths')
command[0] = gs_windows_binary
# push data through ghostscript
try:
gs = subprocess.Popen(command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
gs.stdin.close()
status = gs.wait()
if status:
raise IOError("gs failed (status %d)" % status)
im = Image.open(outfile)
im.load()
finally:
try:
os.unlink(outfile)
if infile_temp:
os.unlink(infile_temp)
except OSError:
pass
return im.im.copy()
class PSFile(object):
"""
Wrapper for bytesio object that treats either CR or LF as end of line.
"""
def __init__(self, fp):
self.fp = fp
self.char = None
def seek(self, offset, whence=0):
self.char = None
self.fp.seek(offset, whence)
def readline(self):
s = self.char or b""
self.char = None
c = self.fp.read(1)
while c not in b"\r\n":
s = s + c
c = self.fp.read(1)
self.char = self.fp.read(1)
# line endings can be 1 or 2 of \r \n, in either order
if self.char in b"\r\n":
self.char = None
return s.decode('latin-1')
def _accept(prefix):
return prefix[:4] == b"%!PS" or \
(len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5)
##
# Image plugin for Encapsulated Postscript. This plugin supports only
# a few variants of this format.
class EpsImageFile(ImageFile.ImageFile):
"""EPS File Parser for the Python Imaging Library"""
format = "EPS"
format_description = "Encapsulated Postscript"
mode_map = {1: "L", 2: "LAB", 3: "RGB", 4: "CMYK"}
def _open(self):
(length, offset) = self._find_offset(self.fp)
# Rewrap the open file pointer in something that will
# convert line endings and decode to latin-1.
try:
if bytes is str:
# Python2, no encoding conversion necessary
fp = open(self.fp.name, "Ur")
else:
# Python3, can use bare open command.
fp = open(self.fp.name, "Ur", encoding='latin-1')
except:
# Expect this for bytesio/stringio
fp = PSFile(self.fp)
# go to offset - start of "%!PS"
fp.seek(offset)
box = None
self.mode = "RGB"
self.size = 1, 1 # FIXME: huh?
#
# Load EPS header
s = fp.readline().strip('\r\n')
while s:
if len(s) > 255:
raise SyntaxError("not an EPS file")
try:
m = split.match(s)
except re.error as v:
raise SyntaxError("not an EPS file")
if m:
k, v = m.group(1, 2)
self.info[k] = v
if k == "BoundingBox":
try:
# Note: The DSC spec says that BoundingBox
# fields should be integers, but some drivers
# put floating point values there anyway.
box = [int(float(i)) for i in v.split()]
self.size = box[2] - box[0], box[3] - box[1]
self.tile = [("eps", (0, 0) + self.size, offset,
(length, box))]
except:
pass
else:
m = field.match(s)
if m:
k = m.group(1)
if k == "EndComments":
break
if k[:8] == "PS-Adobe":
self.info[k[:8]] = k[9:]
else:
self.info[k] = ""
elif s[0] == '%':
# handle non-DSC Postscript comments that some
# tools mistakenly put in the Comments section
pass
else:
raise IOError("bad EPS header")
s = fp.readline().strip('\r\n')
if s[:1] != "%":
break
#
# Scan for an "ImageData" descriptor
while s[:1] == "%":
if len(s) > 255:
raise SyntaxError("not an EPS file")
if s[:11] == "%ImageData:":
# Encoded bitmapped image.
x, y, bi, mo = s[11:].split(None, 7)[:4]
if int(bi) != 8:
break
try:
self.mode = self.mode_map[int(mo)]
except ValueError:
break
self.size = int(x), int(y)
return
s = fp.readline().strip('\r\n')
if not s:
break
if not box:
raise IOError("cannot determine EPS bounding box")
def _find_offset(self, fp):
s = fp.read(160)
if s[:4] == b"%!PS":
# for HEAD without binary preview
fp.seek(0, 2)
length = fp.tell()
offset = 0
elif i32(s[0:4]) == 0xC6D3D0C5:
# FIX for: Some EPS file not handled correctly / issue #302
# EPS can contain binary data
# or start directly with latin coding
# more info see:
# http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
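            # Per that spec, the binary (DOS EPS) header is: a 4-byte magic
            # (C5 D0 D3 C6), then little-endian 32-bit offset and length of
            # the PostScript section, followed by optional WMF/TIFF preview
            # offsets/lengths and a checksum; only the first three fields
            # are used here.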
offset = i32(s[4:8])
length = i32(s[8:12])
else:
raise SyntaxError("not an EPS file")
return (length, offset)
def load(self, scale=1):
# Load EPS via Ghostscript
if not self.tile:
return
self.im = Ghostscript(self.tile, self.size, self.fp, scale)
self.mode = self.im.mode
self.size = self.im.size
self.tile = []
def load_seek(self, *args, **kwargs):
# we can't incrementally load, so force ImageFile.parser to
# use our custom load method by defining this method.
pass
#
# --------------------------------------------------------------------
def _save(im, fp, filename, eps=1):
"""EPS Writer for the Python Imaging Library."""
#
# make sure image data is available
im.load()
#
# determine postscript image mode
if im.mode == "L":
operator = (8, 1, "image")
elif im.mode == "RGB":
operator = (8, 3, "false 3 colorimage")
elif im.mode == "CMYK":
operator = (8, 4, "false 4 colorimage")
else:
raise ValueError("image mode is not supported")
class NoCloseStream(object):
def __init__(self, fp):
self.fp = fp
def __getattr__(self, name):
return getattr(self.fp, name)
def close(self):
pass
base_fp = fp
if fp != sys.stdout:
fp = NoCloseStream(fp)
if sys.version_info[0] > 2:
fp = io.TextIOWrapper(fp, encoding='latin-1')
if eps:
#
# write EPS header
fp.write("%!PS-Adobe-3.0 EPSF-3.0\n")
fp.write("%%Creator: PIL 0.1 EpsEncode\n")
# fp.write("%%CreationDate: %s"...)
fp.write("%%%%BoundingBox: 0 0 %d %d\n" % im.size)
fp.write("%%Pages: 1\n")
fp.write("%%EndComments\n")
fp.write("%%Page: 1 1\n")
fp.write("%%ImageData: %d %d " % im.size)
fp.write("%d %d 0 1 1 \"%s\"\n" % operator)
#
# image header
fp.write("gsave\n")
fp.write("10 dict begin\n")
fp.write("/buf %d string def\n" % (im.size[0] * operator[1]))
fp.write("%d %d scale\n" % im.size)
fp.write("%d %d 8\n" % im.size) # <= bits
fp.write("[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
fp.write("{ currentfile buf readhexstring pop } bind\n")
fp.write(operator[2] + "\n")
if hasattr(fp, "flush"):
fp.flush()
ImageFile._save(im, base_fp, [("eps", (0, 0)+im.size, 0, None)])
fp.write("\n%%%%EndBinary\n")
fp.write("grestore end\n")
if hasattr(fp, "flush"):
fp.flush()
#
# --------------------------------------------------------------------
Image.register_open(EpsImageFile.format, EpsImageFile, _accept)
Image.register_save(EpsImageFile.format, _save)
Image.register_extension(EpsImageFile.format, ".ps")
Image.register_extension(EpsImageFile.format, ".eps")
Image.register_mime(EpsImageFile.format, "application/postscript")
|
noam-stratoscale/rackattack-api
|
refs/heads/master
|
test/twistedserver_publishperiodically.py
|
3
|
from twisted.internet import reactor
from rackattack.tcp import publish
import threading
import sys
import time
class Do(threading.Thread):
def __init__(self, pub):
self._pub = pub
threading.Thread.__init__(self)
self.daemon = True
threading.Thread.start(self)
def run(self):
while True:
time.sleep(0.1)
self._pub.publish('default topic', dict(data="fake data"))
factory = publish.PublishFactory()
reactor.listenTCP(int(sys.argv[1]), factory)
do = Do(factory)
reactor.run()
|
MarcdeFalco/dagger
|
refs/heads/master
|
knowledge/migrations/0010_atomrelationshiptype_fuse_into.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('knowledge', '0009_atomtype_important'),
]
operations = [
migrations.AddField(
model_name='atomrelationshiptype',
name='fuse_into',
field=models.BooleanField(default=False),
),
]
|
pkubatrh/s2i-python-container
|
refs/heads/master
|
examples/npm-virtualenv-uwsgi-test-app/wsgi.py
|
25
|
from flask import Flask
application = Flask(__name__)
@application.route('/')
def hello():
return b'Hello World from uWSGI hosted WSGI application!'
if __name__ == '__main__':
application.run()
|
jantman/biweeklybudget
|
refs/heads/master
|
biweeklybudget/tests/acceptance/flaskapp/views/test_reconcile.py
|
1
|
"""
The latest version of this package is available at:
<http://github.com/jantman/biweeklybudget>
################################################################################
Copyright 2016 Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
This file is part of biweeklybudget, also known as biweeklybudget.
biweeklybudget is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
biweeklybudget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with biweeklybudget. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/biweeklybudget> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
################################################################################
"""
import pytest
from datetime import datetime, date
from pytz import UTC
from decimal import Decimal
import re
import json
from time import sleep
from selenium.webdriver import ActionChains
import requests
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from biweeklybudget.utils import dtnow, fmt_currency
from biweeklybudget.tests.acceptance_helpers import AcceptanceHelper
from biweeklybudget.models import *
from biweeklybudget.tests.sqlhelpers import restore_mysqldump
from biweeklybudget.tests.conftest import get_db_engine
dnow = dtnow()
def txn_div(id, dt, amt, acct_name, acct_id,
budget_name, budget_id, desc, drop_div=''):
"""
Return the HTML for a Transaction div.
:param drop_div: contents of ``reconcile-drop-target`` div
:type drop_div: str
:return: HTML for Transaction reconcile div
:rtype: str
"""
s = '<div class="reconcile reconcile-trans ui-droppable" ' \
'id="trans-%s" data-trans-id="%s" data-acct-id="%s" data-amt="%s">' % (
id, id, acct_id, amt
)
s += '<div class="row">'
s += '<div class="col-lg-3">%s</div>' % dt.strftime('%Y-%m-%d')
s += '<div class="col-lg-3">%s</div>' % fmt_currency(amt)
s += '<div class="col-lg-3"><strong>Acct:</strong> '
s += '<span style="white-space: nowrap;">'
s += '<a href="/accounts/%s">%s (%s)</a>' % (acct_id, acct_name, acct_id)
s += '</span></div>'
s += '<div class="col-lg-3"><strong>Budget:</strong> '
s += '<span style="white-space: nowrap;">'
if isinstance(budget_name, type([])) and budget_id is None:
b1 = budget_name[0]
b2 = budget_name[1]
s += '<a href="/budgets/%s">%s (%s) (%s)</a><br>' % (
b1[1], b1[0], b1[1], b1[2]
)
s += '<a href="/budgets/%s">%s (%s) (%s)</a>' % (
b2[1], b2[0], b2[1], b2[2]
)
else:
s += '<a href="/budgets/%s">%s (%s)</a>' % (
budget_id, budget_name, budget_id
)
s += '</span></div>'
s += '</div>'
s += '<div class="row"><div class="col-lg-12">'
if drop_div == '':
s += '<div style="float: left;"><a href="javascript:transModal('
s += '%s, function () { updateReconcileTrans(%s) })">Trans %s</a>' \
': %s</div>' % (id, id, id, desc)
s += '<div style="float: right;" class="trans-no-ofx"><a ' \
'href="javascript:transNoOfx(%s)" style="" title="Reconcile ' \
'as never having a matching OFX Transaction">(no OFX)</a>' \
'</div>' % id
else:
s += '<div style="float: left;"><span class="disabledEditLink">' \
'Trans %s</span>: %s</div>' % (id, desc)
s += '<div style="float: right;" class="trans-no-ofx"><a ' \
'href="javascript:transNoOfx(%s)" style="display: none;" ' \
'title="Reconcile as never having a matching OFX Transaction">' \
'(no OFX)</a></div>' % id
s += '</div></div>'
s += '<div class="reconcile-drop-target">%s</div>' % drop_div
s += '</div>'
return s
def clean_fitid(fitid):
return re.sub(r'\W', '', fitid)
def ofx_div(dt_posted, amt, acct_name, acct_id, trans_type, fitid, name,
trans_id=None, ignored_reason=None):
"""
Return the HTML for an OFXTransaction div.
:param trans_id: if dropped on a Transaction div, the trans_id
:type trans_id: int
:return: HTML for OFXTransaction reconcile div
:rtype: str
"""
cfitid = clean_fitid(fitid)
if int(amt) == amt:
# JS doesn't put the trailing decimal on a ".0" number
amt = int(amt)
if trans_id is not None:
classes = 'reconcile reconcile-ofx-dropped'
_id = 'dropped-ofx-%s-%s' % (acct_id, cfitid)
else:
classes = 'reconcile reconcile-ofx ui-draggable ui-draggable-handle'
_id = 'ofx-%s-%s' % (acct_id, cfitid)
if ignored_reason is not None:
classes += ' ui-draggable-disabled'
s = '<div class="%s" id="%s" data-acct-id="%s" ' \
'data-amt="%s" data-fitid="%s" style="">' % (
classes, _id, acct_id, amt, fitid
)
if ignored_reason is not None:
s += '<div class="row" id="ofx-%s-%s-noTrans" style=""><div ' \
'class="col-lg-12"><p><strong>No Trans:</strong> %s</p></div>' \
'</div>' % (acct_id, fitid, ignored_reason)
s += '<div class="row">'
s += '<div class="col-lg-3">%s</div>' % dt_posted.strftime('%Y-%m-%d')
s += '<div class="col-lg-3">%s</div>' % fmt_currency(amt)
s += '<div class="col-lg-3"><strong>Acct:</strong> '
s += '<span style="white-space: nowrap;">'
s += '<a href="/accounts/%s">%s (%s)</a>' % (acct_id, acct_name, acct_id)
s += '</span></div>'
s += '<div class="col-lg-3"><strong>Type:</strong> %s</div>' % trans_type
s += '</div>'
s += '<div class="row"><div class="col-lg-12">'
s += '<div style="float: left;">'
s += '<a href="javascript:ofxTransModal(%s, \'%s\', false)">%s</a>' % (
acct_id, cfitid, fitid
)
s += ': %s' % name
s += '</div>'
if trans_id is None and ignored_reason is None:
s += '<div style="float: right;" class="make-trans-link">'
s += '<a href="javascript:makeTransFromOfx(%d, \'%s\')" ' \
'title="Create Transaction from this OFX">(make trans)</a>' % (
acct_id, fitid
)
s += '<a href="javascript:ignoreOfxTrans(%d, \'%s\')" ' \
'title="Ignore this OFX Transaction">(ignore)</a>' % (
acct_id, fitid
)
s += '</div>'
elif ignored_reason is not None:
s += '<div style="float: right;" class="make-trans-link">'
s += '<a href="javascript:reconcileDoUnreconcileNoTrans(%s, \'%s\')">' \
'Unignore</a>' % (
acct_id, fitid
)
s += '</div>'
s += '</div>'
s += '</div></div>'
if trans_id is not None:
return '<div style="text-align: right;"><a href="javascript:reconc' \
'ileDoUnreconcile(%s, %s, \'%s\')">Unreconcile</a></div>%s' % (
trans_id, acct_id, fitid, s
)
return s
@pytest.mark.acceptance
@pytest.mark.usefixtures('refreshdb', 'testflask')
class TestReconcile(AcceptanceHelper):
@pytest.fixture(autouse=True)
def get_page(self, base_url, selenium):
self.baseurl = base_url
self.get(selenium, base_url + '/reconcile')
def test_heading(self, selenium):
heading = selenium.find_element_by_class_name('navbar-brand')
assert heading.text == 'Reconcile Transactions - BiweeklyBudget'
def test_nav_menu(self, selenium):
ul = selenium.find_element_by_id('side-menu')
assert ul is not None
assert 'nav' in ul.get_attribute('class')
assert ul.tag_name == 'ul'
def test_notifications(self, selenium):
div = selenium.find_element_by_id('notifications-row')
assert div is not None
assert div.get_attribute('class') == 'row'
class ReconcileHelper(AcceptanceHelper):
def get_reconciled(self, driver):
"""
Execute javascript in the selenium browser to return the
``reconciled`` JavaScript object as a JSON string; deserialize the
JSON and return the resulting dict.
:param driver: Selenium driver instance
:type driver: selenium.webdriver.remote.webdriver.WebDriver
:return: ``reconciled`` javascript variable from page
:rtype: dict
"""
script = 'return JSON.stringify(reconciled);'
res = driver.execute_script(script)
print("reconciled JSON: %s" % res)
r = json.loads(res)
return {int(x): r[x] for x in r}
def test_00_clean_db(self, dump_file_path):
# clean the database; empty schema
restore_mysqldump(dump_file_path, get_db_engine(), with_data=False)
def test_01_add_accounts(self, testdb):
a = Account(
description='First Bank Account',
name='BankOne',
ofx_cat_memo_to_name=True,
ofxgetter_config_json='{"foo": "bar"}',
vault_creds_path='secret/foo/bar/BankOne',
acct_type=AcctType.Bank,
re_interest_charge='^interest-charge',
re_payment='^(payment|thank you)',
re_late_fee='^Late Fee',
re_other_fee='^re-other-fee'
)
testdb.add(a)
a.set_balance(
overall_date=datetime(2017, 4, 10, 12, 0, 0, tzinfo=UTC),
ledger=Decimal('1.0'),
ledger_date=datetime(2017, 4, 10, 12, 0, 0, tzinfo=UTC)
)
b = Account(
description='Second Bank Account',
name='BankTwo',
acct_type=AcctType.Bank,
negate_ofx_amounts=True
)
testdb.add(b)
b.set_balance(
overall_date=datetime(2017, 4, 10, 12, 0, 0, tzinfo=UTC),
ledger=Decimal('1.0'),
ledger_date=datetime(2017, 4, 10, 12, 0, 0, tzinfo=UTC)
)
testdb.flush()
testdb.commit()
def test_02_add_budgets(self, testdb):
testdb.add(Budget(
name='1Income',
is_periodic=True,
description='1Income',
starting_balance=Decimal('0.0'),
is_income=True
))
testdb.add(Budget(
name='2Periodic',
is_periodic=True,
description='2Periodic',
starting_balance=Decimal('500.00')
))
testdb.add(Budget(
name='3Periodic',
is_periodic=True,
description='3Periodic',
starting_balance=Decimal('0.00')
))
testdb.flush()
testdb.commit()
def test_03_add_transactions(self, testdb):
acct1 = testdb.query(Account).get(1)
acct2 = testdb.query(Account).get(2)
ibudget = testdb.query(Budget).get(1)
e1budget = testdb.query(Budget).get(2)
e2budget = testdb.query(Budget).get(3)
# income - matches OFX1
t1 = Transaction(
date=date(2017, 4, 10),
budget_amounts={ibudget: Decimal('-100.00')},
budgeted_amount=Decimal('-100.00'),
description='income',
account=acct1,
planned_budget=ibudget
)
testdb.add(t1)
# one transaction - matches OFX2
t2 = Transaction(
date=date(2017, 4, 10),
budget_amounts={e2budget: Decimal('250.00')},
description='trans1',
account=acct1
)
testdb.add(t2)
# another transaction - matches OFX3
st1 = ScheduledTransaction(
amount=Decimal('500.0'),
description='ST1',
account=acct2,
budget=e1budget,
date=date(2017, 4, 10)
)
testdb.add(st1)
t3 = Transaction(
date=date(2017, 4, 11),
budget_amounts={
e1budget: Decimal('590.00'),
e2budget: Decimal('10.00')
},
budgeted_amount=Decimal('500.0'),
description='trans2',
account=acct2,
planned_budget=e1budget,
scheduled_trans=st1
)
testdb.add(t3)
# non-matched transaction
t4 = Transaction(
date=date(2017, 4, 14),
budget_amounts={e2budget: Decimal('10.00')},
description='trans3',
account=acct2
)
testdb.add(t4)
# matched ScheduledTransaction
st2 = ScheduledTransaction(
amount=Decimal('10.0'),
description='ST2',
account=acct1,
budget=e2budget,
day_of_month=13
)
testdb.add(st2)
# pair that matches OFXT6 and OFXT7
t5 = Transaction(
date=date(2017, 4, 16),
budget_amounts={e2budget: Decimal('25.00')},
description='trans4',
account=acct2
)
testdb.add(t5)
t6 = Transaction(
date=date(2017, 4, 17),
budget_amounts={e2budget: Decimal('25.00')},
description='trans5',
account=acct2
)
testdb.add(t6)
testdb.flush()
testdb.commit()
def test_04_add_ofx(self, testdb):
acct1 = testdb.query(Account).get(1)
acct2 = testdb.query(Account).get(2)
stmt1 = OFXStatement(
account=acct1,
filename='a1.ofx',
file_mtime=dnow,
as_of=dnow,
currency='USD',
acctid='1',
bankid='b1',
routing_number='r1'
)
testdb.add(stmt1)
stmt2 = OFXStatement(
account=acct2,
filename='a2.ofx',
file_mtime=dnow,
as_of=dnow,
currency='USD',
acctid='2',
bankid='b2',
routing_number='r2'
)
testdb.add(stmt2)
################
# transactions #
################
# matches Transaction 1
testdb.add(OFXTransaction(
account=acct1,
statement=stmt1,
fitid='OFX1',
trans_type='Deposit',
date_posted=datetime(2017, 4, 10, 12, 3, 4, tzinfo=UTC),
amount=Decimal('-100.0'),
name='ofx1-income'
))
# matches Transaction 2
testdb.add(OFXTransaction(
account=acct1,
statement=stmt1,
fitid='OFX2',
trans_type='Debit',
date_posted=datetime(2017, 4, 11, 12, 3, 4, tzinfo=UTC),
amount=Decimal('250.0'),
name='ofx2-trans1'
))
        # matches Transaction 3
testdb.add(OFXTransaction(
account=acct2,
statement=stmt2,
fitid='OFX3',
trans_type='Purchase',
date_posted=datetime(2017, 4, 9, 12, 3, 4, tzinfo=UTC),
amount=Decimal('-600.0'),
name='ofx3-trans2-st1'
))
# non-matched - have Transaction 4 same amt but wrong acct
testdb.add(OFXTransaction(
account=acct1,
statement=stmt1,
fitid='OFXT4',
trans_type='Purchase',
date_posted=datetime(2017, 4, 14, 12, 3, 4, tzinfo=UTC),
amount=Decimal('10.0'),
name='ofx4-st2'
))
# matches ScheduledTransaction 2
testdb.add(OFXTransaction(
account=acct1,
statement=stmt1,
fitid='OFXT5',
trans_type='Foo',
date_posted=datetime(2017, 4, 16, 12, 3, 4, tzinfo=UTC),
amount=Decimal('10.0'),
name='ofx5'
))
        # pair of matched transactions - Transactions 5 and 6
testdb.add(OFXTransaction(
account=acct2,
statement=stmt2,
fitid='OFXT6',
trans_type='Foo',
date_posted=datetime(2017, 4, 16, 12, 3, 4, tzinfo=UTC),
amount=Decimal('-25.0'),
name='ofx6'
))
testdb.add(OFXTransaction(
account=acct2,
statement=stmt2,
fitid='OFXT7',
trans_type='Foo',
date_posted=datetime(2017, 4, 17, 12, 3, 4, tzinfo=UTC),
amount=Decimal('-25.0'),
name='ofx7'
))
testdb.flush()
testdb.commit()
def test_05_add_reconciled(self, testdb):
acct2 = testdb.query(Account).get(2)
stmt2 = testdb.query(OFXStatement).get(2)
e2budget = testdb.query(Budget).get(3)
o = OFXTransaction(
account=acct2,
statement=stmt2,
fitid='OFX8',
trans_type='Purchase',
date_posted=datetime(2017, 4, 17, 12, 3, 4, tzinfo=UTC),
amount=Decimal('-600.0'),
name='ofx8-trans4'
)
testdb.add(o)
t = Transaction(
date=date(2017, 4, 16),
budget_amounts={e2budget: Decimal('600.00')},
description='trans6',
account=acct2
)
testdb.add(t)
testdb.add(TxnReconcile(transaction=t, ofx_trans=o))
testdb.flush()
testdb.commit()
@pytest.mark.acceptance
@pytest.mark.usefixtures('class_refresh_db', 'refreshdb')
@pytest.mark.incremental
class TestColumns(ReconcileHelper):
def test_06_transactions(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
trans_div = selenium.find_element_by_id('trans-panel')
actual_trans = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in trans_div.find_elements_by_class_name('reconcile-trans')
]
expected_trans = [
txn_div(
1,
date(2017, 4, 10),
-100,
'BankOne', 1,
'1Income', 1,
'income'
),
txn_div(
2,
date(2017, 4, 10),
250,
'BankOne', 1,
'3Periodic', 3,
'trans1'
),
txn_div(
3,
date(2017, 4, 11),
600,
'BankTwo', 2,
[
['2Periodic', 2, '$590.00'],
['3Periodic', 3, '$10.00']
],
None,
'trans2'
),
txn_div(
4,
date(2017, 4, 14),
10,
'BankTwo', 2,
'3Periodic', 3,
'trans3'
),
txn_div(
5,
date(2017, 4, 16),
25,
'BankTwo', 2,
'3Periodic', 3,
'trans4'
),
txn_div(
6,
date(2017, 4, 17),
25,
'BankTwo', 2,
'3Periodic', 3,
'trans5'
)
]
assert actual_trans == expected_trans
def test_07_verify_unreconciled_ofxtrans(self, testdb):
assert len(OFXTransaction.unreconciled(testdb).all()) == 7
def test_08_add_ignored_ofxtrans(self, testdb):
"""
        Add OFXTransactions that should not be listed because of their is_* fields.
"""
acct = testdb.query(Account).get(1)
assert acct.re_interest_charge == '^interest-charge'
assert acct.re_interest_paid is None
assert acct.re_payment == '^(payment|thank you)'
assert acct.re_late_fee == '^Late Fee'
assert acct.re_other_fee == '^re-other-fee'
stmt = testdb.query(OFXStatement).get(1)
assert stmt.account_id == 1
assert stmt.filename == 'a1.ofx'
testdb.add(OFXTransaction(
account=acct,
statement=stmt,
fitid='BankOne.77.1',
trans_type='Debit',
date_posted=stmt.ledger_bal_as_of,
amount=Decimal('-20.00'),
name='interest-charge BankOne.77.1'
))
testdb.add(OFXTransaction(
account=acct,
statement=stmt,
fitid='BankOne.77.2',
trans_type='Debit',
date_posted=stmt.ledger_bal_as_of,
amount=Decimal('-20.00'),
name='payment BankOne.77.2'
))
testdb.add(OFXTransaction(
account=acct,
statement=stmt,
fitid='BankOne.77.3',
trans_type='Debit',
date_posted=stmt.ledger_bal_as_of,
amount=Decimal('-20.00'),
name='thank you BankOne.77.3'
))
testdb.add(OFXTransaction(
account=acct,
statement=stmt,
fitid='BankOne.77.4',
trans_type='Debit',
date_posted=stmt.ledger_bal_as_of,
amount=Decimal('-20.00'),
name='Late Fee BankOne.77.4'
))
testdb.add(OFXTransaction(
account=acct,
statement=stmt,
fitid='BankOne.77.5',
trans_type='Debit',
date_posted=stmt.ledger_bal_as_of,
amount=Decimal('-20.00'),
name='re-other-fee BankOne.77.5'
))
testdb.commit()
def test_09_verify_unreconciled_ofxtrans(self, testdb):
assert len(OFXTransaction.unreconciled(testdb).all()) == 7
def test_10_ofxtrans(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
ofxtrans_div = selenium.find_element_by_id('ofx-panel')
actual_ofx = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in ofxtrans_div.find_elements_by_class_name('reconcile-ofx')
]
expected_ofx = [
ofx_div(
date(2017, 4, 9),
Decimal('600.00'),
'BankTwo', 2,
'Purchase',
'OFX3',
'ofx3-trans2-st1'
),
ofx_div(
date(2017, 4, 10),
-100,
'BankOne', 1,
'Deposit',
'OFX1',
'ofx1-income'
),
ofx_div(
date(2017, 4, 11),
250,
'BankOne', 1,
'Debit',
'OFX2',
'ofx2-trans1'
),
ofx_div(
date(2017, 4, 14),
10,
'BankOne', 1,
'Purchase',
'OFXT4',
'ofx4-st2'
),
ofx_div(
date(2017, 4, 16),
10,
'BankOne', 1,
'Foo',
'OFXT5',
'ofx5'
),
ofx_div(
date(2017, 4, 16),
25,
'BankTwo', 2,
'Foo',
'OFXT6',
'ofx6'
),
ofx_div(
date(2017, 4, 17),
25,
'BankTwo', 2,
'Foo',
'OFXT7',
'ofx7'
)
]
assert expected_ofx == actual_ofx
@pytest.mark.acceptance
@pytest.mark.usefixtures('class_refresh_db', 'refreshdb')
@pytest.mark.incremental
class TestAccountReconcileFalse(ReconcileHelper):
def test_06_transactions(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
trans_div = selenium.find_element_by_id('trans-panel')
actual_trans = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in trans_div.find_elements_by_class_name('reconcile-trans')
]
expected_trans = [
txn_div(
1,
date(2017, 4, 10),
-100,
'BankOne', 1,
'1Income', 1,
'income'
),
txn_div(
2,
date(2017, 4, 10),
250,
'BankOne', 1,
'3Periodic', 3,
'trans1'
),
txn_div(
3,
date(2017, 4, 11),
600,
'BankTwo', 2,
[
['2Periodic', 2, '$590.00'],
['3Periodic', 3, '$10.00']
],
None,
'trans2'
),
txn_div(
4,
date(2017, 4, 14),
10,
'BankTwo', 2,
'3Periodic', 3,
'trans3'
),
txn_div(
5,
date(2017, 4, 16),
25,
'BankTwo', 2,
'3Periodic', 3,
'trans4'
),
txn_div(
6,
date(2017, 4, 17),
25,
'BankTwo', 2,
'3Periodic', 3,
'trans5'
)
]
assert actual_trans == expected_trans
def test_07_ofxtrans(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
ofxtrans_div = selenium.find_element_by_id('ofx-panel')
actual_ofx = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in ofxtrans_div.find_elements_by_class_name('reconcile-ofx')
]
expected_ofx = [
ofx_div(
date(2017, 4, 9),
Decimal('600.00'),
'BankTwo', 2,
'Purchase',
'OFX3',
'ofx3-trans2-st1'
),
ofx_div(
date(2017, 4, 10),
-100,
'BankOne', 1,
'Deposit',
'OFX1',
'ofx1-income'
),
ofx_div(
date(2017, 4, 11),
250,
'BankOne', 1,
'Debit',
'OFX2',
'ofx2-trans1'
),
ofx_div(
date(2017, 4, 14),
10,
'BankOne', 1,
'Purchase',
'OFXT4',
'ofx4-st2'
),
ofx_div(
date(2017, 4, 16),
10,
'BankOne', 1,
'Foo',
'OFXT5',
'ofx5'
),
ofx_div(
date(2017, 4, 16),
25,
'BankTwo', 2,
'Foo',
'OFXT6',
'ofx6'
),
ofx_div(
date(2017, 4, 17),
25,
'BankTwo', 2,
'Foo',
'OFXT7',
'ofx7'
)
]
assert expected_ofx == actual_ofx
def test_08_set_do_not_reconcile(self, testdb):
acct = testdb.query(Account).get(2)
acct.reconcile_trans = False
testdb.flush()
testdb.commit()
def test_09_transactions(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
trans_div = selenium.find_element_by_id('trans-panel')
actual_trans = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in trans_div.find_elements_by_class_name('reconcile-trans')
]
expected_trans = [
txn_div(
1,
date(2017, 4, 10),
-100,
'BankOne', 1,
'1Income', 1,
'income'
),
txn_div(
2,
date(2017, 4, 10),
250,
'BankOne', 1,
'3Periodic', 3,
'trans1'
)
]
assert actual_trans == expected_trans
def test_10_ofxtrans(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
ofxtrans_div = selenium.find_element_by_id('ofx-panel')
actual_ofx = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in ofxtrans_div.find_elements_by_class_name('reconcile-ofx')
]
expected_ofx = [
ofx_div(
date(2017, 4, 10),
-100,
'BankOne', 1,
'Deposit',
'OFX1',
'ofx1-income'
),
ofx_div(
date(2017, 4, 11),
250,
'BankOne', 1,
'Debit',
'OFX2',
'ofx2-trans1'
),
ofx_div(
date(2017, 4, 14),
10,
'BankOne', 1,
'Purchase',
'OFXT4',
'ofx4-st2'
),
ofx_div(
date(2017, 4, 16),
10,
'BankOne', 1,
'Foo',
'OFXT5',
'ofx5'
)
]
assert expected_ofx == actual_ofx
@pytest.mark.acceptance
@pytest.mark.usefixtures('class_refresh_db', 'refreshdb')
@pytest.mark.incremental
class TestTransactionEditModal(ReconcileHelper):
def test_06_verify_db(self, testdb):
t = testdb.query(Transaction).get(1)
assert t is not None
assert t.description == 'income'
assert t.date == date(2017, 4, 10)
assert t.actual_amount == Decimal('-100.00')
assert t.account_id == 1
assert t.planned_budget_id == 1
assert len(t.budget_transactions) == 1
assert t.budget_transactions[0].budget_id == 1
assert t.budget_transactions[0].amount == Decimal('-100.00')
def test_07_edit(self, base_url, selenium):
self.baseurl = base_url
self.get(selenium, base_url + '/reconcile')
link = selenium.find_element_by_xpath('//a[text()="Trans 1"]')
modal, title, body = self.try_click_and_get_modal(selenium, link)
self.assert_modal_displayed(modal, title, body)
assert title.text == 'Edit Transaction 1'
assert body.find_element_by_id(
'trans_frm_id').get_attribute('value') == '1'
amt = body.find_element_by_id('trans_frm_amount')
amt.clear()
amt.send_keys('-123.45')
desc = body.find_element_by_id('trans_frm_description')
desc.send_keys('edited')
# submit the form
selenium.find_element_by_id('modalSaveButton').click()
self.wait_for_jquery_done(selenium)
# check that we got positive confirmation
_, _, body = self.get_modal_parts(selenium)
x = body.find_elements_by_tag_name('div')[0]
assert 'alert-success' in x.get_attribute('class')
assert x.text.strip() == 'Successfully saved Transaction 1 ' \
'in database.'
# dismiss the modal
selenium.find_element_by_id('modalCloseButton').click()
self.wait_for_jquery_done(selenium)
# test that updated budget was removed from the page
trans_div = selenium.find_element_by_id('trans-panel')
actual_trans = [
self.normalize_html(t.get_attribute('outerHTML'))
for t in trans_div.find_elements_by_class_name('reconcile-trans')
]
expected_trans = [
txn_div(
1,
date(2017, 4, 10),
Decimal('-123.45'),
'BankOne', 1,
'1Income', 1,
'incomeedited'
),
txn_div(
2,
date(2017, 4, 10),
250,
'BankOne', 1,
'3Periodic', 3,
'trans1'
),
txn_div(
3,
date(2017, 4, 11),
600,
'BankTwo', 2,
[
['2Periodic', 2, '$590.00'],
['3Periodic', 3, '$10.00']
],
None,
'trans2'
),
txn_div(
4,
date(2017, 4, 14),
10,
'BankTwo', 2,
'3Periodic', 3,
'trans3'
),
txn_div(
5,
date(2017, 4, 16),
25,
'BankTwo', 2,
'3Periodic', 3,
'trans4'
),
txn_div(
6,
date(2017, 4, 17),
25,
'BankTwo', 2,
'3Periodic', 3,
'trans5'
)
]
assert actual_trans == expected_trans
@pytest.mark.acceptance
@pytest.mark.usefixtures('class_refresh_db', 'refreshdb')
@pytest.mark.incremental
class TestDragLimitations(ReconcileHelper):
def test_06_success(self, base_url, selenium):
self.baseurl = base_url
self.get(selenium, base_url + '/reconcile')
src = selenium.find_element_by_id('ofx-2-OFX3')
tgt = selenium.find_element_by_id(
'trans-3').find_element_by_class_name('reconcile-drop-target')
# drag and drop
chain = ActionChains(selenium)
chain.drag_and_drop(src, tgt).perform()
# ensure that the OFX div was hidden in the OFX column
src = selenium.find_element_by_id('ofx-2-OFX3')
assert src.is_displayed() is False
# ensure that the OFX div was placed in the drop target
tgt = selenium.find_element_by_id('trans-3')
expected = txn_div(
3,
date(2017, 4, 11),
600,
'BankTwo', 2,
[
['2Periodic', 2, '$590.00'],
['3Periodic', 3, '$10.00']
],
None,
'trans2',
drop_div=ofx_div(
date(2017, 4, 9),
Decimal('600.00'),
'BankTwo', 2,
'Purchase',
'OFX3',
'ofx3-trans2-st1',
trans_id=3
)
)
assert self.normalize_html(tgt.get_attribute('outerHTML')) == expected
# ensure the reconciled variable was updated
assert self.get_reconciled(selenium) == {
3: [2, 'OFX3']
}
def test_07_already_has_ofx(self, base_url, selenium):
self.baseurl = base_url
self.get(selenium, base_url + '/reconcile')
src = selenium.find_element_by_id('ofx-2-OFXT6')
src2 = selenium.find_element_by_id('ofx-2-OFXT7')
tgt = selenium.find_element_by_id(
'trans-5').find_element_by_class_name('reconcile-drop-target')
# drag and drop
chain = ActionChains(selenium)
chain.drag_and_drop(src, tgt).perform()
# ensure that the OFX div was hidden in the OFX column
src = selenium.find_element_by_id('ofx-2-OFXT6')
assert src.is_displayed() is False
# ensure that the OFX div was placed in the drop target
tgt = selenium.find_element_by_id('trans-5')
expected = txn_div(
5,
date(2017, 4, 16),
25,
'BankTwo', 2,
'3Periodic', 3,
'trans4',
drop_div=ofx_div(
date(2017, 4, 16),
25,
'BankTwo', 2,
'Foo',
'OFXT6',
'ofx6',
trans_id=5
)
)
assert self.normalize_html(tgt.get_attribute('outerHTML')) == expected
# ensure the reconciled variable was updated
assert self.get_reconciled(selenium) == {
5: [2, 'OFXT6']
}
# get the innerHTML of both columns
trans_div = selenium.find_element_by_id('trans-panel').get_attribute(
'innerHTML')
ofxtrans_div = selenium.find_element_by_id('ofx-panel').get_attribute(
'innerHTML')
# attempt to drag the other OFX
chain = ActionChains(selenium)
chain.drag_and_drop(src2, tgt).perform()
# sleep a bit for the drag to stop
sleep(1)
# ensure both columns are still the same
assert selenium.find_element_by_id('trans-panel').get_attribute(
'innerHTML') == trans_div
assert selenium.find_element_by_id('ofx-panel').get_attribute(
'innerHTML') == ofxtrans_div
# ensure reconciled JS var is still the same
assert self.get_reconciled(selenium) == {
5: [2, 'OFXT6']
}
def test_08_wrong_account(self, base_url, selenium):
self.baseurl = base_url
self.get(selenium, base_url + '/reconcile')
src = selenium.find_element_by_id('ofx-1-OFXT4')
tgt = selenium.find_element_by_id(
'trans-4').find_element_by_class_name('reconcile-drop-target')
# get the innerHTML of both columns
trans_div = selenium.find_element_by_id('trans-panel').get_attribute(
'innerHTML')
ofxtrans_div = selenium.find_element_by_id('ofx-panel').get_attribute(
'innerHTML')
# drag and drop
chain = ActionChains(selenium)
chain.drag_and_drop(src, tgt).perform()
# sleep a bit for the drag to stop
sleep(1)
# ensure both columns are still the same
assert selenium.find_element_by_id('trans-panel').get_attribute(
'innerHTML') == trans_div
assert selenium.find_element_by_id('ofx-panel').get_attribute(
'innerHTML') == ofxtrans_div
# ensure reconciled JS var is still the same
assert self.get_reconciled(selenium) == {}
def test_09_wrong_amount(self, base_url, selenium):
self.baseurl = base_url
self.get(selenium, base_url + '/reconcile')
src = selenium.find_element_by_id('ofx-1-OFXT4')
tgt = selenium.find_element_by_id(
'trans-1').find_element_by_class_name('reconcile-drop-target')
# get the innerHTML of both columns
trans_div = selenium.find_element_by_id('trans-panel').get_attribute(
'innerHTML')
ofxtrans_div = selenium.find_element_by_id('ofx-panel').get_attribute(
'innerHTML')
# drag and drop
chain = ActionChains(selenium)
chain.drag_and_drop(src, tgt).perform()
# sleep a bit for the drag to stop
sleep(1)
# ensure both columns are still the same
assert selenium.find_element_by_id('trans-panel').get_attribute(
'innerHTML') == trans_div
assert selenium.find_element_by_id('ofx-panel').get_attribute(
'innerHTML') == ofxtrans_div
# ensure reconciled JS var is still the same
assert self.get_reconciled(selenium) == {}
def test_10_wrong_acct_and_amount(self, base_url, selenium):
self.baseurl = base_url
self.get(selenium, base_url + '/reconcile')
src = selenium.find_element_by_id('ofx-1-OFXT4')
tgt = selenium.find_element_by_id(
'trans-3').find_element_by_class_name('reconcile-drop-target')
# get the innerHTML of both columns
trans_div = selenium.find_element_by_id('trans-panel').get_attribute(
'innerHTML')
ofxtrans_div = selenium.find_element_by_id('ofx-panel').get_attribute(
'innerHTML')
# drag and drop
chain = ActionChains(selenium)
chain.drag_and_drop(src, tgt).perform()
# sleep a bit for the drag to stop
sleep(1)
# ensure both columns are still the same
assert selenium.find_element_by_id('trans-panel').get_attribute(
'innerHTML') == trans_div
assert selenium.find_element_by_id('ofx-panel').get_attribute(
'innerHTML') == ofxtrans_div
# ensure reconciled JS var is still the same
assert self.get_reconciled(selenium) == {}
def test_11_unreconcile(self, base_url, selenium):
self.baseurl = base_url
self.get(selenium, base_url + '/reconcile')
src = selenium.find_element_by_id('ofx-2-OFX3')
tgt = selenium.find_element_by_id(
'trans-3').find_element_by_class_name('reconcile-drop-target')
# drag and drop
chain = ActionChains(selenium)
chain.drag_and_drop(src, tgt).perform()
# ensure that the OFX div was hidden in the OFX column
src = selenium.find_element_by_id('ofx-2-OFX3')
assert src.is_displayed() is False
# ensure that the OFX div was placed in the drop target
tgt = selenium.find_element_by_id('trans-3')
expected = txn_div(
3,
date(2017, 4, 11),
600,
'BankTwo', 2,
[
['2Periodic', 2, '$590.00'],
['3Periodic', 3, '$10.00']
],
None,
'trans2',
drop_div=ofx_div(
date(2017, 4, 9),
Decimal('600.00'),
'BankTwo', 2,
'Purchase',
'OFX3',
'ofx3-trans2-st1',
trans_id=3
)
)
assert self.normalize_html(tgt.get_attribute('outerHTML')) == expected
# ensure the reconciled variable was updated
assert self.get_reconciled(selenium) == {
3: [2, 'OFX3']
}
# unreconcile
link = tgt.find_element_by_xpath('//a[text()="Unreconcile"]')
link.click()
src = selenium.find_element_by_id('ofx-2-OFX3')
tgt = selenium.find_element_by_id('trans-3')
assert src.is_displayed() is True
assert self.normalize_html(src.get_attribute('outerHTML')) == ofx_div(
date(2017, 4, 9),
Decimal('600.00'),
'BankTwo', 2,
'Purchase',
'OFX3',
'ofx3-trans2-st1'
)
assert tgt.find_element_by_class_name(
'reconcile-drop-target').get_attribute('innerHTML') == ''
assert self.get_reconciled(selenium) == {}
expected = txn_div(
3,
date(2017, 4, 11),
600,
'BankTwo', 2,
[
['2Periodic', 2, '$590.00'],
['3Periodic', 3, '$10.00']
],
None,
'trans2'
)
assert self.normalize_html(tgt.get_attribute('outerHTML')) == expected
@pytest.mark.acceptance
@pytest.mark.usefixtures('class_refresh_db', 'refreshdb')
@pytest.mark.incremental
class TestDragAndDropReconcile(ReconcileHelper):
def test_06_verify_db(self, testdb):
res = testdb.query(TxnReconcile).all()
assert len(res) == 1
assert res[0].id == 1
assert res[0].txn_id == 7
assert res[0].ofx_account_id == 2
assert res[0].ofx_fitid == 'OFX8'
def test_07_drag_and_drop(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
# drag and drop
chain = ActionChains(selenium)
chain.drag_and_drop(
selenium.find_element_by_id('ofx-2-OFX3'),
selenium.find_element_by_id(
'trans-3'
).find_element_by_class_name('reconcile-drop-target')
).perform()
chain.drag_and_drop(
selenium.find_element_by_id('ofx-1-OFX1'),
selenium.find_element_by_id(
'trans-1'
).find_element_by_class_name('reconcile-drop-target')
).perform()
chain.drag_and_drop(
selenium.find_element_by_id('ofx-1-OFX2'),
selenium.find_element_by_id(
'trans-2'
).find_element_by_class_name('reconcile-drop-target')
).perform()
chain.drag_and_drop(
selenium.find_element_by_id('ofx-2-OFXT6'),
selenium.find_element_by_id(
'trans-5'
).find_element_by_class_name('reconcile-drop-target')
).perform()
chain.drag_and_drop(
selenium.find_element_by_id('ofx-2-OFXT7'),
selenium.find_element_by_id(
'trans-6'
).find_element_by_class_name('reconcile-drop-target')
).perform()
# ensure the reconciled variable was updated
assert self.get_reconciled(selenium) == {
3: [2, 'OFX3'],
1: [1, 'OFX1'],
2: [1, 'OFX2'],
5: [2, 'OFXT6'],
6: [2, 'OFXT7']
}
# click submit button
selenium.find_element_by_id('reconcile-submit').click()
sleep(1)
self.wait_for_jquery_done(selenium)
assert self.get_reconciled(selenium) == {}
msg = selenium.find_element_by_id('reconcile-msg')
assert msg.text == 'Successfully reconciled 5 transactions'
assert 'alert-success' in msg.get_attribute('class')
def test_08_submit_with_nothing_reconciled(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
assert self.get_reconciled(selenium) == {}
# get the innerHTML of both columns
trans_div = selenium.find_element_by_id('trans-panel').get_attribute(
'innerHTML')
ofxtrans_div = selenium.find_element_by_id('ofx-panel').get_attribute(
'innerHTML')
# attempt to drag the other OFX
selenium.find_element_by_id('reconcile-submit').click()
sleep(1)
self.wait_for_jquery_done(selenium)
# ensure both columns are still the same
assert selenium.find_element_by_id('trans-panel').get_attribute(
'innerHTML') == trans_div
assert selenium.find_element_by_id('ofx-panel').get_attribute(
'innerHTML') == ofxtrans_div
msg = selenium.find_element_by_id('reconcile-msg')
assert msg.text == 'Warning: No reconciled transactions; ' \
'did not submit form.'
assert 'alert-warning' in msg.get_attribute('class')
@pytest.mark.acceptance
@pytest.mark.usefixtures('class_refresh_db', 'refreshdb')
@pytest.mark.incremental
class TestUIReconcileMulti(ReconcileHelper):
def test_06_verify_db(self, testdb):
res = testdb.query(TxnReconcile).all()
assert len(res) == 1
assert res[0].id == 1
assert res[0].txn_id == 7
assert res[0].ofx_account_id == 2
assert res[0].ofx_fitid == 'OFX8'
def test_07_drag_and_drop(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
# drag and drop
chain = ActionChains(selenium)
chain.drag_and_drop(
selenium.find_element_by_id('ofx-2-OFX3'),
selenium.find_element_by_id(
'trans-3'
).find_element_by_class_name('reconcile-drop-target')
).perform()
# ensure the reconciled variable was updated
assert self.get_reconciled(selenium) == {
3: [2, 'OFX3']
}
# click submit button
selenium.find_element_by_id('reconcile-submit').click()
sleep(1)
self.wait_for_jquery_done(selenium)
assert self.get_reconciled(selenium) == {}
msg = selenium.find_element_by_id('reconcile-msg')
assert msg.text == 'Successfully reconciled 1 transactions'
assert 'alert-success' in msg.get_attribute('class')
# reconcile 2 more
self.wait_for_id(selenium, 'ofx-1-OFX2')
chain = ActionChains(selenium)
chain.drag_and_drop(
selenium.find_element_by_id('ofx-1-OFX1'),
selenium.find_element_by_id(
'trans-1'
).find_element_by_class_name('reconcile-drop-target')
).perform()
chain.drag_and_drop(
selenium.find_element_by_id('ofx-1-OFX2'),
selenium.find_element_by_id(
'trans-2'
).find_element_by_class_name('reconcile-drop-target')
).perform()
# ensure the reconciled variable was updated
assert self.get_reconciled(selenium) == {
1: [1, 'OFX1'],
2: [1, 'OFX2']
}
# click submit button
selenium.find_element_by_id('reconcile-submit').click()
sleep(1)
self.wait_for_jquery_done(selenium)
assert self.get_reconciled(selenium) == {}
msg = selenium.find_element_by_id('reconcile-msg')
assert msg.text == 'Successfully reconciled 2 transactions'
assert 'alert-success' in msg.get_attribute('class')
def test_08_invalid_trans_id(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
assert self.get_reconciled(selenium) == {}
script = 'reconciled[1234] = [4, "OFXNONE"];'
selenium.execute_script(script)
assert self.get_reconciled(selenium) == {
1234: [4, "OFXNONE"]
}
# click submit button
selenium.find_element_by_id('reconcile-submit').click()
sleep(1)
self.wait_for_jquery_done(selenium)
assert self.get_reconciled(selenium) == {
1234: [4, "OFXNONE"]
}
msg = selenium.find_element_by_id('reconcile-msg')
assert msg.text == 'Error 400: Invalid Transaction ID: 1234'
assert 'alert-danger' in msg.get_attribute('class')
@pytest.mark.acceptance
@pytest.mark.usefixtures('class_refresh_db', 'refreshdb')
@pytest.mark.incremental
class TestReconcileBackend(ReconcileHelper):
def test_06_verify_db(self, testdb):
res = testdb.query(TxnReconcile).all()
assert len(res) == 1
assert res[0].id == 1
assert res[0].txn_id == 7
assert res[0].ofx_account_id == 2
assert res[0].ofx_fitid == 'OFX8'
def test_07_success(self, base_url):
res = requests.post(
base_url + '/ajax/reconcile',
json={'reconciled': {3: [2, 'OFX3']}, 'ofxIgnored': {}}
)
assert res.json() == {
'success': True,
'success_message': 'Successfully reconciled 1 transactions'
}
assert res.status_code == 200
def test_08_verify_db(self, testdb):
res = testdb.query(TxnReconcile).all()
assert len(res) == 2
assert res[0].id == 1
assert res[1].id == 2
assert res[1].txn_id == 3
assert res[1].ofx_account_id == 2
assert res[1].ofx_fitid == 'OFX3'
def test_09_invalid_trans(self, base_url, testdb):
res = requests.post(
base_url + '/ajax/reconcile',
json={'reconciled': {32198: [2, 'OFX3']}, 'ofxIgnored': {}}
)
assert res.json() == {
'success': False,
'error_message': 'Invalid Transaction ID: 32198'
}
assert res.status_code == 400
assert len(testdb.query(TxnReconcile).all()) == 2
def test_10_invalid_ofx(self, base_url, testdb):
res = requests.post(
base_url + '/ajax/reconcile',
json={'reconciled': {3: [2, 'OFX338ufd']}, 'ofxIgnored': {}}
)
assert res.json() == {
'success': False,
'error_message': "Invalid OFXTransaction: (2, 'OFX338ufd')"
}
assert res.status_code == 400
assert len(testdb.query(TxnReconcile).all()) == 2
def test_10_commit_exception(self, base_url):
# already reconciled in test_07
res = requests.post(
base_url + '/ajax/reconcile',
json={'reconciled': {3: [2, 'OFX3']}, 'ofxIgnored': {}}
)
j = res.json()
assert sorted(j.keys()) == ['error_message', 'success']
assert j['success'] is False
assert j['error_message'].startswith('Exception committing reconcile')
assert "Duplicate entry '3' for key " \
"'uq_txn_reconciles_txn_id'" in j['error_message']
assert res.status_code == 400
def test_11_verify_db(self, testdb):
testdb.expire_all()
res = testdb.query(TxnReconcile).all()
assert len(res) == 2
assert res[0].id == 1
assert res[1].id == 2
assert res[1].txn_id == 3
assert res[1].ofx_account_id == 2
assert res[1].ofx_fitid == 'OFX3'
def test_12_reconcile_noOFX(self, base_url):
res = requests.post(
base_url + '/ajax/reconcile',
json={'reconciled': {4: 'Foo Bar Baz'}, 'ofxIgnored': {}}
)
assert res.json() == {
'success': True,
'success_message': 'Successfully reconciled 1 transactions'
}
assert res.status_code == 200
def test_13_verify_db(self, testdb):
testdb.expire_all()
res = testdb.query(TxnReconcile).all()
assert len(res) == 3
assert res[2].txn_id == 4
assert res[2].note == 'Foo Bar Baz'
def test_14_verify_reconcile_modal(self, base_url, selenium, testdb):
res = testdb.query(TxnReconcile).all()
txn_id = res[-1].txn_id
self.get(selenium, base_url + '/transactions')
modal, title, body = self.try_click_and_get_modal(
selenium, selenium.find_element_by_link_text('Yes (%s)' % txn_id)
)
self.assert_modal_displayed(modal, title, body)
assert title.text == 'Transaction Reconcile %s' % txn_id
assert 'Foo Bar Baz' in body.text
assert body.find_elements_by_class_name('col-lg-6')[1].text == \
'No OFX Transaction'
@pytest.mark.acceptance
@pytest.mark.usefixtures('class_refresh_db', 'refreshdb')
@pytest.mark.incremental
class TestOFXMakeTransAndIgnore(AcceptanceHelper):
def get_reconciled(self, driver):
"""
Execute javascript in the selenium browser to return the
``reconciled`` JavaScript object as a JSON string; deserialize the
JSON and return the resulting dict.
:param driver: Selenium driver instance
:type driver: selenium.webdriver.remote.webdriver.WebDriver
:return: ``reconciled`` javascript variable from page
:rtype: dict
"""
script = 'return JSON.stringify(reconciled);'
res = driver.execute_script(script)
print("reconciled JSON: %s" % res)
r = json.loads(res)
return {int(x): r[x] for x in r}
def test_00_clean_db(self, dump_file_path):
# clean the database; empty schema
restore_mysqldump(dump_file_path, get_db_engine(), with_data=False)
def test_01_add_accounts(self, testdb):
a = Account(
description='First Bank Account',
name='BankOne',
ofx_cat_memo_to_name=True,
ofxgetter_config_json='{"foo": "bar"}',
vault_creds_path='secret/foo/bar/BankOne',
acct_type=AcctType.Bank
)
testdb.add(a)
a.set_balance(
overall_date=datetime(2017, 4, 10, 12, 0, 0, tzinfo=UTC),
ledger=Decimal('1.0'),
ledger_date=datetime(2017, 4, 10, 12, 0, 0, tzinfo=UTC)
)
b = Account(
description='Second Bank Account',
name='BankTwo',
acct_type=AcctType.Bank,
negate_ofx_amounts=True
)
testdb.add(b)
b.set_balance(
overall_date=datetime(2017, 4, 10, 12, 0, 0, tzinfo=UTC),
ledger=Decimal('1.0'),
ledger_date=datetime(2017, 4, 10, 12, 0, 0, tzinfo=UTC)
)
testdb.flush()
testdb.commit()
def test_02_add_budgets(self, testdb):
testdb.add(Budget(
name='1Income',
is_periodic=True,
description='1Income',
starting_balance=Decimal('0.0'),
is_income=True
))
testdb.add(Budget(
name='2Periodic',
is_periodic=True,
description='2Periodic',
starting_balance=Decimal('500.00')
))
testdb.add(Budget(
name='3Periodic',
is_periodic=True,
description='3Periodic',
starting_balance=Decimal('0.00')
))
testdb.flush()
testdb.commit()
def test_03_add_transactions(self, testdb):
acct1 = testdb.query(Account).get(1)
ibudget = testdb.query(Budget).get(1)
# income - matches OFX1
t = Transaction(
date=date(2017, 4, 10),
budget_amounts={ibudget: Decimal('-123.45')},
budgeted_amount=Decimal('-123.45'),
description='income',
account=acct1,
planned_budget=ibudget
)
testdb.add(t)
testdb.flush()
testdb.commit()
def test_04_add_ofx(self, testdb):
acct2 = testdb.query(Account).get(2)
stmt1 = OFXStatement(
account=acct2,
filename='a2.ofx',
file_mtime=dnow,
as_of=dnow,
currency='USD',
acctid='2',
bankid='b1',
routing_number='r1'
)
testdb.add(stmt1)
# matches Transaction 2
testdb.add(OFXTransaction(
account=acct2,
statement=stmt1,
fitid='OFX2',
trans_type='Debit',
date_posted=datetime(2017, 4, 11, 12, 3, 4, tzinfo=UTC),
amount=Decimal('251.23'),
name='ofx2-trans1'
))
testdb.flush()
testdb.commit()
def test_06_verify_db(self, testdb):
res = testdb.query(TxnReconcile).all()
assert len(res) == 0
stmts = testdb.query(OFXStatement).all()
assert len(stmts) == 1
assert max([s.id for s in stmts]) == 1
def test_07_verify_db_transaction(self, testdb):
res = testdb.query(Transaction).all()
assert len(res) == 1
assert res[0].id == 1
def test_08_trans_from_ofx(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
ofxdiv = selenium.find_element_by_id('ofx-2-OFX2')
link = ofxdiv.find_element_by_xpath('//a[text()="(make trans)"]')
# test the modal population
modal, title, body = self.try_click_and_get_modal(selenium, link)
self.assert_modal_displayed(modal, title, body)
assert title.text == 'Add Transaction for OFX (2, OFX2)'
assert body.find_element_by_id(
'trans_frm_date').get_attribute('value') == date(
2017, 4, 11).strftime('%Y-%m-%d')
assert body.find_element_by_id(
'trans_frm_amount').get_attribute('value') == '-251.23'
assert body.find_element_by_id(
'trans_frm_description').get_attribute('value') == 'ofx2-trans1'
acct_sel = Select(body.find_element_by_id('trans_frm_account'))
opts = []
for o in acct_sel.options:
opts.append([o.get_attribute('value'), o.text])
assert opts == [
['None', ''],
['1', 'BankOne'],
['2', 'BankTwo']
]
assert acct_sel.first_selected_option.get_attribute('value') == '2'
budget_sel = Select(body.find_element_by_id('trans_frm_budget'))
opts = []
for o in budget_sel.options:
opts.append([o.get_attribute('value'), o.text])
assert opts == [
['None', ''],
['1', '1Income (i)'],
['2', '2Periodic'],
['3', '3Periodic']
]
budget_sel.select_by_value('2')
notes = selenium.find_element_by_id('trans_frm_notes')
assert notes.get_attribute(
'value') == 'created from OFXTransaction(2, OFX2)'
notes.send_keys('foo')
# submit the form
selenium.find_element_by_id('modalSaveButton').click()
self.wait_for_jquery_done(selenium)
# check that we got positive confirmation
_, _, body = self.get_modal_parts(selenium)
x = body.find_elements_by_tag_name('div')[0]
assert 'alert-success' in x.get_attribute('class')
assert x.text.strip() == 'Successfully saved Transaction 2 ' \
'in database.'
# dismiss the modal
selenium.find_element_by_id('modalCloseButton').click()
self.wait_for_jquery_done(selenium)
# ensure that the original OFX div is hidden
assert selenium.find_element_by_id('ofx-2-OFX2').is_displayed() is False
# ensure the reconciled variable was updated
assert self.get_reconciled(selenium) == {
2: [2, 'OFX2']
}
# ensure that the Transaction was added, and the ofx moved to it
trans_div = selenium.find_element_by_id('trans-panel')
actual_trans = [
self.normalize_html(t.get_attribute('outerHTML'))
for t in trans_div.find_elements_by_class_name('reconcile-trans')
]
expected_trans = [
txn_div(
1,
date(2017, 4, 10),
Decimal('-123.45'),
'BankOne', 1,
'1Income', 1,
'income'
),
txn_div(
2,
date(2017, 4, 11),
Decimal('-251.23'),
'BankTwo', 2,
'2Periodic', 2,
'ofx2-trans1',
drop_div=ofx_div(
date(2017, 4, 11),
Decimal('-251.23'),
'BankTwo', 2,
'Debit',
'OFX2',
'ofx2-trans1',
trans_id=2
)
)
]
assert actual_trans == expected_trans
# wait for submit button to be visible and clickable, and click it
self.wait_for_jquery_done(selenium)
WebDriverWait(selenium, 10).until(
EC.invisibility_of_element_located((By.ID, 'modalDiv'))
)
WebDriverWait(selenium, 10).until(
EC.element_to_be_clickable((By.ID, 'reconcile-submit'))
)
selenium.find_element_by_id('reconcile-submit').click()
sleep(1)
self.wait_for_jquery_done(selenium)
assert self.get_reconciled(selenium) == {}
msg = selenium.find_element_by_id('reconcile-msg')
assert msg.text == 'Successfully reconciled 1 transactions'
assert 'alert-success' in msg.get_attribute('class')
def test_09_verify_db_txnreconcile(self, testdb):
res = testdb.query(TxnReconcile).all()
assert len(res) == 1
assert res[0].id == 1
assert res[0].txn_id == 2
assert res[0].ofx_account_id == 2
assert res[0].ofx_fitid == 'OFX2'
def test_10_verify_db_transaction(self, testdb):
res = testdb.query(Transaction).all()
assert len(res) == 2
assert res[1].id == 2
assert res[1].account_id == 2
assert res[1].date == date(2017, 4, 11)
assert res[1].actual_amount == Decimal('-251.23')
assert res[1].description == 'ofx2-trans1'
assert res[1].notes == 'created from OFXTransaction(2, OFX2)foo'
assert len(res[1].budget_transactions) == 1
assert res[1].budget_transactions[0].budget_id == 2
assert res[1].budget_transactions[0].amount == Decimal('-251.23')
def test_30_add_ofx(self, testdb):
acct2 = testdb.query(Account).get(2)
stmt1 = testdb.query(OFXStatement).get(1)
testdb.add(OFXTransaction(
account=acct2,
statement=stmt1,
fitid='OFX30',
trans_type='Debit',
date_posted=datetime(2017, 4, 11, 12, 3, 4, tzinfo=UTC),
amount=Decimal('251.23'),
name='ofx2-trans30'
))
testdb.add(OFXTransaction(
account=acct2,
statement=stmt1,
fitid='OFX31',
trans_type='Debit',
date_posted=datetime(2017, 4, 10, 12, 3, 4, tzinfo=UTC),
amount=Decimal('192.86'),
name='ofx2-trans31'
))
testdb.flush()
testdb.commit()
def test_31_verify_db(self, testdb):
res = testdb.query(TxnReconcile).all()
assert len(res) == 1
assert res[0].id == 1
assert res[0].txn_id == 2
assert res[0].ofx_account_id == 2
assert res[0].ofx_fitid == 'OFX2'
res = testdb.query(TxnReconcile).all()
assert len(res) == 1
assert max([r.id for r in res]) == 1
def test_32_verify_columns(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
ofxtrans_div = selenium.find_element_by_id('ofx-panel')
actual_ofx = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in ofxtrans_div.find_elements_by_class_name('reconcile-ofx')
]
expected_ofx = [
ofx_div(
date(2017, 4, 10),
Decimal('-192.86'),
'BankTwo', 2,
'Debit',
'OFX31',
'ofx2-trans31'
),
ofx_div(
date(2017, 4, 11),
Decimal('-251.23'),
'BankTwo', 2,
'Debit',
'OFX30',
'ofx2-trans30'
)
]
assert expected_ofx == actual_ofx
def test_33_ignore_ofx(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
ofxdiv = selenium.find_element_by_id('ofx-2-OFX31')
link = ofxdiv.find_element_by_xpath('//a[text()="(ignore)"]')
# test the modal population
modal, title, body = self.try_click_and_get_modal(selenium, link)
self.assert_modal_displayed(modal, title, body)
assert title.text == 'Ignore OFXTransaction (2, "OFX31")'
assert body.find_element_by_id(
'trans_frm_acct_id').get_attribute('value') == '2'
assert body.find_element_by_id(
'trans_frm_fitid').get_attribute('value') == 'OFX31'
notes = selenium.find_element_by_id('trans_frm_note')
assert notes.get_attribute(
'value') == ''
notes.send_keys('My Note')
# submit the form
selenium.find_element_by_id('modalSaveButton').click()
self.wait_for_jquery_done(selenium)
sleep(1)
# check that modal was hidden
modal, title, body = self.get_modal_parts(selenium, wait=False)
self.assert_modal_hidden(modal, title, body)
# check that the JS variable has been updated
res = selenium.execute_script('return JSON.stringify(ofxIgnored);')
assert json.loads(res.strip()) == {'2%OFX31': 'My Note'}
# check that the OFX div has been updated
ofxtrans_div = selenium.find_element_by_id('ofx-panel')
actual_ofx = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in ofxtrans_div.find_elements_by_class_name('reconcile-ofx')
]
expected_ofx = [
ofx_div(
date(2017, 4, 10),
Decimal('-192.86'),
'BankTwo', 2,
'Debit',
'OFX31',
'ofx2-trans31',
ignored_reason='My Note'
),
ofx_div(
date(2017, 4, 11),
Decimal('-251.23'),
'BankTwo', 2,
'Debit',
'OFX30',
'ofx2-trans30'
)
]
assert expected_ofx == actual_ofx
# check that the OFX div is no longer draggable
ofxdiv = selenium.find_element_by_id('ofx-2-OFX31')
assert 'ui-draggable-disabled' in ofxdiv.get_attribute('class')
# wait for submit button to be visible and clickable, and click it
self.wait_for_jquery_done(selenium)
WebDriverWait(selenium, 10).until(
EC.invisibility_of_element_located((By.ID, 'modalDiv'))
)
WebDriverWait(selenium, 10).until(
EC.element_to_be_clickable((By.ID, 'reconcile-submit'))
)
selenium.find_element_by_id('reconcile-submit').click()
sleep(1)
self.wait_for_jquery_done(selenium)
msg = selenium.find_element_by_id('reconcile-msg')
assert msg.text == 'Successfully reconciled 1 transactions'
assert 'alert-success' in msg.get_attribute('class')
def test_34_verify_db(self, testdb):
res = testdb.query(TxnReconcile).all()
assert len(res) == 2
assert max([r.id for r in res]) == 2
vals = {r.id: r for r in res}
tr = vals[2]
assert tr.txn_id is None
assert tr.ofx_account_id == 2
assert tr.ofx_fitid == 'OFX31'
assert tr.note == 'My Note'
def test_35_verify_columns(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
ofxtrans_div = selenium.find_element_by_id('ofx-panel')
actual_ofx = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in ofxtrans_div.find_elements_by_class_name('reconcile-ofx')
]
expected_ofx = [
ofx_div(
date(2017, 4, 11),
Decimal('-251.23'),
'BankTwo', 2,
'Debit',
'OFX30',
'ofx2-trans30'
)
]
assert expected_ofx == actual_ofx
def test_36_ignore_and_unignore_ofx(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
# check that the OFX div has been updated
ofxtrans_div = selenium.find_element_by_id('ofx-panel')
actual_ofx = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in ofxtrans_div.find_elements_by_class_name('reconcile-ofx')
]
expected_ofx = [
ofx_div(
date(2017, 4, 11),
Decimal('-251.23'),
'BankTwo', 2,
'Debit',
'OFX30',
'ofx2-trans30'
)
]
assert expected_ofx == actual_ofx
# ignore
ofxdiv = selenium.find_element_by_id('ofx-2-OFX30')
link = ofxdiv.find_element_by_xpath('//a[text()="(ignore)"]')
# test the modal population
modal, title, body = self.try_click_and_get_modal(selenium, link)
self.assert_modal_displayed(modal, title, body)
assert title.text == 'Ignore OFXTransaction (2, "OFX30")'
assert body.find_element_by_id(
'trans_frm_acct_id').get_attribute('value') == '2'
assert body.find_element_by_id(
'trans_frm_fitid').get_attribute('value') == 'OFX30'
notes = selenium.find_element_by_id('trans_frm_note')
assert notes.get_attribute(
'value') == ''
notes.send_keys('My Note')
# submit the form
selenium.find_element_by_id('modalSaveButton').click()
self.wait_for_jquery_done(selenium)
sleep(1)
# check that modal was hidden
modal, title, body = self.get_modal_parts(selenium, wait=False)
self.assert_modal_hidden(modal, title, body)
# check that the JS variable has been updated
res = selenium.execute_script('return JSON.stringify(ofxIgnored);')
assert json.loads(res.strip()) == {'2%OFX30': 'My Note'}
# check that the OFX div has been updated
ofxtrans_div = selenium.find_element_by_id('ofx-panel')
actual_ofx = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in ofxtrans_div.find_elements_by_class_name('reconcile-ofx')
]
expected_ofx = [
ofx_div(
date(2017, 4, 11),
Decimal('-251.23'),
'BankTwo', 2,
'Debit',
'OFX30',
'ofx2-trans30',
ignored_reason='My Note'
)
]
assert expected_ofx == actual_ofx
# check that the OFX div is no longer draggable
ofxdiv = selenium.find_element_by_id('ofx-2-OFX30')
assert 'ui-draggable-disabled' in ofxdiv.get_attribute('class')
# ok, now Unignore
ofxdiv = selenium.find_element_by_id('ofx-2-OFX30')
link = ofxdiv.find_element_by_xpath('//a[text()="Unignore"]')
link.click()
# and test that everything was reverted...
res = selenium.execute_script('return JSON.stringify(ofxIgnored);')
assert json.loads(res.strip()) == {}
# check that the OFX div has been updated
ofxtrans_div = selenium.find_element_by_id('ofx-panel')
actual_ofx = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in ofxtrans_div.find_elements_by_class_name('reconcile-ofx')
]
expected_ofx = [
ofx_div(
date(2017, 4, 11),
Decimal('-251.23'),
'BankTwo', 2,
'Debit',
'OFX30',
'ofx2-trans30'
)
]
assert expected_ofx == actual_ofx
# check that the OFX div is no longer draggable
ofxdiv = selenium.find_element_by_id('ofx-2-OFX30')
assert 'ui-draggable-disabled' not in ofxdiv.get_attribute('class')
@pytest.mark.acceptance
@pytest.mark.usefixtures('class_refresh_db', 'refreshdb')
@pytest.mark.incremental
class TestTransReconcileNoOfx(ReconcileHelper):
def test_06_transactions_column(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
trans_div = selenium.find_element_by_id('trans-panel')
actual_trans = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in trans_div.find_elements_by_class_name('reconcile-trans')
]
expected_trans = [
txn_div(
1,
date(2017, 4, 10),
-100,
'BankOne', 1,
'1Income', 1,
'income'
),
txn_div(
2,
date(2017, 4, 10),
250,
'BankOne', 1,
'3Periodic', 3,
'trans1'
),
txn_div(
3,
date(2017, 4, 11),
600,
'BankTwo', 2,
[
['2Periodic', 2, '$590.00'],
['3Periodic', 3, '$10.00']
],
None,
'trans2'
),
txn_div(
4,
date(2017, 4, 14),
10,
'BankTwo', 2,
'3Periodic', 3,
'trans3'
),
txn_div(
5,
date(2017, 4, 16),
25,
'BankTwo', 2,
'3Periodic', 3,
'trans4'
),
txn_div(
6,
date(2017, 4, 17),
25,
'BankTwo', 2,
'3Periodic', 3,
'trans5'
)
]
assert actual_trans == expected_trans
def test_07_ofx_column(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
ofxtrans_div = selenium.find_element_by_id('ofx-panel')
actual_ofx = [
self.normalize_html(x.get_attribute('outerHTML'))
for x in ofxtrans_div.find_elements_by_class_name('reconcile-ofx')
]
expected_ofx = [
ofx_div(
date(2017, 4, 9),
Decimal('600.00'),
'BankTwo', 2,
'Purchase',
'OFX3',
'ofx3-trans2-st1'
),
ofx_div(
date(2017, 4, 10),
-100,
'BankOne', 1,
'Deposit',
'OFX1',
'ofx1-income'
),
ofx_div(
date(2017, 4, 11),
250,
'BankOne', 1,
'Debit',
'OFX2',
'ofx2-trans1'
),
ofx_div(
date(2017, 4, 14),
10,
'BankOne', 1,
'Purchase',
'OFXT4',
'ofx4-st2'
),
ofx_div(
date(2017, 4, 16),
10,
'BankOne', 1,
'Foo',
'OFXT5',
'ofx5'
),
ofx_div(
date(2017, 4, 16),
25,
'BankTwo', 2,
'Foo',
'OFXT6',
'ofx6'
),
ofx_div(
date(2017, 4, 17),
25,
'BankTwo', 2,
'Foo',
'OFXT7',
'ofx7'
)
]
assert expected_ofx == actual_ofx
def test_08_reconcile_unreconcile_noOFX_visible(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
# check Trans and OFX
trans = selenium.find_element_by_id('trans-3')
assert self.normalize_html(trans.get_attribute('outerHTML')) == txn_div(
3,
date(2017, 4, 11),
600,
'BankTwo', 2,
[
['2Periodic', 2, '$590.00'],
['3Periodic', 3, '$10.00']
],
None,
'trans2'
)
ofx = selenium.find_element_by_id('ofx-2-OFX3')
assert self.normalize_html(ofx.get_attribute('outerHTML')) == ofx_div(
date(2017, 4, 9),
Decimal('600.00'),
'BankTwo', 2,
'Purchase',
'OFX3',
'ofx3-trans2-st1'
)
# drag and drop
chain = ActionChains(selenium)
chain.drag_and_drop(
selenium.find_element_by_id('ofx-2-OFX3'),
selenium.find_element_by_id(
'trans-3'
).find_element_by_class_name('reconcile-drop-target')
).perform()
# ensure the reconciled variable was updated
assert self.get_reconciled(selenium) == {
3: [2, 'OFX3']
}
# check Trans and OFX
trans = selenium.find_element_by_id('trans-3')
assert self.normalize_html(trans.get_attribute('outerHTML')) == txn_div(
3,
date(2017, 4, 11),
600,
'BankTwo', 2,
[
['2Periodic', 2, '$590.00'],
['3Periodic', 3, '$10.00']
],
None,
'trans2',
drop_div=ofx_div(
date(2017, 4, 9),
Decimal('600.00'),
'BankTwo', 2,
'Purchase',
'OFX3',
'ofx3-trans2-st1',
trans_id=3
)
)
ofx = selenium.find_element_by_id('ofx-2-OFX3')
assert ofx.is_displayed() is False
# unreconcile
trans.find_element_by_xpath('//a[text()="Unreconcile"]').click()
sleep(1)
self.wait_for_jquery_done(selenium)
# check Trans and OFX
trans = selenium.find_element_by_id('trans-3')
assert self.normalize_html(trans.get_attribute('outerHTML')) == txn_div(
3,
date(2017, 4, 11),
600,
'BankTwo', 2,
[
['2Periodic', 2, '$590.00'],
['3Periodic', 3, '$10.00']
],
None,
'trans2'
)
ofx = selenium.find_element_by_id('ofx-2-OFX3')
assert self.normalize_html(ofx.get_attribute('outerHTML')) == ofx_div(
date(2017, 4, 9),
Decimal('600.00'),
'BankTwo', 2,
'Purchase',
'OFX3',
'ofx3-trans2-st1'
)
# ensure the reconciled variable was updated
assert self.get_reconciled(selenium) == {}
# click submit button
selenium.find_element_by_id('reconcile-submit').click()
sleep(1)
self.wait_for_jquery_done(selenium)
assert self.get_reconciled(selenium) == {}
msg = selenium.find_element_by_id('reconcile-msg')
assert msg.text == 'Warning: No reconciled transactions; ' \
'did not submit form.'
assert 'alert-warning' in msg.get_attribute('class')
def test_09_reconcile_unreconcile_noOFX(self, base_url, selenium):
self.get(selenium, base_url + '/reconcile')
# check Trans and OFX
trans = selenium.find_element_by_id('trans-3')
assert self.normalize_html(trans.get_attribute('outerHTML')) == txn_div(
3,
date(2017, 4, 11),
600,
'BankTwo', 2,
[
['2Periodic', 2, '$590.00'],
['3Periodic', 3, '$10.00']
],
None,
'trans2'
)
ofx = selenium.find_element_by_id('ofx-2-OFX3')
assert self.normalize_html(ofx.get_attribute('outerHTML')) == ofx_div(
date(2017, 4, 9),
Decimal('600.00'),
'BankTwo', 2,
'Purchase',
'OFX3',
'ofx3-trans2-st1'
)
assert self.get_reconciled(selenium) == {}
# reconcile as noOFX
modal, title, body = self.try_click_and_get_modal(
selenium, trans.find_element_by_link_text('(no OFX)')
)
self.assert_modal_displayed(modal, title, body)
assert title.text == 'Reconcile Transaction 3 Without OFX'
assert body.find_element_by_id(
'trans_frm_id').get_attribute('value') == '3'
note = body.find_element_by_id('trans_frm_note')
note.clear()
note.send_keys('My Trans Note')
# submit the form
selenium.find_element_by_id('modalSaveButton').click()
sleep(1)
self.wait_for_jquery_done(selenium)
# assert modal is hidden
assert selenium.find_element_by_id('modalDiv').is_displayed() is False
# test trans div was updated
noofx_div = '<div style="text-align: right;"><a href="' \
'javascript:reconcileDoUnreconcileNoOfx(3)">' \
'Unreconcile</a></div><div class="reconcile" ' \
'id="trans-3-noOFX" style=""><p><strong>No OFX:</strong>' \
' My Trans Note</p></div>'
trans = selenium.find_element_by_id('trans-3')
assert self.normalize_html(trans.get_attribute('outerHTML')) == txn_div(
3,
date(2017, 4, 11),
600,
'BankTwo', 2,
[
['2Periodic', 2, '$590.00'],
['3Periodic', 3, '$10.00']
],
None,
'trans2',
drop_div=noofx_div
)
# test that reconciled var was updated
assert self.get_reconciled(selenium) == {3: 'My Trans Note'}
# unreconcile
trans.find_element_by_link_text('Unreconcile').click()
trans = selenium.find_element_by_id('trans-3')
assert self.normalize_html(trans.get_attribute('outerHTML')) == txn_div(
3,
date(2017, 4, 11),
600,
'BankTwo', 2,
[
['2Periodic', 2, '$590.00'],
['3Periodic', 3, '$10.00']
],
None,
'trans2'
)
assert self.get_reconciled(selenium) == {}
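# Illustrative sketch, not part of the upstream test suite: the payload shape
# that the reconcile submit posts to /ajax/reconcile, as inferred from the
# assertions in the tests above. Field names beyond what these tests assert
# are not guaranteed by this file.
def _example_reconcile_payload():
    """Return a sample /ajax/reconcile payload matching the tests above."""
    return {
        'reconciled': {
            3: [2, 'OFX3'],    # txn_id -> [ofx_account_id, ofx_fitid]
            4: 'Foo Bar Baz',  # txn_id -> note string (reconcile with no OFX)
        },
        'ofxIgnored': {
            '2%OFX31': 'My Note'  # "account_id%fitid" -> ignore reason
        }
    }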
|
iuliat/nova
|
refs/heads/master
|
nova/db/sqlalchemy/migrate_repo/versions/276_vcpu_model.py
|
81
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
BASE_TABLE_NAME = 'instance_extra'
NEW_COLUMN_NAME = 'vcpu_model'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
new_column = Column(NEW_COLUMN_NAME, Text, nullable=True)
if not hasattr(table.c, NEW_COLUMN_NAME):
table.create_column(new_column)
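# Illustrative helper, not part of the upstream migration: a reflection-based
# check that the new column exists after upgrade() has run. It mirrors the
# autoload pattern used above; the table/column names are whatever the caller
# passes (e.g. 'instance_extra' / NEW_COLUMN_NAME, or the shadow_ variant).
def _column_exists(migrate_engine, table_name, column_name):
    meta = MetaData()
    meta.bind = migrate_engine
    table = Table(table_name, meta, autoload=True)
    return hasattr(table.c, column_name)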
|
Sonicbids/django
|
refs/heads/master
|
django/bin/unique-messages.py
|
67
|
#!/usr/bin/env python
import os
import sys
def unique_messages():
basedir = None
if os.path.isdir(os.path.join('conf', 'locale')):
basedir = os.path.abspath(os.path.join('conf', 'locale'))
elif os.path.isdir('locale'):
basedir = os.path.abspath('locale')
else:
print("This script should be run from the Django Git tree or your project or app tree.")
sys.exit(1)
for (dirpath, dirnames, filenames) in os.walk(basedir):
for f in filenames:
if f.endswith('.po'):
sys.stderr.write('processing file %s in %s\n' % (f, dirpath))
pf = os.path.splitext(os.path.join(dirpath, f))[0]
cmd = 'msguniq "%s.po"' % pf
stdout = os.popen(cmd)
msg = stdout.read()
with open('%s.po' % pf, 'w') as fp:
fp.write(msg)
if __name__ == "__main__":
unique_messages()
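# Alternative sketch (assumption, not upstream code; requires Python 3.5+):
# the same msguniq step using subprocess, so that a failing msguniq raises
# instead of silently writing an empty .po file, as os.popen().read() would.
def unique_messages_checked(basedir):
    import subprocess
    for dirpath, dirnames, filenames in os.walk(basedir):
        for f in filenames:
            if not f.endswith('.po'):
                continue
            pf = os.path.splitext(os.path.join(dirpath, f))[0]
            # check=True raises CalledProcessError on a non-zero exit status
            result = subprocess.run(['msguniq', '%s.po' % pf],
                                    stdout=subprocess.PIPE, check=True)
            with open('%s.po' % pf, 'wb') as fp:
                fp.write(result.stdout)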
|
fritz-k/django-wiki
|
refs/heads/master
|
wiki/apps.py
|
18
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class NotifcationsConfig(AppConfig):
name = 'wiki.plugins.notifications'
verbose_name = _("Wiki notifications")
label = 'wiki_notifications'
class ImagesConfig(AppConfig):
name = 'wiki.plugins.images'
verbose_name = _("Wiki images")
label = 'wiki_images'
class AttachmentsConfig(AppConfig):
name = 'wiki.plugins.attachments'
verbose_name = _("Wiki attachments")
label = 'wiki_attachments'
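# Usage sketch (assumption, not part of this file): Django selects one of the
# AppConfig classes above either by listing its dotted path in INSTALLED_APPS,
# e.g. 'wiki.apps.ImagesConfig', or via a default_app_config hook such as:
#
#     # wiki/plugins/images/__init__.py (hypothetical location)
#     default_app_config = 'wiki.apps.ImagesConfig'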
|
ibmsoe/tensorflow
|
refs/heads/master
|
tensorflow/contrib/factorization/python/ops/factorization_ops_test.py
|
32
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for factorization_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import factorization_ops
from tensorflow.contrib.factorization.python.ops import factorization_ops_test_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
INPUT_MATRIX = factorization_ops_test_utils.INPUT_MATRIX
np_matrix_to_tf_sparse = factorization_ops_test_utils.np_matrix_to_tf_sparse
class WalsModelTest(test.TestCase):
def sparse_input(self):
return np_matrix_to_tf_sparse(INPUT_MATRIX)
def count_rows(self, sp_input):
return math_ops.cast(
array_ops.shape(array_ops.unique(sp_input.indices[:, 0])[0])[0],
dtypes.float32)
def count_cols(self, sp_input):
return math_ops.cast(
array_ops.shape(array_ops.unique(sp_input.indices[:, 1])[0])[0],
dtypes.float32)
def calculate_loss_from_wals_model(self, wals_model, sp_inputs):
current_rows = embedding_ops.embedding_lookup(
wals_model.row_factors, math_ops.range(wals_model._input_rows),
partition_strategy="div")
current_cols = embedding_ops.embedding_lookup(
wals_model.col_factors, math_ops.range(wals_model._input_cols),
partition_strategy="div")
row_wts = embedding_ops.embedding_lookup(
wals_model._row_weights, math_ops.range(wals_model._input_rows),
partition_strategy="div")
col_wts = embedding_ops.embedding_lookup(
wals_model._col_weights, math_ops.range(wals_model._input_cols),
partition_strategy="div")
return factorization_ops_test_utils.calculate_loss(
sp_inputs, current_rows, current_cols, wals_model._regularization,
wals_model._unobserved_weight, row_wts, col_wts)
def setUp(self):
self.col_init = [
# shard 0
[[-0.36444709, -0.39077035, -0.32528427],
[1.19056475, 0.07231052, 2.11834812],
[0.93468881, -0.71099287, 1.91826844]],
# shard 1
[[1.18160152, 1.52490723, -0.50015002],
[1.82574749, -0.57515913, -1.32810032]],
# shard 2
[[-0.15515432, -0.84675711, 0.13097958],
[-0.9246484, 0.69117504, 1.2036494]]
]
self.row_wts = [[0.1, 0.2, 0.3], [0.4, 0.5]]
self.col_wts = [[0.1, 0.2, 0.3], [0.4, 0.5], [0.6, 0.7]]
# Values of factor shards after running one iteration of row and column
# updates.
self._row_factors_0 = [[0.097689, -0.219293, -0.020780],
[0.50842, 0.64626, 0.22364],
[0.401159, -0.046558, -0.192854]]
self._row_factors_1 = [[1.20597, -0.48025, 0.35582],
[1.5564, 1.2528, 1.0528]]
self._col_factors_0 = [[2.4725, -1.2950, -1.9980],
[0.44625, 1.50771, 1.27118],
[1.39801, -2.10134, 0.73572]]
self._col_factors_1 = [[3.36509, -0.66595, -3.51208],
[0.57191, 1.59407, 1.33020]]
self._col_factors_2 = [[3.3459, -1.3341, -3.3008],
[0.57366, 1.83729, 1.26798]]
def _run_test_process_input(self,
use_factors_weights_cache,
compute_loss=False):
with ops.Graph().as_default(), self.test_session() as sess:
self._wals_inputs = self.sparse_input()
sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
num_rows = 5
num_cols = 7
factor_dim = 3
wals_model = factorization_ops.WALSModel(
num_rows,
num_cols,
factor_dim,
num_row_shards=2,
num_col_shards=3,
regularization=0.01,
unobserved_weight=0.1,
col_init=self.col_init,
row_weights=self.row_wts,
col_weights=self.col_wts,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
# Split input into multiple sparse tensors with scattered rows. Note that
# this split can be different than the factor sharding and the inputs can
# consist of non-consecutive rows. Each row needs to include all non-zero
# elements in that row.
sp_r0 = np_matrix_to_tf_sparse(INPUT_MATRIX, [0, 2]).eval()
sp_r1 = np_matrix_to_tf_sparse(INPUT_MATRIX, [1, 4], shuffle=True).eval()
sp_r2 = np_matrix_to_tf_sparse(INPUT_MATRIX, [3], shuffle=True).eval()
input_scattered_rows = [sp_r0, sp_r1, sp_r2]
# Test updating row factors.
# Here we feed in scattered rows of the input.
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
_, process_input_op, factor_loss = wals_model.update_row_factors(
sp_input=sp_feeder, transpose_input=False)
for inp in input_scattered_rows:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
row_factors = [x.eval() for x in wals_model.row_factors]
self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)
# Test row projection.
# Using the specified projection weights for the 2 row feature vectors.
# This is expected to reproduce the same row factors in the model as the
# weights and feature vectors are identical to those used in model
# training.
projected_rows = wals_model.project_row_factors(
sp_input=sp_feeder,
transpose_input=False,
projection_weights=[0.2, 0.5])
# Don't specify the projection weight, so 1.0 will be used. The feature
# weights will be those specified in model.
projected_rows_no_weights = wals_model.project_row_factors(
sp_input=sp_feeder, transpose_input=False)
feed_dict = {
sp_feeder:
np_matrix_to_tf_sparse(
INPUT_MATRIX, [1, 4], shuffle=False).eval()
}
self.assertAllClose(
projected_rows.eval(feed_dict=feed_dict),
[self._row_factors_0[1], self._row_factors_1[1]],
atol=1e-3)
self.assertAllClose(
projected_rows_no_weights.eval(feed_dict=feed_dict),
[[0.569082, 0.715088, 0.31777], [1.915879, 1.992677, 1.109057]],
atol=1e-3)
if compute_loss:
# Test loss computation after the row update
loss = sum(
sess.run(factor_loss * self.count_rows(inp) / num_rows,
feed_dict={sp_feeder: inp})
for inp in input_scattered_rows)
true_loss = self.calculate_loss_from_wals_model(
wals_model, self._wals_inputs)
self.assertNear(
loss, true_loss, err=.001,
msg="""After row update, computed loss = {}, does not match
the true loss = {}.""".format(loss, true_loss))
# Split input into multiple sparse tensors with scattered columns. Note
# that here the elements in the sparse tensors are not ordered and also
# do not need to consist of consecutive columns. However, each column
# needs to include all non-zero elements in that column.
sp_c0 = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[2, 0]).eval()
sp_c1 = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[5, 3, 1], shuffle=True).eval()
sp_c2 = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[4, 6]).eval()
sp_c3 = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[3, 6], shuffle=True).eval()
input_scattered_cols = [sp_c0, sp_c1, sp_c2, sp_c3]
input_scattered_cols_non_duplicate = [sp_c0, sp_c1, sp_c2]
# Test updating column factors.
# Here we feed in scattered columns of the input.
wals_model.col_update_prep_gramian_op.run()
wals_model.initialize_col_update_op.run()
_, process_input_op, factor_loss = wals_model.update_col_factors(
sp_input=sp_feeder, transpose_input=False)
for inp in input_scattered_cols:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
col_factors = [x.eval() for x in wals_model.col_factors]
self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)
# Test column projection.
# Using the specified projection weights for the 3 column feature vectors.
# This is expected to reproduce the same column factors in the model as the
# weights and feature vectors are identical to those used in model
# training.
projected_cols = wals_model.project_col_factors(
sp_input=sp_feeder,
transpose_input=False,
projection_weights=[0.6, 0.4, 0.2])
# Don't specify the projection weight, so 1.0 will be used. The feature
# weights will be those specified in model.
projected_cols_no_weights = wals_model.project_col_factors(
sp_input=sp_feeder, transpose_input=False)
feed_dict = {
sp_feeder:
np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[5, 3, 1], shuffle=False).eval()
}
self.assertAllClose(
projected_cols.eval(feed_dict=feed_dict), [
self._col_factors_2[0], self._col_factors_1[0],
self._col_factors_0[1]
],
atol=1e-3)
self.assertAllClose(
projected_cols_no_weights.eval(feed_dict=feed_dict),
[[3.471045, -1.250835, -3.598917],
[3.585139, -0.487476, -3.852232],
[0.346433, 1.360644, 1.677121]],
atol=1e-3)
if compute_loss:
# Test loss computation after the column update.
loss = sum(
sess.run(factor_loss * self.count_cols(inp) / num_cols,
feed_dict={sp_feeder: inp})
for inp in input_scattered_cols_non_duplicate)
true_loss = self.calculate_loss_from_wals_model(
wals_model, self._wals_inputs)
self.assertNear(
loss, true_loss, err=.001,
msg="""After col update, computed loss = {}, does not match the true
loss = {}.""".format(loss, true_loss))
def _run_test_process_input_transposed(self, use_factors_weights_cache,
compute_loss=False):
with ops.Graph().as_default(), self.test_session() as sess:
self._wals_inputs = self.sparse_input()
sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
num_rows = 5
num_cols = 7
factor_dim = 3
wals_model = factorization_ops.WALSModel(
num_rows,
num_cols,
factor_dim,
num_row_shards=2,
num_col_shards=3,
regularization=0.01,
unobserved_weight=0.1,
col_init=self.col_init,
row_weights=self.row_wts,
col_weights=self.col_wts,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
# Split input into multiple SparseTensors with scattered rows.
# Here the inputs are transposed. But the same constraints as described in
# the previous non-transposed test case apply to these inputs (before they
# are transposed).
sp_r0_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, [0, 3], transpose=True).eval()
sp_r1_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, [4, 1], shuffle=True, transpose=True).eval()
sp_r2_t = np_matrix_to_tf_sparse(INPUT_MATRIX, [2], transpose=True).eval()
sp_r3_t = sp_r1_t
input_scattered_rows = [sp_r0_t, sp_r1_t, sp_r2_t, sp_r3_t]
input_scattered_rows_non_duplicate = [sp_r0_t, sp_r1_t, sp_r2_t]
# Test updating row factors.
# Here we feed in scattered rows of the input.
# Note that the needed suffixes of the placeholders follow the
# lexicographical order of the test case names and then the line order in
# which they appear.
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
_, process_input_op, factor_loss = wals_model.update_row_factors(
sp_input=sp_feeder, transpose_input=True)
for inp in input_scattered_rows:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
row_factors = [x.eval() for x in wals_model.row_factors]
self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)
# Test row projection.
# Using the specified projection weights for the 2 row feature vectors.
# This is expected to reproduce the same row factors in the model as the
# weights and feature vectors are identical to those used in model
# training.
projected_rows = wals_model.project_row_factors(
sp_input=sp_feeder,
transpose_input=True,
projection_weights=[0.5, 0.2])
# Don't specify the projection weight, so 1.0 will be used. The feature
# weights will be those specified in model.
projected_rows_no_weights = wals_model.project_row_factors(
sp_input=sp_feeder, transpose_input=True)
feed_dict = {
sp_feeder:
np_matrix_to_tf_sparse(
INPUT_MATRIX, [4, 1], shuffle=False, transpose=True).eval()
}
self.assertAllClose(
projected_rows.eval(feed_dict=feed_dict),
[self._row_factors_1[1], self._row_factors_0[1]],
atol=1e-3)
self.assertAllClose(
projected_rows_no_weights.eval(feed_dict=feed_dict),
[[1.915879, 1.992677, 1.109057], [0.569082, 0.715088, 0.31777]],
atol=1e-3)
if compute_loss:
# Test loss computation after the row update
loss = sum(
sess.run(factor_loss * self.count_cols(inp) / num_rows,
feed_dict={sp_feeder: inp})
for inp in input_scattered_rows_non_duplicate)
true_loss = self.calculate_loss_from_wals_model(
wals_model, self._wals_inputs)
self.assertNear(
loss, true_loss, err=.001,
msg="""After row update, computed loss = {}, does not match the true
loss = {}.""".format(loss, true_loss))
# Split input into multiple SparseTensors with scattered columns.
# Here the inputs are transposed. But the same constraints as described in
# the previous non-transposed test case apply to these inputs (before they
# are transposed).
sp_c0_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[0, 1], transpose=True).eval()
sp_c1_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[4, 2], transpose=True).eval()
sp_c2_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[5], transpose=True, shuffle=True).eval()
sp_c3_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[3, 6], transpose=True).eval()
sp_c4_t = sp_c2_t
input_scattered_cols = [sp_c0_t, sp_c1_t, sp_c2_t, sp_c3_t, sp_c4_t]
input_scattered_cols_non_duplicate = [sp_c0_t, sp_c1_t, sp_c2_t, sp_c3_t]
# Test updating column factors.
# Here we feed in scattered columns of the input.
wals_model.col_update_prep_gramian_op.run()
wals_model.initialize_col_update_op.run()
_, process_input_op, factor_loss = wals_model.update_col_factors(
sp_input=sp_feeder, transpose_input=True)
for inp in input_scattered_cols:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
col_factors = [x.eval() for x in wals_model.col_factors]
self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)
# Test column projection.
# Using the specified projection weights for the 2 column feature vectors.
# This is expected to reproduce the same column factors in the model as the
# weights and feature vectors are identical to those used in model
# training.
projected_cols = wals_model.project_col_factors(
sp_input=sp_feeder,
transpose_input=True,
projection_weights=[0.4, 0.7])
# Don't specify the projection weight, so 1.0 will be used. The feature
# weights will be those specified in model.
projected_cols_no_weights = wals_model.project_col_factors(
sp_input=sp_feeder, transpose_input=True)
feed_dict = {sp_feeder: sp_c3_t}
self.assertAllClose(
projected_cols.eval(feed_dict=feed_dict),
[self._col_factors_1[0], self._col_factors_2[1]],
atol=1e-3)
self.assertAllClose(
projected_cols_no_weights.eval(feed_dict=feed_dict),
[[3.585139, -0.487476, -3.852232],
[0.557937, 1.813907, 1.331171]],
atol=1e-3)
if compute_loss:
# Test loss computation after the col update
loss = sum(
sess.run(factor_loss * self.count_rows(inp) / num_cols,
feed_dict={sp_feeder: inp})
for inp in input_scattered_cols_non_duplicate)
true_loss = self.calculate_loss_from_wals_model(
wals_model, self._wals_inputs)
self.assertNear(
loss, true_loss, err=.001,
msg="""After col update, computed loss = {}, does not match the true
loss = {}.""".format(loss, true_loss))
# Note that when row_weights and col_weights are 0, WALS gives identical
# results as ALS (Alternating Least Squares). However our implementation does
# not handle the case of zero weights differently. Instead, when row_weights
# and col_weights are set to None, we interpret that as the ALS case, and
# trigger the more efficient ALS updates.
# Here we test that those two give identical results.
def _run_test_als(self, use_factors_weights_cache):
with ops.Graph().as_default(), self.test_session():
self._wals_inputs = self.sparse_input()
col_init = np.random.rand(7, 3)
als_model = factorization_ops.WALSModel(
5,
7,
3,
col_init=col_init,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
als_model.initialize_op.run()
als_model.worker_init.run()
als_model.row_update_prep_gramian_op.run()
als_model.initialize_row_update_op.run()
process_input_op = als_model.update_row_factors(self._wals_inputs)[1]
process_input_op.run()
row_factors1 = [x.eval() for x in als_model.row_factors]
# Testing row projection. Projection weight doesn't matter in this case
# since the model is the ALS special case.
als_projected_row_factors1 = als_model.project_row_factors(
self._wals_inputs).eval()
wals_model = factorization_ops.WALSModel(
5,
7,
3,
col_init=col_init,
row_weights=0,
col_weights=0,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
process_input_op = wals_model.update_row_factors(self._wals_inputs)[1]
process_input_op.run()
row_factors2 = [x.eval() for x in wals_model.row_factors]
for r1, r2 in zip(row_factors1, row_factors2):
self.assertAllClose(r1, r2, atol=1e-3)
self.assertAllClose(
als_projected_row_factors1,
[row for shard in row_factors2 for row in shard],
atol=1e-3)
# Here we test partial column updates.
sp_c = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[2, 0], shuffle=True).eval()
sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
feed_dict = {sp_feeder: sp_c}
als_model.col_update_prep_gramian_op.run()
als_model.initialize_col_update_op.run()
process_input_op = als_model.update_col_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
col_factors1 = [x.eval() for x in als_model.col_factors]
# Testing column projection. Projection weight doesn't matter in this case
# since the model is the ALS special case.
als_projected_col_factors1 = als_model.project_col_factors(
np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[2, 0], shuffle=False)).eval()
feed_dict = {sp_feeder: sp_c}
wals_model.col_update_prep_gramian_op.run()
wals_model.initialize_col_update_op.run()
process_input_op = wals_model.update_col_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
col_factors2 = [x.eval() for x in wals_model.col_factors]
for c1, c2 in zip(col_factors1, col_factors2):
self.assertAllClose(c1, c2, rtol=5e-3, atol=1e-2)
self.assertAllClose(
als_projected_col_factors1,
[col_factors2[0][2], col_factors2[0][0]],
atol=1e-2)
def _run_test_als_transposed(self, use_factors_weights_cache):
with ops.Graph().as_default(), self.test_session():
self._wals_inputs = self.sparse_input()
col_init = np.random.rand(7, 3)
als_model = factorization_ops.WALSModel(
5,
7,
3,
col_init=col_init,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
als_model.initialize_op.run()
als_model.worker_init.run()
wals_model = factorization_ops.WALSModel(
5,
7,
3,
col_init=col_init,
row_weights=[0] * 5,
col_weights=[0] * 7,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
# Here test partial row update with identical inputs but with transposed
# input for als.
sp_r_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, [3, 1], transpose=True).eval()
sp_r = np_matrix_to_tf_sparse(INPUT_MATRIX, [3, 1]).eval()
feed_dict = {sp_feeder: sp_r_t}
als_model.row_update_prep_gramian_op.run()
als_model.initialize_row_update_op.run()
process_input_op = als_model.update_row_factors(
sp_input=sp_feeder, transpose_input=True)[1]
process_input_op.run(feed_dict=feed_dict)
# Only updated row 1 and row 3, so only compare these rows since others
# have randomly initialized values.
row_factors1 = [
als_model.row_factors[0].eval()[1], als_model.row_factors[0].eval()[3]
]
# Testing row projection. Projection weight doesn't matter in this case
# since the model is the ALS special case. Note that the ordering of the
# returned results is preserved and matches the ordering of the input
# feature vectors.
als_projected_row_factors1 = als_model.project_row_factors(
sp_input=sp_feeder, transpose_input=True).eval(feed_dict=feed_dict)
feed_dict = {sp_feeder: sp_r}
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
process_input_op = wals_model.update_row_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
# Only updated row 1 and row 3, so only compare these rows since others
# have randomly initialized values.
row_factors2 = [
wals_model.row_factors[0].eval()[1],
wals_model.row_factors[0].eval()[3]
]
for r1, r2 in zip(row_factors1, row_factors2):
self.assertAllClose(r1, r2, atol=1e-3)
# Note that the ordering of the returned projection results is preserved,
# matching the ordering of the input feature vectors.
self.assertAllClose(
als_projected_row_factors1, [row_factors2[1], row_factors2[0]],
atol=1e-3)
def simple_train(self, model, inp, num_iterations):
"""Helper function to train model on inp for num_iterations."""
row_update_op = model.update_row_factors(sp_input=inp)[1]
col_update_op = model.update_col_factors(sp_input=inp)[1]
model.initialize_op.run()
model.worker_init.run()
for _ in xrange(num_iterations):
model.row_update_prep_gramian_op.run()
model.initialize_row_update_op.run()
row_update_op.run()
model.col_update_prep_gramian_op.run()
model.initialize_col_update_op.run()
col_update_op.run()
# Trains an ALS model for a low-rank matrix and makes sure the product of
# factors is close to the original input.
def _run_test_train_full_low_rank_als(self, use_factors_weights_cache):
rows = 15
cols = 11
dims = 3
with ops.Graph().as_default(), self.test_session():
data = np.dot(np.random.rand(rows, 3),
np.random.rand(3, cols)).astype(np.float32) / 3.0
indices = [[i, j] for i in xrange(rows) for j in xrange(cols)]
values = data.reshape(-1)
inp = sparse_tensor.SparseTensor(indices, values, [rows, cols])
model = factorization_ops.WALSModel(
rows,
cols,
dims,
regularization=1e-5,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
self.simple_train(model, inp, 25)
row_factor = model.row_factors[0].eval()
col_factor = model.col_factors[0].eval()
self.assertAllClose(
data,
np.dot(row_factor, np.transpose(col_factor)),
rtol=0.01,
atol=0.01)
# Trains a WALS model for a low-rank matrix and makes sure the product of
# factors is close to the original input.
def _run_test_train_full_low_rank_wals(self, use_factors_weights_cache):
rows = 15
cols = 11
dims = 3
with ops.Graph().as_default(), self.test_session():
data = np.dot(np.random.rand(rows, 3),
np.random.rand(3, cols)).astype(np.float32) / 3.0
indices = [[i, j] for i in xrange(rows) for j in xrange(cols)]
values = data.reshape(-1)
inp = sparse_tensor.SparseTensor(indices, values, [rows, cols])
model = factorization_ops.WALSModel(
rows,
cols,
dims,
regularization=1e-5,
row_weights=0,
col_weights=[0] * cols,
use_factors_weights_cache=use_factors_weights_cache)
self.simple_train(model, inp, 25)
row_factor = model.row_factors[0].eval()
col_factor = model.col_factors[0].eval()
self.assertAllClose(
data,
np.dot(row_factor, np.transpose(col_factor)),
rtol=0.01,
atol=0.01)
# Trains a WALS model for a partially observed low-rank matrix and makes
# sure the product of factors is reasonably close to the original input.
def _run_test_train_matrix_completion_wals(self, use_factors_weights_cache):
rows = 11
cols = 9
dims = 4
def keep_index(x):
return not (x[0] + x[1]) % 4
with ops.Graph().as_default(), self.test_session():
row_wts = 0.1 + np.random.rand(rows)
col_wts = 0.1 + np.random.rand(cols)
data = np.dot(np.random.rand(rows, 3),
np.random.rand(3, cols)).astype(np.float32) / 3.0
indices = np.array(
list(
filter(keep_index,
[[i, j] for i in xrange(rows) for j in xrange(cols)])))
values = data[indices[:, 0], indices[:, 1]]
inp = sparse_tensor.SparseTensor(indices, values, [rows, cols])
model = factorization_ops.WALSModel(
rows,
cols,
dims,
unobserved_weight=0.01,
regularization=0.001,
row_weights=row_wts,
col_weights=col_wts,
use_factors_weights_cache=use_factors_weights_cache)
self.simple_train(model, inp, 25)
row_factor = model.row_factors[0].eval()
col_factor = model.col_factors[0].eval()
out = np.dot(row_factor, np.transpose(col_factor))
for i in xrange(rows):
for j in xrange(cols):
if keep_index([i, j]):
self.assertNear(
data[i][j], out[i][j], err=0.4, msg="%d, %d" % (i, j))
else:
self.assertNear(0, out[i][j], err=0.5, msg="%d, %d" % (i, j))
def test_process_input_with_cache(self):
self._run_test_process_input(True)
def test_process_input_without_cache(self):
self._run_test_process_input(False)
def test_process_input_transposed_with_cache(self):
self._run_test_process_input_transposed(True)
def test_process_input_transposed_without_cache(self):
self._run_test_process_input_transposed(False)
def test_als_with_cache(self):
self._run_test_als(True)
def test_als_without_cache(self):
self._run_test_als(False)
def test_als_transposed_with_cache(self):
self._run_test_als_transposed(True)
def test_als_transposed_without_cache(self):
self._run_test_als_transposed(False)
def test_train_full_low_rank_wals_with_cache(self):
self._run_test_train_full_low_rank_wals(True)
def test_train_full_low_rank_wals_without_cache(self):
self._run_test_train_full_low_rank_wals(False)
def test_train_matrix_completion_wals_with_cache(self):
self._run_test_train_matrix_completion_wals(True)
def test_train_matrix_completion_wals_without_cache(self):
self._run_test_train_matrix_completion_wals(False)
def test_loss_transposed_with_cache(self):
self._run_test_process_input_transposed(True, compute_loss=True)
def test_loss_transposed_without_cache(self):
self._run_test_process_input_transposed(False, compute_loss=True)
def test_loss_with_cache(self):
self._run_test_process_input(True, compute_loss=True)
def test_loss_without_cache(self):
self._run_test_process_input(False, compute_loss=True)
if __name__ == "__main__":
test.main()
|
johnardavies/scrapy
|
refs/heads/master
|
scrapy/core/downloader/handlers/http11.py
|
113
|
"""Download handlers for http and https schemes"""
import re
import logging
from io import BytesIO
from time import time
from six.moves.urllib.parse import urldefrag
from zope.interface import implements
from twisted.internet import defer, reactor, protocol
from twisted.web.http_headers import Headers as TxHeaders
from twisted.web.iweb import IBodyProducer, UNKNOWN_LENGTH
from twisted.internet.error import TimeoutError
from twisted.web.http import PotentialDataLoss
from scrapy.xlib.tx import Agent, ProxyAgent, ResponseDone, \
HTTPConnectionPool, TCP4ClientEndpoint
from scrapy.http import Headers
from scrapy.responsetypes import responsetypes
from scrapy.core.downloader.webclient import _parse
from scrapy.utils.misc import load_object
from scrapy import twisted_version
logger = logging.getLogger(__name__)
class HTTP11DownloadHandler(object):
def __init__(self, settings):
self._pool = HTTPConnectionPool(reactor, persistent=True)
self._pool.maxPersistentPerHost = settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN')
self._pool._factory.noisy = False
self._contextFactoryClass = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])
self._contextFactory = self._contextFactoryClass()
self._default_maxsize = settings.getint('DOWNLOAD_MAXSIZE')
self._default_warnsize = settings.getint('DOWNLOAD_WARNSIZE')
self._disconnect_timeout = 1
def download_request(self, request, spider):
"""Return a deferred for the HTTP download"""
agent = ScrapyAgent(contextFactory=self._contextFactory, pool=self._pool,
maxsize=getattr(spider, 'download_maxsize', self._default_maxsize),
warnsize=getattr(spider, 'download_warnsize', self._default_warnsize))
return agent.download_request(request)
def close(self):
d = self._pool.closeCachedConnections()
# closeCachedConnections will hang on network or server issues, so
# we'll manually timeout the deferred.
#
# Twisted issue addressing this problem can be found here:
# https://twistedmatrix.com/trac/ticket/7738.
#
# closeCachedConnections doesn't handle external errbacks, so we'll
# issue a callback after `_disconnect_timeout` seconds.
delayed_call = reactor.callLater(self._disconnect_timeout, d.callback, [])
def cancel_delayed_call(result):
if delayed_call.active():
delayed_call.cancel()
return result
d.addBoth(cancel_delayed_call)
return d
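# Usage note (assumption based on Scrapy's default settings, not defined in
# this module): HTTP11DownloadHandler is normally selected through the
# DOWNLOAD_HANDLERS / DOWNLOAD_HANDLERS_BASE settings, e.g.:
#
#     DOWNLOAD_HANDLERS = {
#         'http': 'scrapy.core.downloader.handlers.http11.HTTP11DownloadHandler',
#         'https': 'scrapy.core.downloader.handlers.http11.HTTP11DownloadHandler',
#     }
#
# so each request's scheme is routed to this handler by the downloader.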
class TunnelError(Exception):
"""An HTTP CONNECT tunnel could not be established by the proxy."""
class TunnelingTCP4ClientEndpoint(TCP4ClientEndpoint):
"""An endpoint that tunnels through proxies to allow HTTPS downloads. To
accomplish that, this endpoint sends an HTTP CONNECT to the proxy.
The HTTP CONNECT is always sent when using this endpoint; this could
be improved, as the CONNECT will be redundant if the connection associated
with this endpoint comes from the pool and a CONNECT has already been issued
for it.
"""
_responseMatcher = re.compile(r'HTTP/1\.. 200')
def __init__(self, reactor, host, port, proxyConf, contextFactory,
timeout=30, bindAddress=None):
proxyHost, proxyPort, self._proxyAuthHeader = proxyConf
super(TunnelingTCP4ClientEndpoint, self).__init__(reactor, proxyHost,
proxyPort, timeout, bindAddress)
self._tunnelReadyDeferred = defer.Deferred()
self._tunneledHost = host
self._tunneledPort = port
self._contextFactory = contextFactory
def requestTunnel(self, protocol):
"""Asks the proxy to open a tunnel."""
tunnelReq = 'CONNECT %s:%s HTTP/1.1\r\n' % (self._tunneledHost,
self._tunneledPort)
if self._proxyAuthHeader:
tunnelReq += 'Proxy-Authorization: %s\r\n' % self._proxyAuthHeader
tunnelReq += '\r\n'
protocol.transport.write(tunnelReq)
self._protocolDataReceived = protocol.dataReceived
protocol.dataReceived = self.processProxyResponse
self._protocol = protocol
return protocol
def processProxyResponse(self, bytes):
"""Processes the response from the proxy. If the tunnel is successfully
created, notifies the client that we are ready to send requests. If not,
raises a TunnelError.
"""
self._protocol.dataReceived = self._protocolDataReceived
if TunnelingTCP4ClientEndpoint._responseMatcher.match(bytes):
self._protocol.transport.startTLS(self._contextFactory,
self._protocolFactory)
self._tunnelReadyDeferred.callback(self._protocol)
else:
self._tunnelReadyDeferred.errback(
TunnelError('Could not open CONNECT tunnel.'))
def connectFailed(self, reason):
"""Propagates the errback to the appropriate deferred."""
self._tunnelReadyDeferred.errback(reason)
def connect(self, protocolFactory):
self._protocolFactory = protocolFactory
connectDeferred = super(TunnelingTCP4ClientEndpoint,
self).connect(protocolFactory)
connectDeferred.addCallback(self.requestTunnel)
connectDeferred.addErrback(self.connectFailed)
return self._tunnelReadyDeferred
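# For reference, with illustrative values (example.com and the credential are
# not from this module): requestTunnel() above writes a request of the form
#
#     CONNECT example.com:443 HTTP/1.1
#     Proxy-Authorization: Basic dXNlcjpwYXNz
#
# followed by a blank line (the Proxy-Authorization line only if a header was
# supplied), and processProxyResponse() expects a status line matching
# "HTTP/1.x 200" from the proxy before starting TLS towards the tunneled host.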
class TunnelingAgent(Agent):
"""An agent that uses a L{TunnelingTCP4ClientEndpoint} to make HTTPS
downloads. It may look strange that we have chosen to subclass Agent and not
ProxyAgent but consider that after the tunnel is opened the proxy is
transparent to the client; thus the agent should behave like there is no
proxy involved.
"""
def __init__(self, reactor, proxyConf, contextFactory=None,
connectTimeout=None, bindAddress=None, pool=None):
super(TunnelingAgent, self).__init__(reactor, contextFactory,
connectTimeout, bindAddress, pool)
self._proxyConf = proxyConf
self._contextFactory = contextFactory
if twisted_version >= (15, 0, 0):
def _getEndpoint(self, uri):
return TunnelingTCP4ClientEndpoint(
self._reactor, uri.host, uri.port, self._proxyConf,
self._contextFactory, self._endpointFactory._connectTimeout,
self._endpointFactory._bindAddress)
else:
def _getEndpoint(self, scheme, host, port):
return TunnelingTCP4ClientEndpoint(
self._reactor, host, port, self._proxyConf,
self._contextFactory, self._connectTimeout,
self._bindAddress)
class ScrapyAgent(object):
_Agent = Agent
_ProxyAgent = ProxyAgent
_TunnelingAgent = TunnelingAgent
def __init__(self, contextFactory=None, connectTimeout=10, bindAddress=None, pool=None,
maxsize=0, warnsize=0):
self._contextFactory = contextFactory
self._connectTimeout = connectTimeout
self._bindAddress = bindAddress
self._pool = pool
self._maxsize = maxsize
self._warnsize = warnsize
def _get_agent(self, request, timeout):
bindaddress = request.meta.get('bindaddress') or self._bindAddress
proxy = request.meta.get('proxy')
if proxy:
_, _, proxyHost, proxyPort, proxyParams = _parse(proxy)
scheme = _parse(request.url)[0]
omitConnectTunnel = proxyParams.find('noconnect') >= 0
if scheme == 'https' and not omitConnectTunnel:
proxyConf = (proxyHost, proxyPort,
request.headers.get('Proxy-Authorization', None))
return self._TunnelingAgent(reactor, proxyConf,
contextFactory=self._contextFactory, connectTimeout=timeout,
bindAddress=bindaddress, pool=self._pool)
else:
endpoint = TCP4ClientEndpoint(reactor, proxyHost, proxyPort,
timeout=timeout, bindAddress=bindaddress)
return self._ProxyAgent(endpoint)
return self._Agent(reactor, contextFactory=self._contextFactory,
connectTimeout=timeout, bindAddress=bindaddress, pool=self._pool)
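    # Summary of the selection above: an HTTPS request through a proxy (unless
    # the proxy URL contains 'noconnect') uses TunnelingAgent and a CONNECT
    # tunnel; any other proxied request uses ProxyAgent over a plain TCP
    # endpoint to the proxy; requests without a proxy use the standard Agent.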
def download_request(self, request):
timeout = request.meta.get('download_timeout') or self._connectTimeout
agent = self._get_agent(request, timeout)
# request details
url = urldefrag(request.url)[0]
method = request.method
headers = TxHeaders(request.headers)
if isinstance(agent, self._TunnelingAgent):
headers.removeHeader('Proxy-Authorization')
bodyproducer = _RequestBodyProducer(request.body) if request.body else None
start_time = time()
d = agent.request(method, url, headers, bodyproducer)
# set download latency
d.addCallback(self._cb_latency, request, start_time)
# response body is ready to be consumed
d.addCallback(self._cb_bodyready, request)
d.addCallback(self._cb_bodydone, request, url)
# check download timeout
self._timeout_cl = reactor.callLater(timeout, d.cancel)
d.addBoth(self._cb_timeout, request, url, timeout)
return d
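    # The deferred chain assembled in download_request records the download
    # latency, streams the body (subject to maxsize/warnsize), and finally
    # builds a Scrapy Response. The download timeout is enforced by scheduling
    # d.cancel() with reactor.callLater(); _cb_timeout then either cancels the
    # pending call or converts the cancellation into a TimeoutError.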
def _cb_timeout(self, result, request, url, timeout):
if self._timeout_cl.active():
self._timeout_cl.cancel()
return result
raise TimeoutError("Getting %s took longer than %s seconds." % (url, timeout))
def _cb_latency(self, result, request, start_time):
request.meta['download_latency'] = time() - start_time
return result
def _cb_bodyready(self, txresponse, request):
# deliverBody hangs for responses without body
if txresponse.length == 0:
return txresponse, '', None
maxsize = request.meta.get('download_maxsize', self._maxsize)
warnsize = request.meta.get('download_warnsize', self._warnsize)
expected_size = txresponse.length if txresponse.length != UNKNOWN_LENGTH else -1
if maxsize and expected_size > maxsize:
logger.error("Expected response size (%(size)s) larger than "
"download max size (%(maxsize)s).",
{'size': expected_size, 'maxsize': maxsize})
txresponse._transport._producer.loseConnection()
raise defer.CancelledError()
if warnsize and expected_size > warnsize:
logger.warning("Expected response size (%(size)s) larger than "
"download warn size (%(warnsize)s).",
{'size': expected_size, 'warnsize': warnsize})
def _cancel(_):
txresponse._transport._producer.loseConnection()
d = defer.Deferred(_cancel)
txresponse.deliverBody(_ResponseReader(d, txresponse, request, maxsize, warnsize))
return d
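    # _cb_bodyready enforces size limits in two stages: the declared
    # Content-Length (when known) is checked here before any body is read, and
    # the actual received byte count is checked again incrementally in
    # _ResponseReader.dataReceived.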
def _cb_bodydone(self, result, request, url):
txresponse, body, flags = result
status = int(txresponse.code)
headers = Headers(txresponse.headers.getAllRawHeaders())
respcls = responsetypes.from_args(headers=headers, url=url)
return respcls(url=url, status=status, headers=headers, body=body, flags=flags)
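    # _cb_bodydone converts the Twisted response into a Scrapy Response,
    # letting responsetypes pick the concrete class (e.g. HtmlResponse or
    # TextResponse) from the headers and URL.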
class _RequestBodyProducer(object):
implements(IBodyProducer)
def __init__(self, body):
self.body = body
self.length = len(body)
def startProducing(self, consumer):
consumer.write(self.body)
return defer.succeed(None)
def pauseProducing(self):
pass
def stopProducing(self):
pass
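    # Minimal IBodyProducer implementation: the whole request body is written
    # in a single startProducing() call, and exposing self.length lets the
    # Agent send a Content-Length header instead of using chunked transfer
    # encoding.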
class _ResponseReader(protocol.Protocol):
def __init__(self, finished, txresponse, request, maxsize, warnsize):
self._finished = finished
self._txresponse = txresponse
self._request = request
self._bodybuf = BytesIO()
self._maxsize = maxsize
self._warnsize = warnsize
self._bytes_received = 0
def dataReceived(self, bodyBytes):
self._bodybuf.write(bodyBytes)
self._bytes_received += len(bodyBytes)
if self._maxsize and self._bytes_received > self._maxsize:
logger.error("Received (%(bytes)s) bytes larger than download "
"max size (%(maxsize)s).",
{'bytes': self._bytes_received,
'maxsize': self._maxsize})
self._finished.cancel()
if self._warnsize and self._bytes_received > self._warnsize:
logger.warning("Received (%(bytes)s) bytes larger than download "
"warn size (%(warnsize)s).",
{'bytes': self._bytes_received,
'warnsize': self._warnsize})
def connectionLost(self, reason):
if self._finished.called:
return
body = self._bodybuf.getvalue()
if reason.check(ResponseDone):
self._finished.callback((self._txresponse, body, None))
elif reason.check(PotentialDataLoss):
self._finished.callback((self._txresponse, body, ['partial']))
else:
self._finished.errback(reason)
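    # connectionLost maps the termination reason onto the result tuple:
    # ResponseDone -> complete body, PotentialDataLoss -> body flagged as
    # 'partial' (e.g. no Content-Length and the connection closed), anything
    # else is propagated as an error.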
|
gmuldoon/google-python-exercises
|
refs/heads/master
|
basic/solution/string1.py
|
210
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
# +++your code here+++
# LAB(begin solution)
if count < 10:
return 'Number of donuts: ' + str(count)
else:
return 'Number of donuts: many'
# LAB(replace solution)
# return
# LAB(end solution)
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
# +++your code here+++
# LAB(begin solution)
if len(s) < 2:
return ''
first2 = s[0:2]
last2 = s[-2:]
return first2 + last2
# LAB(replace solution)
# return
# LAB(end solution)
# C. fix_start
# Given a string s, return a string
# where all occurrences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
# +++your code here+++
# LAB(begin solution)
front = s[0]
back = s[1:]
fixed_back = back.replace(front, '*')
return front + fixed_back
# LAB(replace solution)
# return
# LAB(end solution)
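# Worked trace for fix_start('babble'): front = 'b', back = 'abble',
# fixed_back = 'a**le', so the result is 'ba**le'.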
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
#   'mix', 'pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
# +++your code here+++
# LAB(begin solution)
a_swapped = b[:2] + a[2:]
b_swapped = a[:2] + b[2:]
return a_swapped + ' ' + b_swapped
# LAB(replace solution)
# return
# LAB(end solution)
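# Worked trace for mix_up('mix', 'pod'): a_swapped = 'po' + 'x' = 'pox',
# b_swapped = 'mi' + 'd' = 'mid', giving 'pox mid'.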
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
print 'donuts'
  # Each line calls donuts and compares its result to the expected value for that call.
test(donuts(4), 'Number of donuts: 4')
test(donuts(9), 'Number of donuts: 9')
test(donuts(10), 'Number of donuts: many')
test(donuts(99), 'Number of donuts: many')
print
print 'both_ends'
test(both_ends('spring'), 'spng')
test(both_ends('Hello'), 'Helo')
test(both_ends('a'), '')
test(both_ends('xyz'), 'xyyz')
print
print 'fix_start'
test(fix_start('babble'), 'ba**le')
test(fix_start('aardvark'), 'a*rdv*rk')
test(fix_start('google'), 'goo*le')
test(fix_start('donut'), 'donut')
print
print 'mix_up'
test(mix_up('mix', 'pod'), 'pox mid')
test(mix_up('dog', 'dinner'), 'dig donner')
test(mix_up('gnash', 'sport'), 'spash gnort')
test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
main()
|
redhat-cip/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/admin/hypervisors/urls.py
|
66
|
# Copyright 2013 B1 Systems GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.hypervisors.compute \
import urls as compute_urls
from openstack_dashboard.dashboards.admin.hypervisors import views
urlpatterns = patterns(
'openstack_dashboard.dashboards.admin.hypervisors.views',
url(r'^(?P<hypervisor>[^/]+)/$',
views.AdminDetailView.as_view(),
name='detail'),
url(r'^$', views.AdminIndexView.as_view(), name='index'),
url(r'', include(compute_urls, namespace='compute')),
)
|
jillson/chrononaut
|
refs/heads/master
|
adventure/migrations/0004_auto_20151112_0419.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('adventure', '0003_load_initial_data'),
]
operations = [
migrations.AlterField(
model_name='adventure',
name='UnlockTrigger',
field=models.ForeignKey(blank=True, to='adventure.Adventure', null=True),
),
]
|
microcosm-cc/microweb
|
refs/heads/master
|
huddles/models.py
|
10644
|
from django.db import models
# Create your models here.
|