repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
vmahuli/tempest | refs/heads/master | tempest/api/image/v2/test_images_member.py | 2 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.image import base
from tempest import test
class ImagesMemberTest(base.BaseV2MemberImageTest):
    """Validate the image-member (sharing) workflows of the Images v2 API."""

    _interface = 'json'

    def _verify_member(self, member, image_id, expected_status):
        # Every membership record carries the sharee tenant, the image id
        # and a lifecycle status ('pending' / 'accepted' / 'rejected').
        self.assertEqual(member['member_id'], self.alt_tenant_id)
        self.assertEqual(member['image_id'], image_id)
        self.assertEqual(member['status'], expected_status)

    def _share_and_accept(self):
        # Common fixture: create an image, share it with the alt tenant and
        # accept the membership on its behalf.
        image_id = self._create_image()
        self.os_img_client.add_member(image_id,
                                      self.alt_tenant_id)
        self.alt_img_client.update_member_status(image_id,
                                                 self.alt_tenant_id,
                                                 'accepted')
        self.assertIn(image_id, self._list_image_ids_as_alt())
        return image_id

    def _check_schema(self, schema_name):
        # Both schema endpoints answer 200 and echo the schema name back.
        response, body = self.os_img_client.get_schema(schema_name)
        self.assertEqual(200, response.status)
        self.assertEqual(schema_name, body['name'])

    @test.attr(type='gate')
    def test_image_share_accept(self):
        image_id = self._create_image()
        response, member = self.os_img_client.add_member(image_id,
                                                         self.alt_tenant_id)
        self.assertEqual(200, response.status)
        self._verify_member(member, image_id, 'pending')
        # Until the alt tenant accepts, the image stays out of its listing.
        self.assertNotIn(image_id, self._list_image_ids_as_alt())
        self.alt_img_client.update_member_status(image_id,
                                                 self.alt_tenant_id,
                                                 'accepted')
        self.assertIn(image_id, self._list_image_ids_as_alt())
        response, body = self.os_img_client.get_image_membership(image_id)
        self.assertEqual(200, response.status)
        members = body['members']
        member = members[0]
        self.assertEqual(len(members), 1, str(members))
        self._verify_member(member, image_id, 'accepted')

    @test.attr(type='gate')
    def test_image_share_reject(self):
        image_id = self._create_image()
        response, member = self.os_img_client.add_member(image_id,
                                                         self.alt_tenant_id)
        self.assertEqual(200, response.status)
        self._verify_member(member, image_id, 'pending')
        self.assertNotIn(image_id, self._list_image_ids_as_alt())
        response, _ = self.alt_img_client.update_member_status(image_id,
                                                               self.alt_tenant_id,
                                                               'rejected')
        self.assertEqual(200, response.status)
        # A rejected share never shows up in the alt tenant's image list.
        self.assertNotIn(image_id, self._list_image_ids_as_alt())

    @test.attr(type='gate')
    def test_get_image_member(self):
        image_id = self._share_and_accept()
        response, member = self.os_img_client.get_member(image_id,
                                                         self.alt_tenant_id)
        self.assertEqual(200, response.status)
        self._verify_member(member, image_id, 'accepted')

    @test.attr(type='gate')
    def test_remove_image_member(self):
        image_id = self._share_and_accept()
        response = self.os_img_client.remove_member(image_id, self.alt_tenant_id)
        self.assertEqual(204, response.status)
        self.assertNotIn(image_id, self._list_image_ids_as_alt())

    @test.attr(type='gate')
    def test_get_image_member_schema(self):
        self._check_schema("member")

    @test.attr(type='gate')
    def test_get_image_members_schema(self):
        self._check_schema("members")
|
Phonemetra/TurboCoin | refs/heads/master | test/functional/wallet_importprunedfunds.py | 1 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 TurboCoin
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importprunedfunds and removeprunedfunds RPCs."""
from decimal import Decimal
from test_framework.test_framework import TurbocoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class ImportPrunedFundsTest(TurbocoinTestFramework):
    """Exercise the importprunedfunds / removeprunedfunds RPCs.

    Node 0 mines and funds three addresses; node 1 imports the pruned
    funds from raw-transaction + merkle-proof pairs (no rescan) and then
    removes them again, checking balances and address flags throughout.
    """

    def set_test_params(self):
        # Two fresh nodes: node0 mines/funds, node1 does the importing.
        self.setup_clean_chain = True
        self.num_nodes = 2

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        self.log.info("Mining blocks...")
        # 101 blocks so the first coinbase matures and is spendable.
        self.nodes[0].generate(101)

        self.sync_all()

        # address
        address1 = self.nodes[0].getnewaddress()
        # pubkey
        address2 = self.nodes[0].getnewaddress()
        # privkey
        address3 = self.nodes[0].getnewaddress()
        address3_privkey = self.nodes[0].dumpprivkey(address3)  # Using privkey

        # Check only one address
        address_info = self.nodes[0].getaddressinfo(address1)
        assert_equal(address_info['ismine'], True)

        self.sync_all()

        # Node 1 sync test
        assert_equal(self.nodes[1].getblockcount(), 101)

        # Address Test - before import: node1 knows none of the addresses.
        address_info = self.nodes[1].getaddressinfo(address1)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)

        address_info = self.nodes[1].getaddressinfo(address2)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)

        address_info = self.nodes[1].getaddressinfo(address3)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)

        # Send funds to self; keep the raw transaction plus its merkle proof
        # for each payment so they can be fed to importprunedfunds on node1.
        txnid1 = self.nodes[0].sendtoaddress(address1, 0.1)
        self.nodes[0].generate(1)
        rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex']
        proof1 = self.nodes[0].gettxoutproof([txnid1])

        txnid2 = self.nodes[0].sendtoaddress(address2, 0.05)
        self.nodes[0].generate(1)
        rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex']
        proof2 = self.nodes[0].gettxoutproof([txnid2])

        txnid3 = self.nodes[0].sendtoaddress(address3, 0.025)
        self.nodes[0].generate(1)
        rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex']
        proof3 = self.nodes[0].gettxoutproof([txnid3])

        self.sync_all()

        # Import with no affiliated address: the RPC must refuse.
        assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1)

        balance1 = self.nodes[1].getbalance()
        assert_equal(balance1, Decimal(0))

        # Import with affiliated address with no rescan (watch-only funds).
        self.nodes[1].importaddress(address=address2, rescan=False)
        self.nodes[1].importprunedfunds(rawtransaction=rawtxn2, txoutproof=proof2)
        assert [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid2]

        # Import with private key with no rescan (spendable funds).
        self.nodes[1].importprivkey(privkey=address3_privkey, rescan=False)
        self.nodes[1].importprunedfunds(rawtxn3, proof3)
        assert [tx for tx in self.nodes[1].listtransactions() if tx['txid'] == txnid3]
        balance3 = self.nodes[1].getbalance()
        assert_equal(balance3, Decimal('0.025'))

        # Addresses Test - after import: address1 still unknown, address2 is
        # watch-only, address3 is fully owned.
        address_info = self.nodes[1].getaddressinfo(address1)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)
        address_info = self.nodes[1].getaddressinfo(address2)
        assert_equal(address_info['iswatchonly'], True)
        assert_equal(address_info['ismine'], False)
        address_info = self.nodes[1].getaddressinfo(address3)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], True)

        # Remove transactions: a tx that was never imported must raise,
        # the two imported ones must disappear from the listing.
        assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1)
        assert not [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid1]

        self.nodes[1].removeprunedfunds(txnid2)
        assert not [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid2]

        self.nodes[1].removeprunedfunds(txnid3)
        assert not [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid3]
if __name__ == '__main__':
ImportPrunedFundsTest().main()
|
import os
import sys
import tempfile
import mimetypes
import webbrowser

# Import the email modules we'll need
from email import policy
from email.parser import BytesParser

# An imaginary module that would make this work and be safe.
from imaginary import magic_html_parser

# In a real program you'd get the filename from the arguments.
# FIX: use a context manager so the message file is closed promptly instead
# of leaking the descriptor until interpreter shutdown.
with open('outgoing.msg', 'rb') as fp:
    msg = BytesParser(policy=policy.default).parse(fp)

# Now the header items can be accessed as a dictionary, and any non-ASCII will
# be converted to unicode:
print('To:', msg['to'])
print('From:', msg['from'])
print('Subject:', msg['subject'])

# If we want to print a preview of the message content, we can extract whatever
# the least formatted payload is and print the first three lines.  Of course,
# if the message has no plain text part printing the first three lines of html
# is probably useless, but this is just a conceptual example.
simplest = msg.get_body(preferencelist=('plain', 'html'))
print()
print(''.join(simplest.get_content().splitlines(keepends=True)[:3]))

ans = input("View full message?")
# FIX: ans.lower()[0] raised IndexError on empty input; startswith is safe
# and behaves identically for non-empty answers.
if ans.lower().startswith('n'):
    sys.exit()

# We can extract the richest alternative in order to display it:
richest = msg.get_body()
partfiles = {}
if richest['content-type'].maintype == 'text':
    if richest['content-type'].subtype == 'plain':
        for line in richest.get_content().splitlines():
            print(line)
        sys.exit()
    elif richest['content-type'].subtype == 'html':
        body = richest
    else:
        print("Don't know how to display {}".format(richest.get_content_type()))
        sys.exit()
elif richest['content-type'].content_type == 'multipart/related':
    # FIX: ('html') is just the string 'html', not a one-element tuple; the
    # original only worked by accident via substring matching inside get_body.
    body = richest.get_body(preferencelist=('html',))
    for part in richest.iter_attachments():
        fn = part.get_filename()
        if fn:
            extension = os.path.splitext(part.get_filename())[1]
        else:
            extension = mimetypes.guess_extension(part.get_content_type())
        with tempfile.NamedTemporaryFile(suffix=extension, delete=False) as f:
            f.write(part.get_content())
        # again strip the <> to go from email form of cid to html form.
        partfiles[part['content-id'][1:-1]] = f.name
else:
    print("Don't know how to display {}".format(richest.get_content_type()))
    sys.exit()

with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
    # The magic_html_parser has to rewrite the href="cid:...." attributes to
    # point to the filenames in partfiles.  It also has to do a safety-sanitize
    # of the html.  It could be written using html.parser.
    f.write(magic_html_parser(body.get_content(), partfiles))

webbrowser.open(f.name)
os.remove(f.name)
for fn in partfiles.values():
    os.remove(fn)

# Of course, there are lots of email messages that could break this simple
# minded program, but it will handle the most common ones.
|
kevin8909/xjerp | refs/heads/master | openerp/addons/web_shortcuts/__init__.py | 54 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Opentopic/falcon-api | refs/heads/master | falcon_dbapi/resources/index.py | 1 | from urllib.parse import urljoin
import falcon
class IndexResource(object):
    """
    Lists routes registered in current app.
    """

    def __init__(self, routes):
        """
        :param routes: list of routes registered in current API
        :type routes: list
        """
        self.routes = routes

    def on_get(self, req, resp):
        """
        Respond with the absolute URL of every registered route.

        :param req: Falcon request
        :type req: falcon.request.Request

        :param resp: Falcon response
        :type resp: falcon.response.Response
        """
        absolute_urls = []
        for route in self.routes:
            absolute_urls.append(urljoin(req.uri, route))
        resp.body = absolute_urls
        resp.status = falcon.HTTP_200
|
thaumos/ansible | refs/heads/devel | lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage.py | 11 | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_devtestlabcustomimage
version_added: "2.8"
short_description: Manage Azure DevTest Lab Custom Image instance.
description:
- Create, update and delete instance of Azure DevTest Lab Custom Image.
options:
resource_group:
description:
- The name of the resource group.
required: True
lab_name:
description:
- The name of the lab.
required: True
name:
description:
- The name of the custom image.
required: True
source_vm:
description:
- Source DevTest Lab virtual machine name.
windows_os_state:
description:
- The state of the Windows OS.
choices:
- 'non_sysprepped'
- 'sysprep_requested'
- 'sysprep_applied'
linux_os_state:
description:
- The state of the Linux OS.
choices:
- 'non_deprovisioned'
- 'deprovision_requested'
- 'deprovision_applied'
description:
description:
- The description of the custom image.
author:
description:
- The author of the custom image.
state:
description:
- Assert the state of the Custom Image.
- Use C(present) to create or update an Custom Image and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create instance of DevTest Lab Image
azure_rm_devtestlabcustomimage:
resource_group: myResourceGroup
lab_name: myLab
name: myImage
source_vm: myDevTestLabVm
linux_os_state: non_deprovisioned
'''
RETURN = '''
id:
description:
- The identifier of the resource.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/images/myImage"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.devtestlabs import DevTestLabsClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
    """Enumeration of reconciliation actions chosen by exec_module."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMDtlCustomImage(AzureRMModuleBase):
    """Configuration class for an Azure RM Custom Image resource.

    Implements the standard Ansible create/update/delete reconciliation:
    read the current state, diff it against the requested state, and apply
    the minimal action (see the Actions enumeration).
    """

    def __init__(self):
        # Argument spec mirrors the module DOCUMENTATION above.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            lab_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            source_vm=dict(
                type='str'
            ),
            windows_os_state=dict(
                type='str',
                choices=['non_sysprepped',
                         'sysprep_requested',
                         'sysprep_applied']
            ),
            linux_os_state=dict(
                type='str',
                choices=['non_deprovisioned',
                         'deprovision_requested',
                         'deprovision_applied']
            ),
            description=dict(
                type='str'
            ),
            author=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.resource_group = None
        self.lab_name = None
        self.name = None
        # Body of the custom image resource, built from the module params.
        self.custom_image = dict()

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.to_do = Actions.NoAction

        # source_vm is only meaningful (and required) when creating/updating.
        required_if = [
            ('state', 'present', [
                'source_vm'])
        ]

        super(AzureRMDtlCustomImage, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                    supports_check_mode=True,
                                                    supports_tags=True,
                                                    required_if=required_if)

    def exec_module(self, **kwargs):
        """Main module execution method"""
        # Split kwargs: known attributes go on self, everything else becomes
        # part of the custom image body.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.custom_image[key] = kwargs[key]

        if self.state == 'present':
            windows_os_state = self.custom_image.pop('windows_os_state', False)
            linux_os_state = self.custom_image.pop('linux_os_state', False)
            source_vm_name = self.custom_image.pop('source_vm')
            temp = "/subscriptions/{0}/resourcegroups/{1}/providers/microsoft.devtestlab/labs/{2}/virtualmachines/{3}"
            self.custom_image['vm'] = {}
            self.custom_image['vm']['source_vm_id'] = temp.format(self.subscription_id, self.resource_group, self.lab_name, source_vm_name)
            if windows_os_state:
                self.custom_image['vm']['windows_os_info'] = {'windows_os_state': _snake_to_camel(windows_os_state, True)}
            elif linux_os_state:
                self.custom_image['vm']['linux_os_info'] = {'linux_os_state': _snake_to_camel(linux_os_state, True)}
            else:
                # BUG FIX: the original message named 'linux_os_state' twice.
                self.fail("Either 'windows_os_state' or 'linux_os_state' must be specified")

        response = None

        self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        old_response = self.get_customimage()

        if not old_response:
            self.log("Custom Image instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("Custom Image instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                # Only update when the desired body differs from reality.
                if (not default_compare(self.custom_image, old_response, '', self.results)):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the Custom Image instance")

            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_customimage()
            self.results['changed'] = True
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("Custom Image instance deleted")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_customimage()
            # This currently doesn't work as there is a bug in SDK / Service
            if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
                response = self.get_poller_result(response)
        else:
            self.log("Custom Image instance unchanged")
            self.results['changed'] = False
            response = old_response

        if self.state == 'present':
            self.results.update({
                'id': response.get('id', None)
            })
        return self.results

    def create_update_customimage(self):
        '''
        Creates or updates Custom Image with the specified configuration.

        :return: deserialized Custom Image instance state dictionary
        '''
        self.log("Creating / Updating the Custom Image instance {0}".format(self.name))

        try:
            response = self.mgmt_client.custom_images.create_or_update(resource_group_name=self.resource_group,
                                                                       lab_name=self.lab_name,
                                                                       name=self.name,
                                                                       custom_image=self.custom_image)
            # Long-running operation: wait for completion before reporting.
            if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
                response = self.get_poller_result(response)

        except CloudError as exc:
            self.log('Error attempting to create the Custom Image instance.')
            self.fail("Error creating the Custom Image instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_customimage(self):
        '''
        Deletes specified Custom Image instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the Custom Image instance {0}".format(self.name))
        try:
            response = self.mgmt_client.custom_images.delete(resource_group_name=self.resource_group,
                                                             lab_name=self.lab_name,
                                                             name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the Custom Image instance.')
            self.fail("Error deleting the Custom Image instance: {0}".format(str(e)))

        return True

    def get_customimage(self):
        '''
        Gets the properties of the specified Custom Image.

        :return: deserialized Custom Image instance state dictionary,
                 or False when the image does not exist
        '''
        self.log("Checking if the Custom Image instance {0} is present".format(self.name))
        found = False
        try:
            response = self.mgmt_client.custom_images.get(resource_group_name=self.resource_group,
                                                          lab_name=self.lab_name,
                                                          name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("Custom Image instance : {0} found".format(response.name))
        except CloudError as e:
            # A missing image surfaces as CloudError; treat as "not found".
            self.log('Did not find the Custom Image instance.')
        if found is True:
            return response.as_dict()

        return False
def default_compare(new, old, path, result):
    """Recursively decide whether desired state *new* is already satisfied
    by existing state *old*.

    Only keys present in *new* are inspected (extra keys in *old* are
    ignored), lists are compared order-insensitively, and the special path
    '/location' is compared ignoring spaces and case.  On the first
    mismatch a human-readable explanation is stored in result['compare'].

    :param new: desired value (dict / list / scalar); None matches anything
    :param old: value currently reported by the service
    :param path: slash-separated breadcrumb used in mismatch messages
    :param result: dict that receives a 'compare' entry on mismatch
    :return: True when equivalent, False otherwise
    """
    if new is None:
        # No desired value specified -> anything matches.
        return True
    elif isinstance(new, dict):
        if not isinstance(old, dict):
            result['compare'] = 'changed [' + path + '] old dict is null'
            return False
        for k in new.keys():
            if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
                return False
        return True
    elif isinstance(new, list):
        if not isinstance(old, list) or len(new) != len(old):
            result['compare'] = 'changed [' + path + '] length is different or null'
            return False
        # BUG FIX: guard against empty lists before peeking at old[0]
        # (the original raised IndexError comparing [] with []).
        if len(old) > 0 and isinstance(old[0], dict):
            key = None
            if 'id' in old[0] and 'id' in new[0]:
                key = 'id'
            elif 'name' in old[0] and 'name' in new[0]:
                key = 'name'
            else:
                key = list(old[0])[0]
            new = sorted(new, key=lambda x: x.get(key, None))
            old = sorted(old, key=lambda x: x.get(key, None))
        else:
            new = sorted(new)
            old = sorted(old)
        for i in range(len(new)):
            if not default_compare(new[i], old[i], path + '/*', result):
                return False
        return True
    else:
        if path == '/location':
            # Locations compare ignoring spaces and case ('East US' == 'eastus').
            new = new.replace(' ', '').lower()
            # BUG FIX: the original did `old = new.replace(...)`, overwriting
            # old with the normalized new, so ANY two locations compared equal.
            old = old.replace(' ', '').lower()
        if new == old:
            return True
        else:
            result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
            return False
def dict_camelize(d, path, camelize_first):
    """Camelize, in place, the string found at *path* inside dict/list *d*.

    *path* is a list of keys; lists are traversed element-wise and missing
    keys or None values are silently skipped.
    """
    if isinstance(d, list):
        # Apply to every element of a list and stop.
        for element in d:
            dict_camelize(element, path, camelize_first)
        return
    if not isinstance(d, dict):
        # Scalars (or None) have nothing to camelize.
        return
    head = path[0]
    if len(path) == 1:
        # Leaf key: rewrite the value if present.
        current = d.get(head, None)
        if current is not None:
            d[head] = _snake_to_camel(current, camelize_first)
    else:
        # Descend one level and recurse on the remaining path.
        nested = d.get(head, None)
        if nested is not None:
            dict_camelize(nested, path[1:], camelize_first)
def main():
    """Main execution"""
    # Instantiating the module class runs the whole reconciliation via the
    # base class calling exec_module().
    AzureRMDtlCustomImage()


if __name__ == '__main__':
    main()
|
bilalliberty/android_kernel_htc_liberty-villec2 | refs/heads/cm-11.0 | tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
# NOTE: Python 2 perf script — it runs inside perf's embedded interpreter,
# so the legacy 'thread' module and print statements are intentional.
import os, sys, thread, time

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None                  # optional: only count syscalls from this comm
default_interval = 3             # seconds between display refreshes
interval = default_interval

if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    # Both [comm] and [interval] were given.
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    # Single argument: treat as interval if numeric, otherwise as a comm name.
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

# Per-syscall-id hit counters; autodict (from Core) autovivifies entries.
syscalls = autodict()
def trace_begin():
    # Called once by perf before any events are delivered; start the
    # background thread that periodically prints the totals.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
                            common_secs, common_nsecs, common_pid, common_comm,
                            id, args):
    # Invoked by perf for every raw_syscalls:sys_enter tracepoint event;
    # bump the counter for this syscall id (optionally filtered by comm).
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit for this id: the autodict slot is not an int yet.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Runs forever on a helper thread: clear the terminal, print the counts
    # sorted by frequency (descending), reset them, sleep, repeat.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                 "----------"),
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # syscall_name may fail for unknown ids; skip that row.
                pass
        syscalls.clear()
        time.sleep(interval)
|
eBay/restcommander | refs/heads/master | play-1.2.4/python/Lib/curses/__init__.py | 61 | """curses
The main package for curses support for Python. Normally used by importing
the package, and perhaps a particular module inside it.
import curses
from curses import textpad
curses.initwin()
...
"""
__revision__ = "$Id: __init__.py 61064 2008-02-25 16:29:58Z andrew.kuchling $"
from _curses import *
from curses.wrapper import wrapper
import os as _os
import sys as _sys
# Some constants, most notably the ACS_* ones, are only added to the C
# _curses module's dictionary after initscr() is called. (Some
# versions of SGI's curses don't define values for those constants
# until initscr() has been called.) This wrapper function calls the
# underlying C initscr(), and then copies the constants from the
# _curses module to the curses package's dictionary. Don't do 'from
# curses import *' if you'll be needing the ACS_* constants.
def initscr():
    """Wrap _curses.initscr(): initialize the screen and copy the ACS_* and
    LINES/COLS constants — which only exist after initialization — into the
    curses package's namespace.
    """
    import _curses, curses
    # we call setupterm() here because it raises an error
    # instead of calling exit() in error cases.
    setupterm(term=_os.environ.get("TERM", "unknown"),
              fd=_sys.__stdout__.fileno())
    stdscr = _curses.initscr()
    for key, value in _curses.__dict__.items():
        if key[0:4] == 'ACS_' or key in ('LINES', 'COLS'):
            setattr(curses, key, value)

    return stdscr
# This is a similar wrapper for start_color(), which adds the COLORS and
# COLOR_PAIRS variables which are only available after start_color() is
# called.
def start_color():
    """Wrap _curses.start_color(): enable color support and mirror the
    COLORS / COLOR_PAIRS constants (defined only after this call) into the
    curses package's namespace.
    """
    import _curses, curses
    retval = _curses.start_color()
    if hasattr(_curses, 'COLORS'):
        curses.COLORS = _curses.COLORS
    if hasattr(_curses, 'COLOR_PAIRS'):
        curses.COLOR_PAIRS = _curses.COLOR_PAIRS
    return retval
# Import Python has_key() implementation if _curses doesn't contain has_key()
try:
has_key
except NameError:
from has_key import has_key
|
ITOO-UrFU/open-programs | refs/heads/dev | open_programs/apps/courses/serializers.py | 1 | from .models import Course, Session
from rest_framework import serializers
class CourseSerializer(serializers.HyperlinkedModelSerializer):
    """Full hyperlinked representation of a Course; related sessions and
    results are rendered as links to their detail views."""

    # authors = serializers.HyperlinkedRelatedField(
    #     many=True,
    #     read_only=True,
    #     view_name='person-detail'
    # )
    #
    # staff = serializers.HyperlinkedRelatedField(
    #     many=True,
    #     read_only=True,
    #     view_name='person-detail'
    # )

    # Read-only links to the course's sessions and results.
    sessions = serializers.HyperlinkedRelatedField(
        many=True,
        read_only=True,
        view_name='session-detail'
    )

    results = serializers.HyperlinkedRelatedField(
        many=True,
        read_only=True,
        view_name='result-detail'
    )

    class Meta:
        model = Course
        fields = ("id", "title", "description", "slug", "about", "cover", "video", "video_cover", "workload", "points",
                  "duration", "sessions", "results", "results_text", "status", "archived", "created",
                  "updated")  # "authors", "authors_ordering", "staff",
class CourseIdSerializer(serializers.ModelSerializer):
    """Minimal serializer exposing only the course primary key."""

    class Meta:
        depth = 1
        model = Course
        fields = ('id',)
        read_only_fields = ('id',)
class SessionSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of a course Session (schedule + status)."""

    class Meta:
        model = Session
        fields = ("slug", "startdate", "enddate", "status", "archived", "created", "updated")
|
richardcs/ansible | refs/heads/devel | lib/ansible/module_utils/facts/other/facter.py | 232 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts.collector import BaseFactCollector
class FacterFactCollector(BaseFactCollector):
    """Collect facts by running Puppet's ``facter`` (or ``cfacter``) tool.

    The JSON output keys are exposed with a ``facter_`` prefix so they sit
    alongside the regular ansible facts.
    """

    name = 'facter'
    _fact_ids = set(['facter'])

    def __init__(self, collectors=None, namespace=None):
        # The passed-in namespace is deliberately replaced: facter facts
        # always use the 'facter_' prefix namespace.
        namespace = PrefixFactNamespace(namespace_name='facter',
                                        prefix='facter_')
        super(FacterFactCollector, self).__init__(collectors=collectors,
                                                  namespace=namespace)

    def find_facter(self, module):
        """Return the path to cfacter/facter, or None if neither is installed."""
        facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
        cfacter_path = module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])

        # Prefer to use cfacter if available
        if cfacter_path is not None:
            facter_path = cfacter_path

        return facter_path

    def run_facter(self, module, facter_path):
        # if facter is installed, and we can use --json because
        # ruby-json is ALSO installed, include facter data in the JSON
        rc, out, err = module.run_command(facter_path + " --puppet --json")
        return rc, out, err

    def get_facter_output(self, module):
        """Run facter and return its stdout, or None when missing or failing."""
        facter_path = self.find_facter(module)
        if not facter_path:
            return None

        rc, out, err = self.run_facter(module, facter_path)
        if rc != 0:
            return None

        return out

    def collect(self, module=None, collected_facts=None):
        # Note that this mirrors previous facter behavior, where there isn't
        # an 'ansible_facter' key in the main fact dict, but instead,
        # 'facter_whatever' items are added to the main dict.
        facter_dict = {}

        if not module:
            return facter_dict

        facter_output = self.get_facter_output(module)

        # TODO: if we fail, should we add an empty facter key or nothing?
        if facter_output is None:
            return facter_dict

        try:
            facter_dict = json.loads(facter_output)
        except Exception:
            # FIXME: maybe raise a FactCollectorError with some info attrs?
            pass

        return facter_dict
|
isandlaTech/cohorte-runtime | refs/heads/master | python/src/lib/python/nvd3/discreteBarChart.py | 3 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from .NVD3Chart import NVD3Chart
class discreteBarChart(NVD3Chart):
    """
    A discrete bar chart or bar graph is a chart with rectangular bars with
    lengths proportional to the values that they represent.

    .. image:: ../_static/doc_images/discreteBarChart.png

    Python example::

        from nvd3 import discreteBarChart
        chart = discreteBarChart(name='discreteBarChart', height=400, width=400)

        xdata = ["A", "B", "C", "D", "E", "F"]
        ydata = [3, 4, 0, -3, 5, 7]

        chart.add_serie(y=ydata, x=xdata)
        chart.buildhtml()

    Javascript generated::

        nv.addGraph(function() {
            var chart = nv.models.discreteBarChart();
            chart.tooltipContent(function(key, y, e, graph) {
                var x = String(graph.point.x);
                var y = String(graph.point.y);
                tooltip_str = '<center><b>'+key+'</b></center>' + y + ' at ' + x;
                return tooltip_str;
            });
            d3.select('#discreteBarChart svg')
                .datum(data_discreteBarChart)
                .transition().duration(500)
                .attr('width', 400)
                .attr('height', 400)
                .call(chart);
            return chart;
        });

        data_discreteBarChart=[
            {"key": "Serie 1",
             "yAxis": "1",
             "values": [{"x": "A", "y": 3},
                        {"x": "B", "y": 4},
                        {"x": "C", "y": 0},
                        {"x": "D", "y": 3},
                        {"x": "E", "y": 5},
                        {"x": "F", "y": 7}
                        ]}];
    """

    def __init__(self, **kwargs):
        """Configure axes, tooltip and dimensions from keyword arguments."""
        NVD3Chart.__init__(self, **kwargs)
        # self.slugify_name(kwargs.get('name', 'discreteBarChart'))
        height = kwargs.get('height', 450)
        width = kwargs.get('width', None)

        if kwargs.get('x_is_date', False):
            # Date-valued x-axis: format ticks with the supplied (or default)
            # strftime-style pattern.
            self.set_date_flag(True)
            self.create_x_axis('xAxis',
                               format=kwargs.get('x_axis_format', "%d %b %Y %H %S"),
                               date=True)
        else:
            self.create_x_axis('xAxis', format=None)
        self.set_custom_tooltip_flag(True)

        # must have a specified height, otherwise it superimposes both charts
        if height:
            self.set_graph_height(height)
        if width:
            self.set_graph_width(width)
|
dmsimard/ansible | refs/heads/devel | test/units/vars/test_module_response_deepcopy.py | 118 | # -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.vars.clean import module_response_deepcopy
import pytest
def test_module_response_deepcopy_basic():
    """A deep copy of a plain integer compares equal to the original."""
    original = 42
    duplicate = module_response_deepcopy(original)
    assert duplicate == original
def test_module_response_deepcopy_atomic():
    """Immutable atoms must be returned as-is (identity preserved)."""
    atoms = (None, 42, 2**100, 3.14, True, False, 1j,
             "hello", u"hello\u1234")
    for value in atoms:
        assert module_response_deepcopy(value) is value
def test_module_response_deepcopy_list():
    """Nested lists are copied recursively, not shared."""
    original = [[1, 2], 3]
    duplicate = module_response_deepcopy(original)
    assert duplicate == original
    assert duplicate is not original
    assert duplicate[0] is not original[0]
def test_module_response_deepcopy_empty_tuple():
    """The empty tuple may be returned unchanged (it is immutable)."""
    original = ()
    assert module_response_deepcopy(original) is original
@pytest.mark.skip(reason='No current support for this situation')
def test_module_response_deepcopy_tuple():
    """Tuples containing mutables would need a genuine copy (unsupported)."""
    original = ([1, 2], 3)
    duplicate = module_response_deepcopy(original)
    assert duplicate == original
    assert duplicate is not original
    assert duplicate[0] is not original[0]
def test_module_response_deepcopy_tuple_of_immutables():
    """A tuple made only of immutables is returned without copying."""
    original = ((1, 2), 3)
    assert module_response_deepcopy(original) is original
def test_module_response_deepcopy_dict():
    """Mutable dict values are copied, not shared."""
    original = {"foo": [1, 2], "bar": 3}
    duplicate = module_response_deepcopy(original)
    assert duplicate == original
    assert duplicate is not original
    assert duplicate["foo"] is not original["foo"]
|
prakritish/ansible | refs/heads/devel | lib/ansible/modules/network/nxos/nxos_rollback.py | 21 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_rollback
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Set a checkpoint or rollback to a checkpoint.
description:
- This module offers the ability to set a configuration checkpoint
file or rollback to a configuration checkpoint file on Cisco NXOS
switches.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Sometimes C(transport=nxapi) may cause a timeout error.
options:
checkpoint_file:
description:
- Name of checkpoint file to create. Mutually exclusive
with rollback_to.
required: false
default: null
rollback_to:
description:
- Name of checkpoint file to rollback to. Mutually exclusive
with checkpoint_file.
required: false
default: null
'''
EXAMPLES = '''
- nxos_rollback:
checkpoint_file: backup.cfg
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
- nxos_rollback:
rollback_to: backup.cfg
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
filename:
description: The filename of the checkpoint/rollback file.
returned: success
type: string
sample: 'backup.cfg'
status:
description: Which operation took place and whether it was successful.
returned: success
type: string
sample: 'rollback executed'
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
import re
def execute_commands(cmds, module, command_type=None):
    """Send *cmds* to the device through the module transport.

    Tries the older ``module.execute`` API first and falls back to the
    ``module.cli`` command queue when ``execute`` is not available.

    :param cmds: command or list of commands to run
    :param module: AnsibleModule-like object providing the transport
    :param command_type: optional NX-API command type ('cli_show' or
        'cli_show_ascii'); translated to a cli output format on fallback
    :returns: the device response
    """
    # NX-API command type -> output format expected by the cli fallback.
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }
    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        # NOTE(review): ShellError and get_exception are never imported in
        # this module; if this handler is ever reached it raises NameError
        # instead of reporting the shell failure — confirm intended imports.
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        # module has no execute(): use the cli command queue instead.
        try:
            if command_type:
                # Map the NX-API command type to the cli output format.
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                # NOTE(review): this branch is identical to the one above
                # except that command_type is None here, so output=None.
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
def prepare_show_command(command, module):
    """Dispatch *command* with the output type required by the transport."""
    transport = module.params['transport']
    if transport == 'cli':
        execute_commands(command, module)
    elif transport == 'nxapi':
        execute_commands(command, module, command_type='cli_show_ascii')
def checkpoint(filename, module):
    """Create a checkpoint file on the device, suppressing prompts."""
    prepare_show_command(
        ['terminal dont-ask', 'checkpoint file %s' % filename], module)
def rollback(filename, module):
    """Roll the running config back to the named checkpoint file."""
    commands = ['rollback running-config file %s' % filename]
    try:
        module.configure(commands)
    except AttributeError:
        # module has no configure(): fall back to the cli command queue.
        try:
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            # NOTE(review): ShellError/get_exception are not imported in this
            # module; reaching this handler raises NameError — confirm imports.
            clie = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(clie), commands=commands)
def main():
    """Module entry point: create a checkpoint or roll back to one."""
    argument_spec = dict(
        checkpoint_file=dict(required=False),
        rollback_to=dict(required=False),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    argument_spec.update(nxos_argument_spec)
    # checkpoint_file and rollback_to are mutually exclusive operations.
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['checkpoint_file',
                                                'rollback_to']],
                           supports_check_mode=False)
    checkpoint_file = module.params['checkpoint_file']
    rollback_to = module.params['rollback_to']
    status = None
    filename = None
    changed = False
    try:
        if checkpoint_file:
            checkpoint(checkpoint_file, module)
            status = 'checkpoint file created'
        elif rollback_to:
            rollback(rollback_to, module)
            status = 'rollback executed'
        changed = True
        filename = rollback_to or checkpoint_file
    except ShellError:
        # NOTE(review): ShellError is not imported here; this handler would
        # raise NameError if an exception propagated — confirm imports.
        clie = get_exception()
        module.fail_json(msg=str(clie))
    module.exit_json(changed=changed, status=status, filename=filename)


if __name__ == '__main__':
    main()
|
maralla/vim-fixup | refs/heads/master | pythonx/validator/__init__.py | 3 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import collections
import importlib
import json
import re
import vim
import logging
from .utils import config_logging, exe_exist, get_vim_var, to_unicode, \
find_file
# Configure logging once at import time; all checker modules share this logger.
config_logging()
logger = logging.getLogger('validator')
def _get_type(msg):
if msg.get('error') is not None:
return 'E'
if msg.get('warning') is not None:
return 'W'
return 'E' if msg.get('type', 'error').lower() == 'error' else 'W'
def get_type(msg):
    """Public wrapper around :func:`_get_type`."""
    return _get_type(msg)
def _read_args(path):
try:
with open(path) as f:
return ' '.join((l.strip() for l in f.readlines()))
except Exception:
return ''
class Meta(type):
    """Metaclass that auto-registers each concrete checker subclass in
    ``Validator._registry`` (keyed by file type, then checker name)."""
    def __init__(cls, name, bases, attrs):
        # The bootstrap classes themselves must not be registered.
        if name not in ('Validator', 'Base'):
            Validator._registry[cls.__filetype__][cls.checker] = cls()
        return super(Meta, cls).__init__(name, bases, attrs)
# Bootstrap base class created through the registering metaclass; spelled
# this way so it works on both Python 2 and Python 3.
Base = Meta('Base', (object,), {})
class Unusable(object):
    """Descriptor placeholder for attributes subclasses must override."""

    def __get__(self, inst, owner):
        # Any access means a subclass forgot to define the attribute.
        raise RuntimeError('unusable')
class Validator(Base):
    """ Base class for all checkers
    Subclass must provide both `__filetype__` and `checker` attributes.
    """
    # Registry of all checker instances: {filetype: {checker_name: instance}}.
    _registry = collections.defaultdict(dict)
    # File type the checker works on.
    __filetype__ = Unusable()
    # Checker name
    checker = Unusable()
    # When `True` the checker read file content from stdin. `False` means the
    # checker read file content from a temporary file.
    stdin = False
    # If a file type has default checkers, only the defaults are used for
    # checking. If no defaults, all checkers are used. If the user defined
    # `g:validator_<filetype>_checkers`, the defined checkers has the highest
    # priority and are used for checking.
    default = False
    # Arguments for the checker.
    args = ''
    # Option name for user to specify checker arguments
    args_option = None
    # binary name for user to specify the path of the checker executable
    binary_option = None
    # Check when text changed.
    instant = True
    # Compiled output-parsing regexes, cached per checker name.
    _regex_map = {}
    # Cache of argument-file lookups, see parse_arguments().
    _cache = {}
    # Maps aliased vim file types to the checker file type.
    _type_map = {
        b'c': b'cpp'
    }
    enabled = True

    def __getitem__(self, ft):
        """Return all registered checkers for file type *ft*."""
        return self._registry.get(ft, {})

    def __contains__(self, ft):
        return ft in self._registry

    def compose_loc(self, enum, bufnr, buf_type, text, col=-1):
        """Build one vim location-list entry for a checker message."""
        return {
            'col': col,
            'enum': enum,
            'bufnr': bufnr,
            'type': buf_type,
            'text': '[{}]{}'.format(self.checker, text)
        }

    def parse_loclist(self, loclist, bufnr):
        """Parse raw checker output lines into a JSON location list."""
        logger.info('parse input = %s', [self, loclist, bufnr])
        # Compile (and cache) this checker's output regex on first use.
        if self.checker not in self._regex_map:
            self._regex_map[self.checker] = re.compile(self.regex, re.VERBOSE)
        lists = []
        for i, l in enumerate(loclist):
            g = self._regex_map[self.checker].match(l)
            if not g:
                continue
            loc = g.groupdict()
            col = loc.get('col', -1)
            loc.update(self.compose_loc(i + 1, bufnr, _get_type(loc),
                                        loc.get('text', ''), col))
            lists.append(loc)
        logger.info('parsed lists = %s', lists)
        return json.dumps(lists)

    def format_cmd(self, fpath):
        """Return the shell command for checking *fpath*, or '' if unusable."""
        if not self.enabled or not self.filter(fpath):
            return ''
        if not exe_exist(self.binary):
            logger.warning('%s not exist', self.binary)
            return ''
        return self.cmd(fpath)

    @property
    def cwd(self):
        # Subclasses may override to run the checker in a specific directory.
        pass

    def parse_arguments(self, file):
        """Read (and cache) extra checker arguments from an argument file."""
        key = '{}-{}-{}'.format(self.__filetype__, self.checker, file)
        if key not in self._cache:
            path = find_file(file)
            self._cache[key] = '' if path is None else _read_args(path)
        return self._cache[key]

    def filter(self, fpath):
        """Hook for subclasses to skip files; default accepts everything."""
        return True

    @property
    def filename(self):
        return vim.current.buffer.name

    @property
    def binary(self):
        # User override g:validator_<name>_binary wins over the checker name.
        name = self.binary_option or '{}_{}'.format(
            self.__filetype__, self.checker)
        v = get_vim_var('{}_binary'.format(name), b'', unicode=True)
        return v or self.checker

    @property
    def cmd_args(self):
        # User override g:validator_<name>_args wins over the class default.
        name = self.args_option or '{}_{}'.format(
            self.__filetype__, self.checker)
        v = get_vim_var('{}_args'.format(name), b'', unicode=True)
        return v or self.args

    @property
    def type_map(self):
        # Merge user-configured g:validator_filetype_map into the defaults.
        v = get_vim_var('filetype_map', {})
        self._type_map.update(v)
        return self._type_map

    def cmd(self, fname):
        """Assemble the full checker command line for *fname*."""
        return "{} {} {}".format(self.binary, self.cmd_args, fname)
# Shared singleton used by the module-level helpers below.
_validator = Validator()
def _get_filters(ft):
    """Return the user-configured checker names for file type *ft*.

    :param ft: unicode
    :returns: list of checker names, or None when the user configured none
    """
    checkers = get_vim_var('{}_checkers'.format(ft))
    if isinstance(checkers, (list, vim.List)):
        names = []
        for item in checkers:
            try:
                names.append(to_unicode(item))
            except Exception as e:
                # Skip entries that cannot be decoded.
                logger.exception(e)
        return names
    if checkers:
        try:
            return [to_unicode(checkers)]
        except Exception as e:
            logger.exception(e)
    return None
def load_checkers(ft):
    """Return the checkers to use for file type *ft*.

    Lazily imports the lint module for the file type on first use.

    :param ft: bytes
    :returns: dict mapping checker name -> checker instance
    """
    if not ft:
        return {}
    # Resolve file type aliases (e.g. b'c' -> b'cpp'), then normalise.
    ft = to_unicode(_validator.type_map.get(ft, ft))
    if ft not in _validator:
        try:
            importlib.import_module('lints.{}'.format(ft))
        except ImportError:
            try:
                # Third-party checker packages use the validator_<ft> naming.
                importlib.import_module('validator_{}'.format(ft))
            except ImportError:
                # No checkers exist for this file type; remember that.
                _validator._registry[ft] = {}
    checkers = _validator[ft]
    if not checkers:
        return {}
    filters = _get_filters(ft)
    if filters is None:
        # No user preference: prefer the default checkers, else all of them.
        return {k: c for k, c in checkers.items() if c.default} or checkers
    return {k: c for k, c in checkers.items() if k in filters}
|
logicus4078/vertx-web | refs/heads/master | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/latin1prober.py | 950 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
# Number of frequency categories tracked per character pair (values 0..3).
FREQ_CAT_NUM = 4

UDF = 0  # undefined
OTH = 1  # other
ASC = 2  # ascii capital letter
ASS = 3  # ascii small letter
ACV = 4  # accent capital vowel
ACO = 5  # accent capital other
ASV = 6  # accent small vowel
ASO = 7  # accent small other
CLASS_NUM = 8  # total classes

# Maps each Latin-1 byte value (0x00-0xFF) to one of the classes above.
Latin1_CharToClass = (
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 00 - 07
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 08 - 0F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 10 - 17
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 18 - 1F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 20 - 27
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 28 - 2F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 30 - 37
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 38 - 3F
    OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 40 - 47
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 48 - 4F
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 50 - 57
    ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH,  # 58 - 5F
    OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 60 - 67
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 68 - 6F
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 70 - 77
    ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH,  # 78 - 7F
    OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH,  # 80 - 87
    OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF,  # 88 - 8F
    UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 90 - 97
    OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO,  # 98 - 9F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # A0 - A7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # A8 - AF
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # B0 - B7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # B8 - BF
    ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO,  # C0 - C7
    ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV,  # C8 - CF
    ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH,  # D0 - D7
    ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO,  # D8 - DF
    ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO,  # E0 - E7
    ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV,  # E8 - EF
    ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH,  # F0 - F7
    ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO,  # F8 - FF
)

# Likelihood of one character class following another, indexed as
# [previous_class * CLASS_NUM + current_class]:
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
    # UDF OTH ASC ASS ACV ACO ASV ASO
    0, 0, 0, 0, 0, 0, 0, 0,  # UDF
    0, 3, 3, 3, 3, 3, 3, 3,  # OTH
    0, 3, 3, 3, 3, 3, 3, 3,  # ASC
    0, 3, 3, 3, 1, 1, 3, 3,  # ASS
    0, 3, 3, 3, 1, 2, 1, 2,  # ACV
    0, 3, 3, 3, 3, 3, 3, 3,  # ACO
    0, 3, 1, 3, 1, 1, 1, 3,  # ASV
    0, 3, 1, 3, 1, 1, 3, 3,  # ASO
)
class Latin1Prober(CharSetProber):
    """Frequency-based prober for Latin-1 (reported as windows-1252) text."""

    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()

    def reset(self):
        # Class of the previously seen character; OTH is a neutral start.
        self._mLastCharClass = OTH
        # Counts per likelihood category (indices 0..3 of Latin1ClassModel).
        self._mFreqCounter = [0] * FREQ_CAT_NUM
        CharSetProber.reset(self)

    def get_charset_name(self):
        return "windows-1252"

    def feed(self, aBuf):
        """Accumulate character-pair statistics for *aBuf*."""
        aBuf = self.filter_with_english_letters(aBuf)
        for c in aBuf:
            charClass = Latin1_CharToClass[wrap_ord(c)]
            # Likelihood of this class following the previous one.
            freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
                                    + charClass]
            if freq == 0:
                # Illegal sequence for Latin-1: rule this charset out.
                self._mState = eNotMe
                break
            self._mFreqCounter[freq] += 1
            self._mLastCharClass = charClass
        return self.get_state()

    def get_confidence(self):
        """Confidence in [0, 1] that the input is Latin-1."""
        if self.get_state() == eNotMe:
            return 0.01
        total = sum(self._mFreqCounter)
        if total < 0.01:
            confidence = 0.0
        else:
            # "Very likely" pairs raise confidence; "very unlikely" pairs
            # are heavily penalised.
            confidence = ((self._mFreqCounter[3] / total)
                          - (self._mFreqCounter[1] * 20.0 / total))
        if confidence < 0.0:
            confidence = 0.0
        # lower the confidence of latin1 so that other more accurate
        # detector can take priority.
        confidence = confidence * 0.5
        return confidence
|
LCBRU/reporter | refs/heads/master | reporter/uhl_reports/national_bioresource/data_quality/civicrm.py | 1 | #!/usr/bin/env python3
from reporter.emailing import RECIPIENT_BIORESOURCE_ADMIN
from reporter.uhl_reports.civicrm.enrolment_dq import (
DuplicateStudyIdReport,
MissingStudyNumber,
MultipleRecruitementsReport,
CivicrmInvalidCaseStatus
)
class NationalBioresourceCiviCrmMissingStudyNumber(MissingStudyNumber):
    """Missing-study-number report for study 27 (presumably the National
    Bioresource CiviCRM case type — TODO confirm), sent to the admins."""
    def __init__(self):
        super().__init__(
            27,
            recipients=[RECIPIENT_BIORESOURCE_ADMIN])
class NationalBioresourceCiviCrmDuplicateStudyNumber(DuplicateStudyIdReport):
    """Duplicate-study-id report for study 27, sent to the admins."""
    def __init__(self):
        super().__init__(
            27,
            recipients=[RECIPIENT_BIORESOURCE_ADMIN])
class NationalBioresourceCiviCrmMultipleRecruitments(MultipleRecruitementsReport):
    """Multiple-recruitments report for study 27, sent to the admins."""
    def __init__(self):
        super().__init__(
            27,
            recipients=[RECIPIENT_BIORESOURCE_ADMIN])
class NationalBioresourceCivicrmInvalidCaseStatus(CivicrmInvalidCaseStatus):
    """Report study-27 cases whose status is outside the allowed set below."""
    def __init__(self):
        super().__init__(
            27,
            # Statuses considered valid for this study's cases.
            [
                'Recruited',
                'Declined',
                'Failed to Respond',
                'Recruitment Pending',
                'Excluded',
                'Duplicate',
                'Withdrawn'
            ],
            [RECIPIENT_BIORESOURCE_ADMIN])
|
yvonguy/pi_thermostat | refs/heads/master | therm/app/plots.py | 1 | '''This example demonstrates embedding a standalone Bokeh document
into a simple Flask application, with a basic HTML web form.
To view the example, run:
python simple.py
in this directory, and navigate to:
http://localhost:5000
'''
from __future__ import print_function
import flask
import datetime
from datetime import datetime, date, time
from therm.app import app
from bokeh.embed import components
from bokeh.plotting import figure
from bokeh.resources import INLINE
from bokeh.resources import CDN
from bokeh.templates import CSS_RESOURCES, JS_RESOURCES
from bokeh.util.string import encode_utf8
from bokeh.models import Range1d
import math
# Plot line colours selectable from the web form (name -> hex code).
colors = {
    'Black': '#000000',
    'Red': '#FF0000',
    'Green': '#00FF00',
    'Blue': '#0000FF',
}
def getitem(obj, item, default):
    """Return obj[item] when the key is present, otherwise *default*."""
    return obj[item] if item in obj else default
def temp_model(t, t_sunrise, t_sunset, T_max, T_min):
    """Simple diurnal temperature model (all times in fractional hours).

    At sunrise the temperature starts to increase again (T = T_min); at
    sunset the decrease has already begun (T = (T_max + T_min) / 2).
    """
    day_length = t_sunset - t_sunrise
    night_length = 24 - day_length
    mean_temp = (T_max + T_min) / 2.0     # T0: mean temperature
    amplitude = (T_max - T_min) / 2.0     # TA: temperature variance
    hour = t % 24.0
    if hour < t_sunrise:
        # Night before sunrise: linear cool-down continuing from sunset.
        return mean_temp - amplitude * (24 - (t_sunset - hour)) / night_length
    if hour > t_sunset:
        # Night after sunset: linear cool-down.
        return mean_temp - amplitude * (hour - t_sunset) / night_length
    # Daytime: cosine curve peaking partway through the day.
    return mean_temp + amplitude * math.cos(
        math.pi * (1.5 * (hour - t_sunrise) / day_length - 1))
def temp_model3(t, t_sunrise, t_sunset, T_max, T_min):
    """Diurnal temperature model with a cosine day and linear night decay.

    All times are fractional hours of the day (e.g. 6.5 == 06:30), matching
    the convention of :func:`temp_model`.

    Daytime follows a cosine curve peaking 3 hours after the middle of the
    day, scaled so that T(t_sunrise) == T_min and T(t_max) == T_max; at
    night the temperature decays linearly from the sunset value back down
    to T_min at sunrise.

    The original implementation called the non-existent
    ``datetime.deltatime``, forgot the ``()`` on ``total_seconds`` and did
    arithmetic on ``datetime.time`` objects — all of which raise at
    runtime — so it has been rewritten in plain hours.
    """
    len_day = float(t_sunset - t_sunrise)
    len_night = 24.0 - len_day
    # Time of maximum temperature: 3 hours after the middle of the day.
    t_max = (t_sunset + t_sunrise) / 2.0 + 3.0
    # Amplitude chosen so the cosine hits exactly T_min at sunrise.
    TA = (T_max - T_min) / (1 - math.cos(math.pi * (t_sunrise - t_max) / len_day))
    T0 = T_max - TA
    T_sunrise = T_min
    T_sunset = T0 + TA * math.cos(math.pi * (t_sunset - t_max) / len_day)
    T_delta = T_sunset - T_sunrise
    t = t % 24.0
    if t < t_sunrise:
        # Pre-dawn: continue the linear night-time decay towards T_min.
        y = T_sunrise + T_delta * (t_sunrise - t) / len_night
    elif t > t_sunset:
        # Evening: linear decay starting from the sunset temperature.
        y = T_sunset - T_delta * (t - t_sunset) / len_night
    else:
        # Daytime cosine curve centred on t_max.
        y = T0 + TA * math.cos(math.pi * (t - t_max) / len_day)
    return y
def hoursToSeconds(hours):
    """Convert a duration in hours to seconds."""
    return hours * 3600
def timeToSeconds(t):
    """Convert a datetime.time to the number of seconds since midnight."""
    return t.hour * 3600 + t.minute * 60 + t.second
def timeToMinutes(t):
    """Convert a datetime.time to minutes since midnight, rounded to the
    nearest minute.

    The original used ``int(seconds / 60)``, which truncates and so
    contradicted its own "round to nearest minute" comment (e.g. 00:00:59
    became 0 instead of 1).
    """
    return int(round(timeToSeconds(t) / 60.0))
@app.route("/plot")
def polynomial():
    """Render the temperature-model plot, configured via URL query args.

    Recognised args: color, start, end, T_min, T_max, sunrise, sunset.
    """
    # Grab the inputs arguments from the URL; this is automated by the button.
    args = flask.request.args

    # Get all the form arguments in the url with defaults.
    color = colors[getitem(args, 'color', 'Black')]
    # NOTE(review): datetime.time only accepts hours 0..23, so the default
    # end=24 (or any start/end of 24) raises ValueError here — confirm the
    # intended range with the form defaults.
    t_start = time(int(getitem(args, 'start', 0)))
    t_end = time(int(getitem(args, 'end', 24)))
    T_min = int(getitem(args, 'T_min', 25))
    T_max = int(getitem(args, 'T_max', 32))
    t_sunrise = time(int(getitem(args, 'sunrise', 5)))
    t_sunset = time(int(getitem(args, 'sunset', 22)))

    # Build the x axis as datetime.time values, one point per minute.
    minutes = list(range(timeToMinutes(t_start), timeToMinutes(t_end) + 1))
    # Integer division is required: time() takes ints, and hour=i/60 is a
    # float on Python 3 (TypeError).
    x = [time(hour=i // 60, minute=i % 60) for i in minutes]
    fig = figure(title="Temperature Model",
                 x_axis_type="datetime")
    # NOTE(review): temp_model3 receives minutes for t but time objects for
    # sunrise/sunset — the units look inconsistent; verify against the model.
    fig.line(x,
             [temp_model3(i, t_sunrise, t_sunset, T_max, T_min) for i in minutes],
             color=color,
             line_width=2)
    fig.yaxis.axis_label = 'Temperature'
    fig.y_range = Range1d(start=0, end=35)
    fig.xaxis.axis_label = 'Time'

    # Configure resources to include BokehJS inline in the document.
    # For more details see:
    # http://bokeh.pydata.org/en/latest/docs/reference/resources_embedding.html#bokeh-embed
    resource = CDN
    js_resources = JS_RESOURCES.render(js_raw=resource.js_raw,
                                       js_files=resource.js_files)
    css_resources = CSS_RESOURCES.render(css_raw=resource.css_raw,
                                         css_files=resource.css_files)

    # For more details see:
    # http://bokeh.pydata.org/en/latest/docs/user_guide/embedding.html#components
    script, div = components(fig, resource)
    html = flask.render_template(
        'embed2.html',
        plot_script=script,
        plot_div=div,
        js_resources=js_resources,
        css_resources=css_resources,
        color=color,
        start=t_start,
        end=t_end,
        T_min=T_min,
        T_max=T_max,
        # Fixed: the original passed the undefined names `sunrise`/`sunset`
        # (NameError); the local variables are t_sunrise/t_sunset.
        sunrise=t_sunrise,
        sunset=t_sunset
    )
    return encode_utf8(html)
@app.route("/plot2")
def polynomial2():
    """Render the temperature-model plot using plain integer hours.

    Like polynomial(), but keeps start/end/sunrise/sunset as ints: the
    ``range(start, end + 1)`` below requires integers, so the original's
    ``datetime.time`` wrappers (one of them misspelled ``dateimte``, a
    NameError) could never have worked and have been removed.
    """
    # Grab the inputs arguments from the URL; this is automated by the button.
    args = flask.request.args

    # Get all the form arguments in the url with defaults.
    color = colors[getitem(args, 'color', 'Black')]
    start = int(getitem(args, 'start', 0))
    end = int(getitem(args, 'end', 24))
    T_min = int(getitem(args, 'T_min', 25))
    T_max = int(getitem(args, 'T_max', 32))
    sunrise = int(getitem(args, 'sunrise', 5))
    sunset = int(getitem(args, 'sunset', 22))

    # One sample per hour across the requested window.
    x = list(range(start, end + 1))
    fig = figure(title="Temperature Model")
    fig.line(x,
             [temp_model3(i, sunrise, sunset, T_max, T_min) for i in x],
             color=color,
             line_width=2)
    fig.yaxis.axis_label = 'Temperature'
    fig.y_range = Range1d(start=0, end=35)
    fig.xaxis.axis_label = 'Time'

    # Configure resources to include BokehJS inline in the document.
    # For more details see:
    # http://bokeh.pydata.org/en/latest/docs/reference/resources_embedding.html#bokeh-embed
    resource = CDN
    js_resources = JS_RESOURCES.render(js_raw=resource.js_raw,
                                       js_files=resource.js_files)
    css_resources = CSS_RESOURCES.render(css_raw=resource.css_raw,
                                         css_files=resource.css_files)

    # For more details see:
    # http://bokeh.pydata.org/en/latest/docs/user_guide/embedding.html#components
    script, div = components(fig, resource)
    html = flask.render_template(
        'embed2.html',
        plot_script=script,
        plot_div=div,
        js_resources=js_resources,
        css_resources=css_resources,
        color=color,
        start=start,
        end=end,
        T_min=T_min,
        T_max=T_max,
        sunrise=sunrise,
        sunset=sunset
    )
    return encode_utf8(html)
|
RO-ny9/python-for-android | refs/heads/master | python-modules/twisted/twisted/words/protocols/jabber/error.py | 49 | # -*- test-case-name: twisted.words.test.test_jabbererror -*-
#
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
XMPP Error support.
"""
import copy
from twisted.words.xish import domish
# XML namespaces used when building and parsing error elements.
NS_XML = "http://www.w3.org/XML/1998/namespace"
NS_XMPP_STREAMS = "urn:ietf:params:xml:ns:xmpp-streams"
NS_XMPP_STANZAS = "urn:ietf:params:xml:ns:xmpp-stanzas"

# Per RFC 3920 (section 9.3): maps each defined stanza error condition to
# its default error type and the legacy pre-XMPP numeric code.
STANZA_CONDITIONS = {
    'bad-request':             {'code': '400', 'type': 'modify'},
    'conflict':                {'code': '409', 'type': 'cancel'},
    'feature-not-implemented': {'code': '501', 'type': 'cancel'},
    'forbidden':               {'code': '403', 'type': 'auth'},
    'gone':                    {'code': '302', 'type': 'modify'},
    'internal-server-error':   {'code': '500', 'type': 'wait'},
    'item-not-found':          {'code': '404', 'type': 'cancel'},
    'jid-malformed':           {'code': '400', 'type': 'modify'},
    'not-acceptable':          {'code': '406', 'type': 'modify'},
    'not-allowed':             {'code': '405', 'type': 'cancel'},
    'not-authorized':          {'code': '401', 'type': 'auth'},
    'payment-required':        {'code': '402', 'type': 'auth'},
    'recipient-unavailable':   {'code': '404', 'type': 'wait'},
    'redirect':                {'code': '302', 'type': 'modify'},
    'registration-required':   {'code': '407', 'type': 'auth'},
    'remote-server-not-found': {'code': '404', 'type': 'cancel'},
    'remote-server-timeout':   {'code': '504', 'type': 'wait'},
    'resource-constraint':     {'code': '500', 'type': 'wait'},
    'service-unavailable':     {'code': '503', 'type': 'cancel'},
    'subscription-required':   {'code': '407', 'type': 'auth'},
    'undefined-condition':     {'code': '500', 'type': None},
    'unexpected-request':      {'code': '400', 'type': 'wait'},
}

# Reverse mapping for legacy errors that carry only a numeric code:
# code -> (condition, type).
CODES_TO_CONDITIONS = {
    '302': ('gone', 'modify'),
    '400': ('bad-request', 'modify'),
    '401': ('not-authorized', 'auth'),
    '402': ('payment-required', 'auth'),
    '403': ('forbidden', 'auth'),
    '404': ('item-not-found', 'cancel'),
    '405': ('not-allowed', 'cancel'),
    '406': ('not-acceptable', 'modify'),
    '407': ('registration-required', 'auth'),
    '408': ('remote-server-timeout', 'wait'),
    '409': ('conflict', 'cancel'),
    '500': ('internal-server-error', 'wait'),
    '501': ('feature-not-implemented', 'cancel'),
    '502': ('service-unavailable', 'wait'),
    '503': ('service-unavailable', 'cancel'),
    '504': ('remote-server-timeout', 'wait'),
    '510': ('service-unavailable', 'cancel'),
}
class BaseError(Exception):
    """
    Base class for XMPP error exceptions.

    @cvar namespace: The namespace of the C{error} element generated by
                     C{getElement}.
    @type namespace: C{str}

    @ivar condition: The error condition. The valid values are defined by
                     subclasses of L{BaseError}.
    @type contition: C{str}

    @ivar text: Optional text message to supplement the condition or
                application specific condition.
    @type text: C{unicode}

    @ivar textLang: Identifier of the language used for the message in
                    C{text}. Values are as described in RFC 3066.
    @type textLang: C{str}

    @ivar appCondition: Application specific condition element, supplementing
                        the error condition in C{condition}.
    @type appCondition: object providing L{domish.IElement}.
    """

    namespace = None

    def __init__(self, condition, text=None, textLang=None, appCondition=None):
        Exception.__init__(self)
        self.condition = condition
        self.text = text
        self.textLang = textLang
        self.appCondition = appCondition

    def __str__(self):
        """Human-readable summary: class name, condition and optional text."""
        message = "%s with condition %r" % (self.__class__.__name__,
                                            self.condition)
        if self.text:
            message += ': ' + self.text
        return message

    def getElement(self):
        """
        Get XML representation from self.

        The method creates an L{domish} representation of the
        error data contained in this exception.

        @rtype: L{domish.Element}
        """
        error = domish.Element((None, 'error'))
        error.addElement((self.namespace, self.condition))
        if self.text:
            text = error.addElement((self.namespace, 'text'),
                                    content=self.text)
            if self.textLang:
                # xml:lang attribute identifies the language of the text.
                text[(NS_XML, 'lang')] = self.textLang
        if self.appCondition:
            error.addChild(self.appCondition)
        return error
class StreamError(BaseError):
    """
    Stream Error exception.

    Refer to RFC 3920, section 4.7.3, for the allowed values for C{condition}.
    """

    namespace = NS_XMPP_STREAMS

    def getElement(self):
        """
        Get XML representation from self.

        Overrides the base L{BaseError.getElement} to make sure the returned
        element is in the XML Stream namespace.

        @rtype: L{domish.Element}
        """
        # Imported locally to avoid a circular import with xmlstream.
        from twisted.words.protocols.jabber.xmlstream import NS_STREAMS
        error = BaseError.getElement(self)
        error.uri = NS_STREAMS
        return error
class StanzaError(BaseError):
    """
    Stanza Error exception.

    Refer to RFC 3920, section 9.3, for the allowed values for C{condition}
    and C{type}.

    @ivar type: The stanza error type. Gives a suggestion to the recipient
                of the error on how to proceed.
    @type type: C{str}

    @ivar code: A numeric identifier for the error condition for backwards
                compatibility with pre-XMPP Jabber implementations.
    """

    namespace = NS_XMPP_STANZAS

    def __init__(self, condition, type=None, text=None, textLang=None,
                 appCondition=None):
        BaseError.__init__(self, condition, text, textLang, appCondition)
        if type is None:
            # Derive the default error type from the condition table.
            try:
                type = STANZA_CONDITIONS[condition]['type']
            except KeyError:
                pass
        self.type = type
        # Legacy numeric code for pre-XMPP Jabber implementations.
        try:
            self.code = STANZA_CONDITIONS[condition]['code']
        except KeyError:
            self.code = None
        # Filled in by exceptionFromStanza with the stanza's other children.
        self.children = []
        self.iq = None

    def getElement(self):
        """
        Get XML representation from self.

        Overrides the base L{BaseError.getElement} to make sure the returned
        element has a C{type} attribute and optionally a legacy C{code}
        attribute.

        @rtype: L{domish.Element}
        """
        error = BaseError.getElement(self)
        error['type'] = self.type
        if self.code:
            error['code'] = self.code
        return error

    def toResponse(self, stanza):
        """
        Construct error response stanza.

        The C{stanza} is transformed into an error response stanza by
        swapping the C{to} and C{from} addresses and inserting an error
        element.

        @note: This creates a shallow copy of the list of child elements of
            the stanza. The child elements themselves are not copied
            themselves, and references to their parent element will still
            point to the original stanza element.

            The serialization of an element does not use the reference to
            its parent, so the typical use case of immediately sending out
            the constructed error response is not affected.

        @param stanza: the stanza to respond to
        @type stanza: L{domish.Element}
        """
        from twisted.words.protocols.jabber.xmlstream import toResponse
        response = toResponse(stanza, stanzaType='error')
        response.children = copy.copy(stanza.children)
        response.addChild(self.getElement())
        return response
def _getText(element):
    """Return the first string child of *element* as unicode, or None."""
    strings = (unicode(child) for child in element.children
               if isinstance(child, basestring))
    return next(strings, None)
def _parseError(error, errorNamespace):
    """
    Parses an error element.

    @param error: The error element to be parsed
    @type error: L{domish.Element}
    @param errorNamespace: The namespace of the elements that hold the error
        condition and text.
    @type errorNamespace: C{str}
    @return: Dictionary with extracted error information. If present, keys
        C{condition}, C{text}, C{textLang} have a string value,
        and C{appCondition} has an L{domish.Element} value.
    @rtype: L{dict}
    """
    parsed = {
        'condition': None,
        'text': None,
        'textLang': None,
        'appCondition': None,
    }
    for child in error.elements():
        if child.uri != errorNamespace:
            # An element outside the error namespace is an
            # application-specific condition.
            parsed['appCondition'] = child
        elif child.name == 'text':
            parsed['text'] = _getText(child)
            parsed['textLang'] = child.getAttribute((NS_XML, 'lang'))
        else:
            parsed['condition'] = child.name
    return parsed
def exceptionFromStreamError(element):
    """
    Build an exception object from a stream error.

    @param element: the stream error
    @type element: L{domish.Element}
    @return: the generated exception object
    @rtype: L{StreamError}
    """
    parsed = _parseError(element, NS_XMPP_STREAMS)
    return StreamError(parsed['condition'],
                       parsed['text'],
                       parsed['textLang'],
                       parsed['appCondition'])
def exceptionFromStanza(stanza):
    """
    Build an exception object from an error stanza.

    @param stanza: the error stanza
    @type stanza: L{domish.Element}
    @return: the generated exception object
    @rtype: L{StanzaError}
    """
    children = []
    condition = text = textLang = appCondition = type = code = None
    for element in stanza.elements():
        if element.name == 'error' and element.uri == stanza.uri:
            code = element.getAttribute('code')
            type = element.getAttribute('type')
            error = _parseError(element, NS_XMPP_STANZAS)
            condition = error['condition']
            text = error['text']
            textLang = error['textLang']
            appCondition = error['appCondition']
            # Legacy error without a condition element: derive condition
            # and type from the numeric code instead.
            if not condition and code:
                condition, type = CODES_TO_CONDITIONS[code]
                text = _getText(stanza.error)
        else:
            # Non-error children are preserved on the exception.
            children.append(element)
    if condition is None:
        # TODO: raise exception instead?
        return StanzaError(None)
    exception = StanzaError(condition, type, text, textLang, appCondition)
    exception.children = children
    exception.stanza = stanza
    return exception
|
martinkirch/tofbot | refs/heads/master | plugins/teachme.py | 1 | # -*- coding: utf-8 -*-
#
# This file is part of tofbot, a friendly IRC bot.
# You may redistribute it under the Simplified BSD License.
# If we meet some day, and you think this stuff is worth it,
# you can buy us a beer in return.
#
# Copyright (c) 2011 Christophe-Marie Duquesne <chm.duquesne@gmail.com>
# Etienne Millon <etienne.millon@gmail.com>
#
from __future__ import with_statement
from toflib import Plugin
from toflib import distance
from classifier import NaiveBayesClassifier, MemoryBackend
import json
class PluginTeachMe(Plugin):
    """Naive-Bayes joke plugin.

    Every channel message is classified; when a joke label wins, the bot
    says it.  Feedback messages ("GG"/"TG") reinforce or punish the last
    joke, and explicit markers (CMB/CTB/TWSS) teach new associations.
    """

    def __init__(self, *args):
        Plugin.__init__(self, *args)
        self.classifier = None
        self.load({})
        self.curr_msg = ''
        self.last_msg = ''
        self.last_joke = ()
        self.just_joked = False

    def load(self, data):
        """Rebuild the classifier from previously saved training data."""
        self.classifier = NaiveBayesClassifier(MemoryBackend(data))

    def save(self):
        """Return the raw training data so it can be persisted."""
        return self.classifier.storage.data

    def get_what_to_learn(self):
        """Map an explicit teaching marker in the current message to a joke."""
        markers = {
            'CMB': 'CMB',
            'cmb': 'CMB',
            'CTB': 'CTB',
            'ctb': 'CTB',
            'TWSS': "That's what she said!",
            'twss': "That's what she said!",
        }
        return markers.get(self.curr_msg, 'None')

    def got_congratulated(self):
        """True when the current message praises the bot's last joke."""
        return self.curr_msg in ('GG', 'gg', 'GG Tofbot', 'gg Tofbot')

    def did_bad_joke(self):
        """True when the current message scolds the bot's last joke."""
        return self.curr_msg in ('TG', 'tg', 'TG Tofbot', 'tg Tofbot')

    def handle_msg(self, msg_text, chan, nick):
        joked_last_time = self.just_joked
        self.just_joked = False
        self.last_msg = self.curr_msg
        self.curr_msg = msg_text.strip()
        if self.got_congratulated():
            # Positive feedback: reinforce the previous joke, if any.
            if self.last_joke:
                self.classifier.train(*self.last_joke)
        elif self.did_bad_joke():
            # Negative feedback: teach that those words deserve no joke.
            if self.last_joke:
                self.classifier.train(self.last_joke[0], 'None')
        else:
            scores = self.classifier.classify(self.curr_msg.split())
            joke = scores[0][0] if scores else 'None'
            if joke != 'None':
                self.say(joke)
                self.last_joke = (self.curr_msg.split(), joke)
            elif not joked_last_time:
                # No joke fired: learn from the previous message, using
                # any explicit teaching marker in the current one.
                self.classifier.train(self.last_msg.split(),
                                      self.get_what_to_learn())
|
aarchiba/scipy | refs/heads/master | scipy/stats/contingency.py | 7 | """Some functions for working with contingency tables (i.e. cross tabulations).
"""
from __future__ import division, print_function, absolute_import
from functools import reduce
import numpy as np
from .stats import power_divergence
__all__ = ['margins', 'expected_freq', 'chi2_contingency']
def margins(a):
    """Return a list of the marginal sums of the array `a`.

    Parameters
    ----------
    a : ndarray
        The array for which to compute the marginal sums.

    Returns
    -------
    margsums : list of ndarrays
        A list of length `a.ndim`.  `margsums[k]` is the result of
        summing `a` over all axes except `k`; it has the same number of
        dimensions as `a`, but the length of each axis except axis `k`
        will be 1.

    Examples
    --------
    >>> a = np.arange(12).reshape(2, 6)
    >>> from scipy.stats.contingency import margins
    >>> m0, m1 = margins(a)
    >>> m0
    array([[15],
           [51]])
    >>> m1
    array([[ 6,  8, 10, 12, 14, 16]])
    """
    # For each axis k, sum over every *other* axis while keeping the
    # summed axes as singleton dimensions (apply_over_axes preserves them).
    return [np.apply_over_axes(np.sum, a,
                               [axis for axis in range(a.ndim) if axis != k])
            for k in range(a.ndim)]
def expected_freq(observed):
    """
    Compute the expected frequencies from a contingency table.

    Given an n-dimensional contingency table of observed frequencies,
    compute the expected frequencies for the table based on the marginal
    sums under the assumption that the groups associated with each
    dimension are independent.

    Parameters
    ----------
    observed : array_like
        The table of observed frequencies.  (While this function can
        handle a 1-D array, that case is trivial.  Generally `observed`
        is at least 2-D.)

    Returns
    -------
    expected : ndarray of float64
        The expected frequencies, based on the marginal sums of the
        table.  Same shape as `observed`.

    Examples
    --------
    >>> observed = np.array([[10, 10, 20],[20, 20, 20]])
    >>> from scipy.stats.contingency import expected_freq
    >>> expected_freq(observed)
    array([[ 12.,  12.,  16.],
           [ 18.,  18.,  24.]])
    """
    # `observed` is typically an integer array; switch to float64 up
    # front so the products of marginal sums below cannot overflow.
    observed = np.asarray(observed, dtype=np.float64)

    # Multiply the marginal sums together.  Each marginal keeps its
    # summed axes as singleton dimensions, so plain numpy broadcasting
    # lines everything up.
    expected = np.ones_like(observed)
    for margsum in margins(observed):
        expected = expected * margsum

    # Normalize by total**(ndim - 1) so the result sums to the grand total.
    expected /= observed.sum() ** (observed.ndim - 1)
    return expected
def chi2_contingency(observed, correction=True, lambda_=None):
    """Chi-square test of independence of variables in a contingency table.

    Computes the chi-square statistic and p-value for the hypothesis
    test of independence of the observed frequencies in the contingency
    table `observed`.  Expected frequencies come from the marginal sums
    under the independence assumption (`expected_freq`).  The degrees of
    freedom are::

        dof = observed.size - sum(observed.shape) + observed.ndim - 1

    Parameters
    ----------
    observed : array_like
        The contingency table of observed frequencies (an "R x C table"
        in the two-dimensional case).
    correction : bool, optional
        If True *and* dof is 1, apply Yates' correction for continuity:
        each observed value is moved 0.5 towards its expected value.
    lambda_ : float or str, optional
        Selects a statistic from the Cressie-Read power divergence
        family; by default Pearson's chi-squared statistic is used.
        See `power_divergence` for details.

    Returns
    -------
    chi2 : float
        The test statistic.
    p : float
        The p-value of the test.
    dof : int
        Degrees of freedom.
    expected : ndarray, same shape as `observed`
        The expected frequencies, based on the marginal sums.

    See Also
    --------
    contingency.expected_freq, fisher_exact, chisquare, power_divergence

    Notes
    -----
    A common validity guideline: every observed and expected frequency
    should be at least 5.  The test is only meaningful for tables of
    dimension two or more; a 1-D table always yields a statistic of 0.
    Masked arrays are not supported.

    References
    ----------
    .. [1] "Contingency table",
           https://en.wikipedia.org/wiki/Contingency_table
    .. [2] "Pearson's chi-squared test",
           https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
    .. [3] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
           Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
           pp. 440-464.

    Examples
    --------
    >>> from scipy.stats import chi2_contingency
    >>> obs = np.array([[10, 10, 20], [20, 20, 20]])
    >>> chi2_contingency(obs)
    (2.7777777777777777,
     0.24935220877729619,
     2,
     array([[ 12.,  12.,  16.],
            [ 18.,  18.,  24.]]))
    """
    observed = np.asarray(observed)
    # Validate the input before doing any arithmetic.
    if np.any(observed < 0):
        raise ValueError("All values in `observed` must be nonnegative.")
    if observed.size == 0:
        raise ValueError("No data; `observed` has size 0.")

    expected = expected_freq(observed)
    if np.any(expected == 0):
        # Include one of the positions where expected is zero in
        # the exception message.
        zeropos = list(zip(*np.nonzero(expected == 0)))[0]
        raise ValueError("The internally computed table of expected "
                         "frequencies has a zero element at %s." % (zeropos,))

    # The degrees of freedom
    dof = expected.size - sum(expected.shape) + expected.ndim - 1

    if dof == 0:
        # Degenerate case: `observed` has only one nontrivial dimension,
        # so observed == expected and the statistic is exactly zero.
        return 0.0, 1.0, dof, expected

    if dof == 1 and correction:
        # Yates' continuity correction: move each observed count half a
        # unit toward its expected value.
        observed = observed + 0.5 * np.sign(expected - observed)

    chi2, p = power_divergence(observed, expected,
                               ddof=observed.size - 1 - dof, axis=None,
                               lambda_=lambda_)
    return chi2, p, dof, expected
|
loonycyborg/scons-plusplus | refs/heads/master | python_modules/Tool/sgiar.py | 2 | """SCons.Tool.sgiar
Tool-specific initialization for SGI ar (library archive). If CC
exists, static libraries should be built with it, so the prelinker has
a chance to resolve C++ template instantiations.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgiar.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import SCons.Defaults
import SCons.Tool
import SCons.Util
def generate(env):
    """Add Builders and construction variables for ar to an Environment."""
    SCons.Tool.createStaticLibBuilder(env)

    if env.Detect('CC'):
        # Archive through the C++ compiler driver so the prelinker gets
        # a chance to resolve C++ template instantiations.
        archiver, arflags, arcom = (
            'CC', '-ar', '$AR $ARFLAGS -o $TARGET $SOURCES')
    else:
        archiver, arflags, arcom = (
            'ar', 'r', '$AR $ARFLAGS $TARGET $SOURCES')
    env['AR'] = archiver
    env['ARFLAGS'] = SCons.Util.CLVar(arflags)
    env['ARCOM'] = arcom

    # Shared-library settings are the same regardless of the archiver.
    env['SHLINK'] = '$LINK'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')
    env['SHLINKCOM'] = '$SHLINK $SHLINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    env['LIBPREFIX'] = 'lib'
    env['LIBSUFFIX'] = '.a'
def exists(env):
    """Report whether a usable archiver (CC, else ar) can be detected."""
    found = env.Detect('CC')
    if not found:
        found = env.Detect('ar')
    return found
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
madhavsuresh/chimerascan | refs/heads/master | chimerascan/deprecated/merge_read_pairs.py | 6 | '''
Created on Jan 9, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import collections
import logging
# local imports
import chimerascan.pysam as pysam
from chimerascan.lib.base import parse_library_type
from chimerascan.lib.alignment_parser import parse_segmented_pe_sam_file
def map_reads_to_references(pe_reads):
    """Bin mapped reads by reference so mates hitting the same
    gene/chromosome can later be paired.

    `pe_reads` holds one entry per mate; each entry is a list of split
    partitions, and each partition is a list of split-read lists.  A
    mate split across several partitions, or whose partition contains
    more than one read, cannot be paired and is skipped entirely.

    Returns a defaultdict mapping reference id -> (mate1 reads, mate2
    reads), each a list.
    """
    by_reference = collections.defaultdict(lambda: ([], []))
    for mate_index, partitions in enumerate(pe_reads):
        # Multiple split partitions suggest split-read mapping occurred;
        # such mates cannot participate in ordinary pairing.
        if len(partitions) > 1:
            continue
        # Likewise skip a mate whose single partition was split into
        # more than one read.
        if len(partitions[0]) > 1:
            continue
        for read in partitions[0][0]:
            if not read.is_unmapped:
                by_reference[read.rname][mate_index].append(read)
    return by_reference
def find_concordant_pairs(ref_dict, min_isize, max_isize,
                          library_type):
    """Find mate pairs that map to the same reference concordantly.

    :param ref_dict: dict of reference id -> ([mate1 reads], [mate2
        reads]), as produced by `map_reads_to_references`
    :param min_isize: minimum allowed insert size (inclusive)
    :param max_isize: maximum allowed insert size (inclusive)
    :param library_type: 2-sequence of per-mate strand codes; equal
        codes mean mates are expected on the same strand
    :returns: list of concordant (read1, read2) tuples
    """
    same_strand = (library_type[0] == library_type[1])
    # check for mapping to same gene within insert size range
    concordant_pairs = []
    # BUG FIX: was `iteritems()`, which only exists on Python 2 dicts;
    # `items()` behaves the same here and also works on Python 3.
    for rname, mate_pairs in ref_dict.items():
        # both mates must have at least one read on this reference
        if len(mate_pairs[0]) == 0 or len(mate_pairs[1]) == 0:
            continue
        # ensure distance is within insert size range
        # and strandedness matches library type
        for r1 in mate_pairs[0]:
            for r2 in mate_pairs[1]:
                # insert size spans from the leftmost start to the
                # rightmost end of the two reads
                if r1.pos > r2.pos:
                    isize = r1.aend - r2.pos
                else:
                    isize = r2.aend - r1.pos
                if isize < min_isize or isize > max_isize:
                    continue
                # read strands must agree with library type
                if same_strand != (r1.is_reverse == r2.is_reverse):
                    continue
                # this is a concordant read pair
                concordant_pairs.append((r1, r2))
    return concordant_pairs
def select_best_pairs(mate1_reads, mate2_reads,
                      min_fragment_length,
                      max_fragment_length,
                      library_type):
    """Unimplemented stub.

    Per the TODO in merge_read_pairs, this was presumably intended to
    pick the best concordant pairs (fewest mismatches, insert size,
    ...) -- confirm the intent before relying on it.
    """
    pass
def pair_reads(r1, r2, add_tags=None, keep_tags=None):
    """Fill in the paired-end fields of two mated SAM records in place.

    Marks both reads as a proper pair, cross-links mate position,
    reference and strand, stores a shared insert size, and rebuilds each
    read's tag list from `keep_tags` (tag names copied from the read
    itself via `opt`) followed by `add_tags` ((name, value) pairs).
    """
    keep_tags = keep_tags or []
    add_tags = add_tags or []

    # Cross-link the two mates; only the first read gets is_read1 and
    # only the second gets is_read2, mirroring the SAM flag layout.
    for read, mate, first in ((r1, r2, True), (r2, r1, False)):
        read.is_paired = True
        read.is_proper_pair = True
        if first:
            read.is_read1 = True
        else:
            read.is_read2 = True
        read.mate_is_reverse = mate.is_reverse
        read.mate_is_unmapped = mate.is_unmapped
        read.mpos = mate.pos
        read.mrnm = mate.rname

    # Insert size: from the leftmost start to the rightmost end.
    if r1.pos > r2.pos:
        isize = r1.aend - r2.pos
    else:
        isize = r2.aend - r1.pos
    r1.isize = isize
    r2.isize = isize

    # Rebuild tag lists: copied tags first, then the extra ones.
    for read in (r1, r2):
        tags = [(name, read.opt(name)) for name in keep_tags]
        tags.extend(add_tags)
        read.tags = tags
def merge_read_pairs(bamfh, output_bamfh, min_isize, max_isize, library_type):
    """Pair mates from a segmented BAM file and write them out.

    For each fragment yielded by parse_segmented_pe_sam_file, mates
    mapping concordantly to the same reference (insert size within
    [min_isize, max_isize], strands compatible with library_type) are
    written as proper pairs via pair_reads; fragments with no concordant
    pair have all of their reads written through unchanged.

    :param bamfh: input pysam file object
    :param output_bamfh: output pysam file object
    :param min_isize: minimum allowed insert size
    :param max_isize: maximum allowed insert size
    :param library_type: parsed library-type tuple (see parse_library_type)
    """
    # setup debugging logging messages
    debug_count = 0
    debug_every = 1e6
    debug_next = debug_every
    # counters reported at the end of the run
    num_paired = 0
    num_unpaired = 0
    num_fragments = 0
    for pe_reads in parse_segmented_pe_sam_file(bamfh):
        # bin both mates' reads by reference, then look for compatible pairs
        ref_dict = map_reads_to_references(pe_reads)
        concordant_pairs = find_concordant_pairs(ref_dict, min_isize, max_isize,
                                                 library_type)
        if len(concordant_pairs) > 0:
            for r1,r2 in concordant_pairs:
                # NH records how many concordant pairings this fragment had
                pair_reads(r1, r2,
                           keep_tags=('NM', 'MD'),
                           add_tags=(('NH', len(concordant_pairs)),))
                output_bamfh.write(r1)
                output_bamfh.write(r2)
            # TODO: filter to select best pairs (fewest mismatches, insert size, etc)
            num_paired += 1
        else:
            # write unpaired reads to unpaired BAM file
            for mate_hits in pe_reads:
                for partitions in mate_hits:
                    for split_reads in partitions:
                        for r in split_reads:
                            output_bamfh.write(r)
            num_unpaired += 1
        num_fragments += 1
        # progress log (every `debug_every` fragments)
        debug_count += 1
        if debug_count == debug_next:
            debug_next += debug_every
            logging.debug("Total read pairs: %d" % (num_fragments))
            logging.debug("Paired reads: %d" % (num_paired))
            logging.debug("Unpaired_reads: %d" % (num_unpaired))
    logging.info("Total read pairs: %d" % (num_fragments))
    logging.info("Paired reads: %d" % (num_paired))
    logging.info("Unpaired_reads: %d" % (num_unpaired))
def main():
    """Command-line entry point: merge read pairs from one BAM into another."""
    from optparse import OptionParser
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    parser = OptionParser("usage: %prog [options] <in.bam> <out.bam>")
    parser.add_option('--min-fragment-length', dest="min_fragment_length",
                      type="int", default=50)
    parser.add_option('--max-fragment-length', dest="max_fragment_length",
                      type="int", default=1000)
    parser.add_option('--library', dest="library_type", default="fr")
    options, args = parser.parse_args()
    input_bam_file, output_bam_file = args[0], args[1]

    logging.info("Merging read pairs")
    logging.debug("Input file: %s" % (input_bam_file))
    logging.debug("Output file: %s" % (output_bam_file))
    logging.debug("Library type: '%s'" % (options.library_type))

    library_type = parse_library_type(options.library_type)
    infh = pysam.Samfile(input_bam_file, "rb")
    outfh = pysam.Samfile(output_bam_file, "wb", template=infh)
    merge_read_pairs(infh, outfh,
                     options.min_fragment_length,
                     options.max_fragment_length,
                     library_type)
    logging.info("Paired-end merging completed")
|
artwr/airflow | refs/heads/master | airflow/contrib/operators/bigquery_check_operator.py | 3 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
from airflow.operators.check_operator import \
CheckOperator, ValueCheckOperator, IntervalCheckOperator
from airflow.utils.decorators import apply_defaults
class BigQueryCheckOperator(CheckOperator):
    """
    Performs checks against BigQuery.

    The given sql query must return a single row; every value on that
    row is evaluated with Python ``bool`` casting, and the check fails
    if any of them is falsy (``False``, ``0``, empty string, empty list,
    empty dict/set).

    For example, ``SELECT COUNT(*) FROM foo`` fails only when the count
    is 0, but much richer queries are possible: comparing row counts
    against an upstream table, checking today's partition against
    yesterday's, or bounding metrics by a rolling average.  Placed on
    the critical path of a DAG this acts as a data-quality gate; placed
    on the side it merely alerts without blocking progress.

    :param sql: the sql to be executed
    :type sql: str
    :param bigquery_conn_id: reference to the BigQuery database
    :type bigquery_conn_id: str
    :param use_legacy_sql: Whether to use legacy SQL (true)
        or standard SQL (false).
    :type use_legacy_sql: bool
    """

    @apply_defaults
    def __init__(self,
                 sql,
                 bigquery_conn_id='bigquery_default',
                 use_legacy_sql=True,
                 *args, **kwargs):
        super(BigQueryCheckOperator, self).__init__(sql=sql, *args, **kwargs)
        self.sql = sql
        self.use_legacy_sql = use_legacy_sql
        self.bigquery_conn_id = bigquery_conn_id

    def get_db_hook(self):
        """Return a BigQueryHook configured for this operator's connection."""
        return BigQueryHook(
            bigquery_conn_id=self.bigquery_conn_id,
            use_legacy_sql=self.use_legacy_sql)
class BigQueryValueCheckOperator(ValueCheckOperator):
    """
    Performs a simple value check against BigQuery using sql code.

    :param sql: the sql to be executed
    :type sql: str
    :param use_legacy_sql: Whether to use legacy SQL (true)
        or standard SQL (false).
    :type use_legacy_sql: bool
    """

    @apply_defaults
    def __init__(self, sql,
                 pass_value,
                 tolerance=None,
                 bigquery_conn_id='bigquery_default',
                 use_legacy_sql=True,
                 *args, **kwargs):
        super(BigQueryValueCheckOperator, self).__init__(
            sql=sql, pass_value=pass_value, tolerance=tolerance,
            *args, **kwargs)
        self.use_legacy_sql = use_legacy_sql
        self.bigquery_conn_id = bigquery_conn_id

    def get_db_hook(self):
        """Return a BigQueryHook configured for this operator's connection."""
        return BigQueryHook(
            bigquery_conn_id=self.bigquery_conn_id,
            use_legacy_sql=self.use_legacy_sql)
class BigQueryIntervalCheckOperator(IntervalCheckOperator):
    """
    Checks that the values of metrics given as SQL expressions are
    within a certain tolerance of the ones from days_back before.

    This method constructs a query like so ::

        SELECT {metrics_threshold_dict_key} FROM {table}
        WHERE {date_filter_column}=<date>

    :param table: the table name
    :type table: str
    :param days_back: number of days between ds and the ds we want to
        check against. Defaults to 7 days
    :type days_back: int
    :param metrics_threshold: a dictionary of ratios indexed by metrics,
        for example 'COUNT(*)': 1.5 would require a 50 percent or less
        difference between the current day, and the prior days_back.
    :type metrics_threshold: dict
    :param use_legacy_sql: Whether to use legacy SQL (true)
        or standard SQL (false).
    :type use_legacy_sql: bool
    """

    @apply_defaults
    def __init__(self, table, metrics_thresholds, date_filter_column='ds',
                 days_back=-7, bigquery_conn_id='bigquery_default',
                 use_legacy_sql=True, *args, **kwargs):
        super(BigQueryIntervalCheckOperator, self).__init__(
            table=table, metrics_thresholds=metrics_thresholds,
            date_filter_column=date_filter_column, days_back=days_back,
            *args, **kwargs)
        self.use_legacy_sql = use_legacy_sql
        self.bigquery_conn_id = bigquery_conn_id

    def get_db_hook(self):
        """Return a BigQueryHook configured for this operator's connection."""
        return BigQueryHook(
            bigquery_conn_id=self.bigquery_conn_id,
            use_legacy_sql=self.use_legacy_sql)
|
fvcproductions/dotfiles | refs/heads/master | bin/alfred/Alfred.alfredpreferences/workflows/user.workflow.DEDF5652-6FEF-4776-80D8-ACEDF577D06A/bs4/dammit.py | 23 | # -*- coding: utf-8 -*-
"""Beautiful Soup bonus library: Unicode, Dammit
This library converts a bytestream to Unicode through any means
necessary. It is heavily based on code from Mark Pilgrim's Universal
Feed Parser. It works best on XML and HTML, but it does not rewrite the
XML or HTML to reflect a new encoding; that's the tree builder's job.
"""
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__license__ = "MIT"
import codecs
from htmlentitydefs import codepoint2name
import re
import logging
import string
# Import a library to autodetect character encodings.
chardet_type = None  # NOTE(review): not read again in this chunk -- confirm it is used elsewhere
try:
    # First try the fast C implementation.
    #  PyPI package: cchardet
    import cchardet
    def chardet_dammit(s):
        return cchardet.detect(s)['encoding']
except ImportError:
    try:
        # Fall back to the pure Python implementation.
        #  Debian package: python-chardet
        #  PyPI package: chardet
        import chardet
        def chardet_dammit(s):
            return chardet.detect(s)['encoding']
        #import chardet.constants
        #chardet.constants._debug = 1
    except ImportError:
        # No chardet available: every caller gets "encoding unknown".
        def chardet_dammit(s):
            return None

# Available from http://cjkpython.i18n.org/.
# Importing presumably registers extra CJK codecs -- optional, so
# failure is ignored.
try:
    import iconv_codec
except ImportError:
    pass

# Bytes regex for the encoding attribute of an XML declaration,
# e.g. <?xml version="1.0" encoding="..."?>.
xml_encoding_re = re.compile(
    '^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode(), re.I)
# Bytes regex for the charset of an HTML <meta> tag.
html_meta_re = re.compile(
    '<\s*meta[^>]+charset\s*=\s*["\']?([^>]*?)[ /;\'">]'.encode(), re.I)
class EntitySubstitution(object):
    """Substitute XML or HTML entities for the corresponding characters."""

    def _populate_class_variables():
        # NOTE: runs once at class-definition time (hence no `self`);
        # its return values are unpacked into class attributes below.
        lookup = {}
        reverse_lookup = {}
        characters_for_re = []
        for codepoint, name in list(codepoint2name.items()):
            character = unichr(codepoint)
            if codepoint != 34:
                # There's no point in turning the quotation mark into
                # &quot;, unless it happens within an attribute value,
                # which is handled elsewhere.
                characters_for_re.append(character)
                lookup[character] = name
            # But we do want to turn &quot; into the quotation mark.
            reverse_lookup[name] = character
        re_definition = "[%s]" % "".join(characters_for_re)
        return lookup, reverse_lookup, re.compile(re_definition)
    (CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER,
     CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables()

    CHARACTER_TO_XML_ENTITY = {
        "'": "apos",
        '"': "quot",
        "&": "amp",
        "<": "lt",
        ">": "gt",
        }

    # Matches angle brackets, plus any ampersand that is NOT already
    # part of a numeric or named entity reference.
    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
                                           "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
                                           ")")

    # Matches the three XML special characters unconditionally.
    AMPERSAND_OR_BRACKET = re.compile("([<>&])")

    @classmethod
    def _substitute_html_entity(cls, matchobj):
        """Used with a regular expression to substitute the
        appropriate HTML entity for an HTML special character."""
        entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0))
        return "&%s;" % entity

    @classmethod
    def _substitute_xml_entity(cls, matchobj):
        """Used with a regular expression to substitute the
        appropriate XML entity for an XML special character."""
        entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
        return "&%s;" % entity

    @classmethod
    def quoted_attribute_value(self, value):
        """Make a value into a quoted XML attribute, possibly escaping it.

        Most strings will be quoted using double quotes.

         Bob's Bar -> "Bob's Bar"

        If a string contains double quotes, it will be quoted using
        single quotes.

         Welcome to "my bar" -> 'Welcome to "my bar"'

        If a string contains both single and double quotes, the
        double quotes will be escaped, and the string will be quoted
        using double quotes.

         Welcome to "Bob's Bar" -> "Welcome to &quot;Bob's bar&quot;"
        """
        quote_with = '"'
        if '"' in value:
            if "'" in value:
                # The string contains both single and double
                # quotes.  Turn the double quotes into
                # entities. We quote the double quotes rather than
                # the single quotes because the entity name is
                # "&quot;" whether this is HTML or XML.  If we
                # quoted the single quotes, we'd have to decide
                # between &apos; and &squot;.
                # BUG FIX: this literal had been corrupted to a bare
                # `"""` (invalid Python -- an unterminated triple
                # quote); restored to the upstream value "&quot;".
                replace_with = "&quot;"
                value = value.replace('"', replace_with)
            else:
                # There are double quotes but no single quotes.
                # We can use single quotes to quote the attribute.
                quote_with = "'"
        return quote_with + value + quote_with

    @classmethod
    def substitute_xml(cls, value, make_quoted_attribute=False):
        """Substitute XML entities for special XML characters.

        :param value: A string to be substituted. The less-than sign
          will become &lt;, the greater-than sign will become &gt;,
          and any ampersands will become &amp;. If you want ampersands
          that appear to be part of an entity definition to be left
          alone, use substitute_xml_containing_entities() instead.

        :param make_quoted_attribute: If True, then the string will be
         quoted, as befits an attribute value.
        """
        # Escape angle brackets and ampersands.
        value = cls.AMPERSAND_OR_BRACKET.sub(
            cls._substitute_xml_entity, value)
        if make_quoted_attribute:
            value = cls.quoted_attribute_value(value)
        return value

    @classmethod
    def substitute_xml_containing_entities(
        cls, value, make_quoted_attribute=False):
        """Substitute XML entities for special XML characters.

        :param value: A string to be substituted. The less-than sign will
          become &lt;, the greater-than sign will become &gt;, and any
          ampersands that are not part of an entity defition will
          become &amp;.

        :param make_quoted_attribute: If True, then the string will be
         quoted, as befits an attribute value.
        """
        # Escape angle brackets, and ampersands that aren't part of
        # entities.
        value = cls.BARE_AMPERSAND_OR_BRACKET.sub(
            cls._substitute_xml_entity, value)
        if make_quoted_attribute:
            value = cls.quoted_attribute_value(value)
        return value

    @classmethod
    def substitute_html(cls, s):
        """Replace certain Unicode characters with named HTML entities.

        This differs from data.encode(encoding, 'xmlcharrefreplace')
        in that the goal is to make the result more readable (to those
        with ASCII displays) rather than to recover from
        errors. There's absolutely nothing wrong with a UTF-8 string
        containg a LATIN SMALL LETTER E WITH ACUTE, but replacing that
        character with "&eacute;" will make it more readable to some
        people.
        """
        return cls.CHARACTER_TO_HTML_ENTITY_RE.sub(
            cls._substitute_html_entity, s)
class EncodingDetector:
"""Suggests a number of possible encodings for a bytestring.
Order of precedence:
1. Encodings you specifically tell EncodingDetector to try first
(the override_encodings argument to the constructor).
2. An encoding declared within the bytestring itself, either in an
XML declaration (if the bytestring is to be interpreted as an XML
document), or in a <meta> tag (if the bytestring is to be
interpreted as an HTML document.)
3. An encoding detected through textual analysis by chardet,
cchardet, or a similar external library.
4. UTF-8.
5. Windows-1252.
"""
def __init__(self, markup, override_encodings=None, is_html=False,
exclude_encodings=None):
self.override_encodings = override_encodings or []
exclude_encodings = exclude_encodings or []
self.exclude_encodings = set([x.lower() for x in exclude_encodings])
self.chardet_encoding = None
self.is_html = is_html
self.declared_encoding = None
# First order of business: strip a byte-order mark.
self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup)
def _usable(self, encoding, tried):
if encoding is not None:
encoding = encoding.lower()
if encoding in self.exclude_encodings:
return False
if encoding not in tried:
tried.add(encoding)
return True
return False
@property
def encodings(self):
"""Yield a number of encodings that might work for this markup."""
tried = set()
for e in self.override_encodings:
if self._usable(e, tried):
yield e
# Did the document originally start with a byte-order mark
# that indicated its encoding?
if self._usable(self.sniffed_encoding, tried):
yield self.sniffed_encoding
# Look within the document for an XML or HTML encoding
# declaration.
if self.declared_encoding is None:
self.declared_encoding = self.find_declared_encoding(
self.markup, self.is_html)
if self._usable(self.declared_encoding, tried):
yield self.declared_encoding
# Use third-party character set detection to guess at the
# encoding.
if self.chardet_encoding is None:
self.chardet_encoding = chardet_dammit(self.markup)
if self._usable(self.chardet_encoding, tried):
yield self.chardet_encoding
# As a last-ditch effort, try utf-8 and windows-1252.
for e in ('utf-8', 'windows-1252'):
if self._usable(e, tried):
yield e
@classmethod
def strip_byte_order_mark(cls, data):
    """If a byte-order mark is present, strip it and return the encoding it implies.

    :param data: Markup, as a bytestring or already-decoded text.
    :return: A (data, encoding) tuple.  `encoding` is None when no BOM
        was found; otherwise `data` is returned with the BOM removed.
    """
    encoding = None
    if isinstance(data, unicode):
        # Unicode data cannot have a byte-order mark.
        return data, encoding
    # BUG FIX: the \x00\x00 guards below were previously written as
    # text literals ('\x00\x00').  Under Python 3 a bytes slice never
    # compares equal to a str, so the guard was always true and a
    # UTF-32LE BOM (ff fe 00 00) was misdetected as UTF-16LE.  They
    # must be bytes literals to match the bytes slice.
    if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \
           and (data[2:4] != b'\x00\x00'):
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \
             and (data[2:4] != b'\x00\x00'):
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == b'\xef\xbb\xbf':
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == b'\x00\x00\xfe\xff':
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == b'\xff\xfe\x00\x00':
        encoding = 'utf-32le'
        data = data[4:]
    return data, encoding
@classmethod
def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False):
    """Given a document, tries to find its declared encoding.

    An XML encoding is declared at the beginning of the document.
    An HTML encoding is declared in a <meta> tag, hopefully near the
    beginning of the document.

    :param markup: Bytestring to search; matched with the module-level
        regexes xml_encoding_re / html_meta_re (defined elsewhere in
        this file).
    :param is_html: If true, also try the HTML <meta> pattern.
    :param search_entire_document: If false, only the start of the
        document is searched: 1024 bytes for the XML declaration and,
        for HTML, 2048 bytes or 5% of the document, whichever is larger.
    :return: Lowercased encoding name, or None if none was declared.
    """
    if search_entire_document:
        xml_endpos = html_endpos = len(markup)
    else:
        xml_endpos = 1024
        html_endpos = max(2048, int(len(markup) * 0.05))
    declared_encoding = None
    declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos)
    # Only fall back to the <meta> pattern when no XML declaration
    # was found.
    if not declared_encoding_match and is_html:
        declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos)
    if declared_encoding_match is not None:
        # The regexes capture raw bytes; decode permissively so a
        # garbled declaration cannot raise here.
        declared_encoding = declared_encoding_match.groups()[0].decode(
            'ascii', 'replace')
    if declared_encoding:
        return declared_encoding.lower()
    return None
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = {"macintosh": "mac-roman",
                       "x-sjis": "shift-jis"}

    # Encodings whose 0x80-0x9f byte range may hold Microsoft "smart
    # quote" characters; _convert_from only performs smart-quote
    # substitution when decoding from one of these.
    ENCODINGS_WITH_SMART_QUOTES = [
        "windows-1252",
        "iso-8859-1",
        "iso-8859-2",
    ]
def __init__(self, markup, override_encodings=None,
             smart_quotes_to=None, is_html=False, exclude_encodings=None):
    """Detect `markup`'s encoding and decode it to Unicode.

    On return, `unicode_markup` holds the decoded document (or None if
    every attempt failed) and `original_encoding` the encoding that
    worked.

    :param markup: The document, normally a bytestring.
    :param override_encodings: Encodings to try first, in order.
    :param smart_quotes_to: 'xml', 'html', 'ascii' or None; how MS
        smart quotes are rewritten (see _sub_ms_char).
    :param is_html: Whether <meta> encoding declarations apply.
    :param exclude_encodings: Encodings that must never be tried.

    BUG FIX: `override_encodings` and `exclude_encodings` previously
    defaulted to mutable `[]` literals shared between calls; they now
    default to None, which EncodingDetector treats identically, so the
    change is backward-compatible.
    """
    self.smart_quotes_to = smart_quotes_to
    self.tried_encodings = []
    self.contains_replacement_characters = False
    self.is_html = is_html
    self.log = logging.getLogger(__name__)
    self.detector = EncodingDetector(
        markup, override_encodings, is_html, exclude_encodings)
    # Short-circuit if the data is in Unicode to begin with.
    if isinstance(markup, unicode) or markup == '':
        self.markup = markup
        self.unicode_markup = unicode(markup)
        self.original_encoding = None
        return
    # The encoding detector may have stripped a byte-order mark.
    # Use the stripped markup from this point on.
    self.markup = self.detector.markup
    u = None
    for encoding in self.detector.encodings:
        # (An unused per-iteration rebinding of a local `markup` was
        # removed here; _convert_from reads self.markup directly.)
        u = self._convert_from(encoding)
        if u is not None:
            break
    if not u:
        # None of the encodings worked. As an absolute last resort,
        # try them again with character replacement.
        for encoding in self.detector.encodings:
            if encoding != "ascii":
                u = self._convert_from(encoding, "replace")
                if u is not None:
                    self.log.warning(
                        "Some characters could not be decoded, and were "
                        "replaced with REPLACEMENT CHARACTER."
                    )
                    self.contains_replacement_characters = True
                    break
    # If none of that worked, we could at this point force it to
    # ASCII, but that would destroy so much data that I think
    # giving up is better.
    self.unicode_markup = u
    if not u:
        self.original_encoding = None
def _sub_ms_char(self, match):
    """Replace one matched MS smart-quote byte.

    Depending on self.smart_quotes_to, the byte becomes an ASCII
    approximation, an XML numeric entity, or an HTML named entity.
    """
    char = match.group(1)
    if self.smart_quotes_to == 'ascii':
        return self.MS_CHARS_TO_ASCII.get(char).encode()
    replacement = self.MS_CHARS.get(char)
    if type(replacement) == tuple:
        # A (named entity, hex codepoint) pair.
        entity, codepoint = replacement
        if self.smart_quotes_to == 'xml':
            return '&#x'.encode() + codepoint.encode() + ';'.encode()
        return '&'.encode() + entity.encode() + ';'.encode()
    # A plain replacement character (e.g. '?').
    return replacement.encode()
def _convert_from(self, proposed, errors="strict"):
    """Attempt to decode self.markup using the `proposed` encoding.

    On success the decoded text is stored in self.markup and the
    winning codec in self.original_encoding, and the text is returned.
    Returns None when the codec is unusable, was already tried with
    the same `errors` mode, or raised during decoding.
    """
    proposed = self.find_codec(proposed)
    if not proposed or (proposed, errors) in self.tried_encodings:
        return None
    # Remember this attempt so the same (codec, errors) pair is never
    # retried.
    self.tried_encodings.append((proposed, errors))
    markup = self.markup
    # Convert smart quotes to HTML if coming from an encoding
    # that might have them.
    if (self.smart_quotes_to is not None
        and proposed in self.ENCODINGS_WITH_SMART_QUOTES):
        smart_quotes_re = b"([\x80-\x9f])"
        smart_quotes_compiled = re.compile(smart_quotes_re)
        markup = smart_quotes_compiled.sub(self._sub_ms_char, markup)
    try:
        #print "Trying to convert document to %s (errors=%s)" % (
        #    proposed, errors)
        u = self._to_unicode(markup, proposed, errors)
        # Success: record the decoded text and the encoding that worked.
        self.markup = u
        self.original_encoding = proposed
    except Exception as e:
        #print "That didn't work!"
        #print e
        return None
    #print "Correct encoding: %s" % proposed
    return self.markup
def _to_unicode(self, data, encoding, errors="strict"):
    '''Given a string and its encoding, decodes the string into Unicode.

    `encoding` is a string recognized by encodings.aliases; `errors`
    is passed straight through to the codec (e.g. "strict", "replace").
    '''
    return unicode(data, encoding, errors)
@property
def declared_html_encoding(self):
    """The encoding declared within the document's markup, if this
    was treated as an HTML document; None otherwise."""
    if self.is_html:
        return self.detector.declared_encoding
    return None
def find_codec(self, charset):
    """Map a declared charset name to a usable, lowercased codec name.

    Tries, in order: the CHARSET_ALIASES translation, the name with
    dashes removed, the name with dashes turned into underscores.
    If no real codec matches, falls back to the lowercased input (or
    the input itself), and returns None for an empty/None charset.
    """
    candidates = [self.CHARSET_ALIASES.get(charset, charset)]
    if charset:
        candidates.append(charset.replace("-", ""))
        candidates.append(charset.replace("-", "_"))
    value = None
    for candidate in candidates:
        value = self._codec(candidate)
        if value:
            break
    if not value and charset:
        value = charset.lower()
    if not value:
        value = charset
    if value:
        return value.lower()
    return None
def _codec(self, charset):
    """Return `charset` unchanged when Python knows a codec by that
    name, the falsy input itself for empty/None input, and None when
    no such codec exists."""
    if not charset:
        return charset
    try:
        codecs.lookup(charset)
    except (LookupError, ValueError):
        return None
    return charset
# A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities.
MS_CHARS = {b'\x80': ('euro', '20AC'),
b'\x81': ' ',
b'\x82': ('sbquo', '201A'),
b'\x83': ('fnof', '192'),
b'\x84': ('bdquo', '201E'),
b'\x85': ('hellip', '2026'),
b'\x86': ('dagger', '2020'),
b'\x87': ('Dagger', '2021'),
b'\x88': ('circ', '2C6'),
b'\x89': ('permil', '2030'),
b'\x8A': ('Scaron', '160'),
b'\x8B': ('lsaquo', '2039'),
b'\x8C': ('OElig', '152'),
b'\x8D': '?',
b'\x8E': ('#x17D', '17D'),
b'\x8F': '?',
b'\x90': '?',
b'\x91': ('lsquo', '2018'),
b'\x92': ('rsquo', '2019'),
b'\x93': ('ldquo', '201C'),
b'\x94': ('rdquo', '201D'),
b'\x95': ('bull', '2022'),
b'\x96': ('ndash', '2013'),
b'\x97': ('mdash', '2014'),
b'\x98': ('tilde', '2DC'),
b'\x99': ('trade', '2122'),
b'\x9a': ('scaron', '161'),
b'\x9b': ('rsaquo', '203A'),
b'\x9c': ('oelig', '153'),
b'\x9d': '?',
b'\x9e': ('#x17E', '17E'),
b'\x9f': ('Yuml', ''),}
# A parochial partial mapping of ISO-Latin-1 to ASCII. Contains
# horrors like stripping diacritical marks to turn á into a, but also
# contains non-horrors like turning “ into ".
MS_CHARS_TO_ASCII = {
b'\x80' : 'EUR',
b'\x81' : ' ',
b'\x82' : ',',
b'\x83' : 'f',
b'\x84' : ',,',
b'\x85' : '...',
b'\x86' : '+',
b'\x87' : '++',
b'\x88' : '^',
b'\x89' : '%',
b'\x8a' : 'S',
b'\x8b' : '<',
b'\x8c' : 'OE',
b'\x8d' : '?',
b'\x8e' : 'Z',
b'\x8f' : '?',
b'\x90' : '?',
b'\x91' : "'",
b'\x92' : "'",
b'\x93' : '"',
b'\x94' : '"',
b'\x95' : '*',
b'\x96' : '-',
b'\x97' : '--',
b'\x98' : '~',
b'\x99' : '(TM)',
b'\x9a' : 's',
b'\x9b' : '>',
b'\x9c' : 'oe',
b'\x9d' : '?',
b'\x9e' : 'z',
b'\x9f' : 'Y',
b'\xa0' : ' ',
b'\xa1' : '!',
b'\xa2' : 'c',
b'\xa3' : 'GBP',
b'\xa4' : '$', #This approximation is especially parochial--this is the
#generic currency symbol.
b'\xa5' : 'YEN',
b'\xa6' : '|',
b'\xa7' : 'S',
b'\xa8' : '..',
b'\xa9' : '',
b'\xaa' : '(th)',
b'\xab' : '<<',
b'\xac' : '!',
b'\xad' : ' ',
b'\xae' : '(R)',
b'\xaf' : '-',
b'\xb0' : 'o',
b'\xb1' : '+-',
b'\xb2' : '2',
b'\xb3' : '3',
b'\xb4' : ("'", 'acute'),
b'\xb5' : 'u',
b'\xb6' : 'P',
b'\xb7' : '*',
b'\xb8' : ',',
b'\xb9' : '1',
b'\xba' : '(th)',
b'\xbb' : '>>',
b'\xbc' : '1/4',
b'\xbd' : '1/2',
b'\xbe' : '3/4',
b'\xbf' : '?',
b'\xc0' : 'A',
b'\xc1' : 'A',
b'\xc2' : 'A',
b'\xc3' : 'A',
b'\xc4' : 'A',
b'\xc5' : 'A',
b'\xc6' : 'AE',
b'\xc7' : 'C',
b'\xc8' : 'E',
b'\xc9' : 'E',
b'\xca' : 'E',
b'\xcb' : 'E',
b'\xcc' : 'I',
b'\xcd' : 'I',
b'\xce' : 'I',
b'\xcf' : 'I',
b'\xd0' : 'D',
b'\xd1' : 'N',
b'\xd2' : 'O',
b'\xd3' : 'O',
b'\xd4' : 'O',
b'\xd5' : 'O',
b'\xd6' : 'O',
b'\xd7' : '*',
b'\xd8' : 'O',
b'\xd9' : 'U',
b'\xda' : 'U',
b'\xdb' : 'U',
b'\xdc' : 'U',
b'\xdd' : 'Y',
b'\xde' : 'b',
b'\xdf' : 'B',
b'\xe0' : 'a',
b'\xe1' : 'a',
b'\xe2' : 'a',
b'\xe3' : 'a',
b'\xe4' : 'a',
b'\xe5' : 'a',
b'\xe6' : 'ae',
b'\xe7' : 'c',
b'\xe8' : 'e',
b'\xe9' : 'e',
b'\xea' : 'e',
b'\xeb' : 'e',
b'\xec' : 'i',
b'\xed' : 'i',
b'\xee' : 'i',
b'\xef' : 'i',
b'\xf0' : 'o',
b'\xf1' : 'n',
b'\xf2' : 'o',
b'\xf3' : 'o',
b'\xf4' : 'o',
b'\xf5' : 'o',
b'\xf6' : 'o',
b'\xf7' : '/',
b'\xf8' : 'o',
b'\xf9' : 'u',
b'\xfa' : 'u',
b'\xfb' : 'u',
b'\xfc' : 'u',
b'\xfd' : 'y',
b'\xfe' : 'b',
b'\xff' : 'y',
}
# A map used when removing rogue Windows-1252/ISO-8859-1
# characters in otherwise UTF-8 documents.
#
# Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in
# Windows-1252.
WINDOWS_1252_TO_UTF8 = {
    0x80 : b'\xe2\x82\xac', # €
    0x82 : b'\xe2\x80\x9a', # ‚
    0x83 : b'\xc6\x92',     # ƒ
    0x84 : b'\xe2\x80\x9e', # „
    0x85 : b'\xe2\x80\xa6', # …
    0x86 : b'\xe2\x80\xa0', # †
    0x87 : b'\xe2\x80\xa1', # ‡
    0x88 : b'\xcb\x86',     # ˆ
    0x89 : b'\xe2\x80\xb0', # ‰
    0x8a : b'\xc5\xa0',     # Š
    0x8b : b'\xe2\x80\xb9', # ‹
    0x8c : b'\xc5\x92',     # Œ
    0x8e : b'\xc5\xbd',     # Ž
    0x91 : b'\xe2\x80\x98', # ‘
    0x92 : b'\xe2\x80\x99', # ’
    0x93 : b'\xe2\x80\x9c', # “
    0x94 : b'\xe2\x80\x9d', # ”
    0x95 : b'\xe2\x80\xa2', # •
    0x96 : b'\xe2\x80\x93', # –
    0x97 : b'\xe2\x80\x94', # —
    0x98 : b'\xcb\x9c',     # ˜
    0x99 : b'\xe2\x84\xa2', # ™
    0x9a : b'\xc5\xa1',     # š
    0x9b : b'\xe2\x80\xba', # ›
    0x9c : b'\xc5\x93',     # œ
    0x9e : b'\xc5\xbe',     # ž
    0x9f : b'\xc5\xb8',     # Ÿ
    0xa0 : b'\xc2\xa0',     # no-break space
    0xa1 : b'\xc2\xa1',     # ¡
    0xa2 : b'\xc2\xa2',     # ¢
    0xa3 : b'\xc2\xa3',     # £
    0xa4 : b'\xc2\xa4',     # ¤
    0xa5 : b'\xc2\xa5',     # ¥
    0xa6 : b'\xc2\xa6',     # ¦
    0xa7 : b'\xc2\xa7',     # §
    0xa8 : b'\xc2\xa8',     # ¨
    0xa9 : b'\xc2\xa9',     # ©
    0xaa : b'\xc2\xaa',     # ª
    0xab : b'\xc2\xab',     # «
    0xac : b'\xc2\xac',     # ¬
    0xad : b'\xc2\xad',     # soft hyphen
    0xae : b'\xc2\xae',     # ®
    0xaf : b'\xc2\xaf',     # ¯
    0xb0 : b'\xc2\xb0',     # °
    0xb1 : b'\xc2\xb1',     # ±
    0xb2 : b'\xc2\xb2',     # ²
    0xb3 : b'\xc2\xb3',     # ³
    0xb4 : b'\xc2\xb4',     # ´
    0xb5 : b'\xc2\xb5',     # µ
    0xb6 : b'\xc2\xb6',     # ¶
    0xb7 : b'\xc2\xb7',     # ·
    0xb8 : b'\xc2\xb8',     # ¸
    0xb9 : b'\xc2\xb9',     # ¹
    0xba : b'\xc2\xba',     # º
    0xbb : b'\xc2\xbb',     # »
    0xbc : b'\xc2\xbc',     # ¼
    0xbd : b'\xc2\xbd',     # ½
    0xbe : b'\xc2\xbe',     # ¾
    0xbf : b'\xc2\xbf',     # ¿
    0xc0 : b'\xc3\x80',     # À
    0xc1 : b'\xc3\x81',     # Á
    0xc2 : b'\xc3\x82',     # Â
    0xc3 : b'\xc3\x83',     # Ã
    0xc4 : b'\xc3\x84',     # Ä
    0xc5 : b'\xc3\x85',     # Å
    0xc6 : b'\xc3\x86',     # Æ
    0xc7 : b'\xc3\x87',     # Ç
    0xc8 : b'\xc3\x88',     # È
    0xc9 : b'\xc3\x89',     # É
    0xca : b'\xc3\x8a',     # Ê
    0xcb : b'\xc3\x8b',     # Ë
    0xcc : b'\xc3\x8c',     # Ì
    0xcd : b'\xc3\x8d',     # Í
    0xce : b'\xc3\x8e',     # Î
    0xcf : b'\xc3\x8f',     # Ï
    0xd0 : b'\xc3\x90',     # Ð
    0xd1 : b'\xc3\x91',     # Ñ
    0xd2 : b'\xc3\x92',     # Ò
    0xd3 : b'\xc3\x93',     # Ó
    0xd4 : b'\xc3\x94',     # Ô
    0xd5 : b'\xc3\x95',     # Õ
    0xd6 : b'\xc3\x96',     # Ö
    0xd7 : b'\xc3\x97',     # ×
    0xd8 : b'\xc3\x98',     # Ø
    0xd9 : b'\xc3\x99',     # Ù
    0xda : b'\xc3\x9a',     # Ú
    0xdb : b'\xc3\x9b',     # Û
    0xdc : b'\xc3\x9c',     # Ü
    0xdd : b'\xc3\x9d',     # Ý
    0xde : b'\xc3\x9e',     # Þ
    0xdf : b'\xc3\x9f',     # ß
    0xe0 : b'\xc3\xa0',     # à
    # BUG FIX: this entry was b'\xa1' (a lone continuation byte); the
    # UTF-8 encoding of U+00E1 (á) is b'\xc3\xa1', matching the
    # two-byte \xc3-prefixed pattern of every neighboring entry.
    0xe1 : b'\xc3\xa1',     # á
    0xe2 : b'\xc3\xa2',     # â
    0xe3 : b'\xc3\xa3',     # ã
    0xe4 : b'\xc3\xa4',     # ä
    0xe5 : b'\xc3\xa5',     # å
    0xe6 : b'\xc3\xa6',     # æ
    0xe7 : b'\xc3\xa7',     # ç
    0xe8 : b'\xc3\xa8',     # è
    0xe9 : b'\xc3\xa9',     # é
    0xea : b'\xc3\xaa',     # ê
    0xeb : b'\xc3\xab',     # ë
    0xec : b'\xc3\xac',     # ì
    0xed : b'\xc3\xad',     # í
    0xee : b'\xc3\xae',     # î
    0xef : b'\xc3\xaf',     # ï
    0xf0 : b'\xc3\xb0',     # ð
    0xf1 : b'\xc3\xb1',     # ñ
    0xf2 : b'\xc3\xb2',     # ò
    0xf3 : b'\xc3\xb3',     # ó
    0xf4 : b'\xc3\xb4',     # ô
    0xf5 : b'\xc3\xb5',     # õ
    0xf6 : b'\xc3\xb6',     # ö
    0xf7 : b'\xc3\xb7',     # ÷
    0xf8 : b'\xc3\xb8',     # ø
    0xf9 : b'\xc3\xb9',     # ù
    0xfa : b'\xc3\xba',     # ú
    0xfb : b'\xc3\xbb',     # û
    0xfc : b'\xc3\xbc',     # ü
    0xfd : b'\xc3\xbd',     # ý
    0xfe : b'\xc3\xbe',     # þ
}
MULTIBYTE_MARKERS_AND_SIZES = [
(0xc2, 0xdf, 2), # 2-byte characters start with a byte C2-DF
(0xe0, 0xef, 3), # 3-byte characters start with E0-EF
(0xf0, 0xf4, 4), # 4-byte characters start with F0-F4
]
FIRST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[0][0]
LAST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[-1][1]
@classmethod
def detwingle(cls, in_bytes, main_encoding="utf8",
              embedded_encoding="windows-1252"):
    """Fix characters from one encoding embedded in some other encoding.

    Currently the only situation supported is Windows-1252 (or its
    subset ISO-8859-1), embedded in UTF-8.

    The input must be a bytestring. If you've already converted
    the document to Unicode, you're too late.

    The output is a bytestring in which `embedded_encoding`
    characters have been converted to their `main_encoding`
    equivalents.
    """
    if embedded_encoding.replace('_', '-').lower() not in (
        'windows-1252', 'windows_1252'):
        raise NotImplementedError(
            "Windows-1252 and ISO-8859-1 are the only currently supported "
            "embedded encodings.")
    if main_encoding.lower() not in ('utf8', 'utf-8'):
        raise NotImplementedError(
            "UTF-8 is the only currently supported main encoding.")
    output_chunks = []
    segment_start = 0
    cursor = 0
    total = len(in_bytes)
    while cursor < total:
        current = in_bytes[cursor]
        if not isinstance(current, int):
            # Python 2.x: indexing a str yields a 1-char str, not an int.
            current = ord(current)
        if (current >= cls.FIRST_MULTIBYTE_MARKER
            and current <= cls.LAST_MULTIBYTE_MARKER):
            # Start of a valid UTF-8 multibyte character; skip past it.
            for low, high, width in cls.MULTIBYTE_MARKERS_AND_SIZES:
                if low <= current <= high:
                    cursor += width
                    break
        elif current >= 0x80 and current in cls.WINDOWS_1252_TO_UTF8:
            # A stray Windows-1252 byte: flush the clean run collected
            # so far, then emit the UTF-8 translation as its own chunk.
            output_chunks.append(in_bytes[segment_start:cursor])
            output_chunks.append(cls.WINDOWS_1252_TO_UTF8[current])
            cursor += 1
            segment_start = cursor
        else:
            # Plain single byte; keep scanning.
            cursor += 1
    if segment_start == 0:
        # Nothing was translated; hand back the input unchanged.
        return in_bytes
    output_chunks.append(in_bytes[segment_start:])
    return b''.join(output_chunks)
|
grnet/agkyra | refs/heads/master | agkyra/__init__.py | 196 | __version__ = '0.1'
|
martynovp/edx-platform | refs/heads/master | common/test/acceptance/pages/lms/fields.py | 26 | # -*- coding: utf-8 -*-
"""
Mixins for fields.
"""
from bok_choy.promise import EmptyPromise
from ...tests.helpers import get_selected_option_text, select_option_by_text
class FieldsMixin(object):
"""
Methods for testing fields in pages.
"""
def field(self, field_id):
    """
    Return the text of the field matching ``field_id``, or None
    when no such field is present on the page.
    """
    result = self.q(css='.u-field-{}'.format(field_id))
    if not result.present:
        return None
    return result.text[0]
def wait_for_field(self, field_id):
"""
Wait for a field to appear in DOM.
"""
EmptyPromise(
lambda: self.field(field_id) is not None,
"Field with id \"{0}\" is in DOM.".format(field_id)
).fulfill()
def mode_for_field(self, field_id):
    """
    Return the field's current mode: 'placeholder', 'display' or
    'edit', or None when the field is absent (or carries no mode
    class).
    """
    self.wait_for_field(field_id)
    query = self.q(css='.u-field-{}'.format(field_id))
    if not query.present:
        return None
    css_classes = query.attrs('class')[0].split()
    for mode in ('placeholder', 'display', 'edit'):
        if 'mode-{}'.format(mode) in css_classes:
            return mode
    return None
def icon_for_field(self, field_id, icon_id):
"""
Check if field icon is present.
"""
self.wait_for_field(field_id)
query = self.q(css='.u-field-{} .u-field-icon'.format(field_id))
return query.present and icon_id in query.attrs('class')[0].split()
def title_for_field(self, field_id):
"""
Return the title of a field.
"""
self.wait_for_field(field_id)
query = self.q(css='.u-field-{} .u-field-title'.format(field_id))
return query.text[0] if query.present else None
def message_for_field(self, field_id):
"""
Return the current message in a field.
"""
self.wait_for_field(field_id)
query = self.q(css='.u-field-{} .u-field-message'.format(field_id))
return query.text[0] if query.present else None
def wait_for_message(self, field_id, message):
    """
    Wait for `message` to appear in the field's message area.

    Polls message_for_field(field_id) (treating None as '') until the
    expected text is contained in it.
    """
    EmptyPromise(
        lambda: message in (self.message_for_field(field_id) or ''),
        # BUG FIX: the promise description previously read "Messsage".
        "Message \"{0}\" is visible.".format(message)
    ).fulfill()
def indicator_for_field(self, field_id):
"""
Return the name of the current indicator in a field.
"""
self.wait_for_field(field_id)
query = self.q(css='.u-field-{} .u-field-message i'.format(field_id))
return [
class_name for class_name
in query.attrs('class')[0].split(' ')
if class_name.startswith('message')
][0].partition('-')[2] if query.present else None
def wait_for_indicator(self, field_id, indicator):
"""
Wait for an indicator to appear in a field.
"""
EmptyPromise(
lambda: indicator == self.indicator_for_field(field_id),
"Indicator \"{0}\" is visible.".format(self.indicator_for_field(field_id))
).fulfill()
def make_field_editable(self, field_id):
"""
Make a field editable.
"""
query = self.q(css='.u-field-{}'.format(field_id))
if not query.present:
return None
field_classes = query.attrs('class')[0].split()
if 'mode-placeholder' in field_classes or 'mode-display' in field_classes:
if field_id == 'bio':
self.q(css='.u-field-bio > .wrapper-u-field').first.click()
else:
self.q(css='.u-field-{}'.format(field_id)).first.click()
def value_for_readonly_field(self, field_id):
"""
Return the value in a readonly field.
"""
self.wait_for_field(field_id)
return self.value_for_text_field(field_id)
def value_for_text_field(self, field_id, value=None):
"""
Get or set the value of a text field.
"""
self.wait_for_field(field_id)
query = self.q(css='.u-field-{} input'.format(field_id))
if not query.present:
return None
if value is not None:
current_value = query.attrs('value')[0]
query.results[0].send_keys(u'\ue003' * len(current_value)) # Delete existing value.
query.results[0].send_keys(value) # Input new value
query.results[0].send_keys(u'\ue007') # Press Enter
return query.attrs('value')[0]
def value_for_textarea_field(self, field_id, value=None):
"""
Get or set the value of a textarea field.
"""
self.wait_for_field(field_id)
self.make_field_editable(field_id)
query = self.q(css='.u-field-{} textarea'.format(field_id))
if not query.present:
return None
if value is not None:
query.fill(value)
query.results[0].send_keys(u'\ue004') # Focus Out using TAB
if self.mode_for_field(field_id) == 'edit':
return query.text[0]
else:
return self.get_non_editable_mode_value(field_id)
def get_non_editable_mode_value(self, field_id):
"""
Return value of field in `display` or `placeholder` mode.
"""
self.wait_for_field(field_id)
return self.q(css='.u-field-{} .u-field-value .u-field-value-readonly'.format(field_id)).text[0]
def value_for_dropdown_field(self, field_id, value=None):
"""
Get or set the value in a dropdown field.
"""
self.wait_for_field(field_id)
self.make_field_editable(field_id)
query = self.q(css='.u-field-{} select'.format(field_id))
if not query.present:
return None
if value is not None:
select_option_by_text(query, value)
if self.mode_for_field(field_id) == 'edit':
return get_selected_option_text(query)
else:
return self.get_non_editable_mode_value(field_id)
def link_title_for_link_field(self, field_id):
"""
Return the title of the link in a link field.
"""
self.wait_for_field(field_id)
query = self.q(css='.u-field-link-title-{}'.format(field_id))
return query.text[0] if query.present else None
def wait_for_link_title_for_link_field(self, field_id, expected_title):
"""
Wait until the title of the specified link field equals expected_title.
"""
return EmptyPromise(
lambda: self.link_title_for_link_field(field_id) == expected_title,
"Link field with link title \"{0}\" is visible.".format(expected_title)
).fulfill()
def click_on_link_in_link_field(self, field_id):
"""
Click the link in a link field.
"""
self.wait_for_field(field_id)
query = self.q(css='.u-field-{} a'.format(field_id))
if query.present:
query.first.click()
|
DrLuke/gpnshader | refs/heads/master | oldmodules/openglbase.py | 1 | __author__ = 'drluke'
from baseModule import BaseNode, Pin
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.arrays import *
from OpenGL.GL import shaders
import OpenGL
from OpenGL.GL.ARB.framebuffer_object import *
from OpenGL.GL.EXT.framebuffer_object import *
from PyQt5.QtWidgets import QDialog
import numpy as np
import traceback
import os
import PIL
from PIL import Image, ImageFont, ImageDraw
__nodes__ = ["Quad", "ShaderProgram", "RenderVAO", "UniformsContainer", "TextureContainer"]
class Quad(BaseNode):
nodeName = "drluke.openglbase.Quad"
name = "Quad"
desc = "This is a quad shape, consisting of 2 triangles."
category = "Shapes"
placable = True
class settingsDialog(QDialog):
""" Dialog for setting vertex points """
def __init__(self, extraData, sheetview, sheethandler):
super().__init__()
print(extraData)
self.data = {"Test": "yes"}
def init(self):
vertices = np.array([
[1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0], # Top right
[-1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0, 1.0], # Top Left
[1.0, -1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0], # Bottom Right
[-1.0, -1.0, 0.0, 1.0, 1.0, 0.0, 0, 0], # Bottom Left
[1.0, -1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0], # Bottom Right
[-1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0, 1.0] # Top Left
], 'f')
self.vao = glGenVertexArrays(1)
self.vbo = glGenBuffers(1)
glBindVertexArray(self.vao)
glBindBuffer(GL_ARRAY_BUFFER, self.vbo)
glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * vertices.itemsize, None)
glEnableVertexAttribArray(0)
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * vertices.itemsize, ctypes.c_void_p(3 * vertices.itemsize))
glEnableVertexAttribArray(1)
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * vertices.itemsize, ctypes.c_void_p(6 * vertices.itemsize))
glEnableVertexAttribArray(2)
glBindBuffer(GL_ARRAY_BUFFER, 0)
glBindVertexArray(0)
def delete(self):
glDeleteBuffers(1, [self.vbo])
glDeleteVertexArrays(1, [self.vao])
def getVao(self):
return self.vao
def getVbo(self):
return self.vbo
def getTricount(self):
return int(2)
inputDefs = [
]
outputDefs = [
Pin("vbo", "vbo", getVbo),
Pin("vao", "vao", getVao),
Pin("tricount", "int", getTricount)
]
class ShaderProgram(BaseNode):
nodeName = "drluke.openglbase.ShaderProgram"
name = "Shader Program"
desc = "Generate shader program from vertex and fragment shader."
category = "Shaders"
placable = True
def init(self):
self.defaultFragmentShaderCode = """
#version 330 core
in vec3 ourColor;
in vec2 ourTexcoord;
out vec4 outColor;
void main()
{
outColor = vec4(ourColor.r, ourColor.g, ourColor.b, 1.0);
}
"""
self.defaultVertexShaderCode = """
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout (location = 2) in vec2 texcoord;
out vec3 ourColor;
out vec2 ourTexcoord;
void main()
{
gl_Position = vec4(position.x, position.y, position.z, 1.0);
ourColor = color;
ourTexcoord = texcoord;
}
"""
def run(self):
self.vertexShaderCode = self.getInput(1)
self.fragmentShaderCode = self.getInput(2)
# -- Fragment Shader
if not self.fragmentShaderCode or not isinstance(self.fragmentShaderCode, str):
self.fragmentShaderCode = self.defaultFragmentShaderCode
try:
self.fragmentShader = shaders.compileShader(self.fragmentShaderCode, GL_FRAGMENT_SHADER)
except:
print(traceback.print_exc())
self.fragmentShader = shaders.compileShader(self.defaultFragmentShaderCode, GL_FRAGMENT_SHADER)
# -- Vertex Shader
if not self.vertexShaderCode or not isinstance(self.vertexShaderCode, str):
self.vertexShaderCode = self.defaultVertexShaderCode
try:
self.vertexShader = shaders.compileShader(self.vertexShaderCode, GL_VERTEX_SHADER)
except:
print(traceback.print_exc())
self.vertexShader = shaders.compileShader(self.defaultVertexShaderCode, GL_VERTEX_SHADER)
# -- Generate Shader program
if isinstance(self.fragmentShader, int) and isinstance(self.vertexShader, int):
self.shaderprogram = shaders.compileProgram(self.fragmentShader, self.vertexShader)
self.fireExec(0)
def getShaderprogram(self):
return self.shaderprogram
inputDefs = [
Pin("Generate", "exec", run, "Create the shaderprogram with new input"),
Pin("Vertex Shader Code", "string", None),
Pin("Fragment Shader Code", "string", None)
]
outputDefs = [
Pin("exec", "exec", None),
Pin("Shader Program", "shaderprogram", getShaderprogram)
]
class RenderVAO(BaseNode):
    """Node that draws a VAO with a given shader program, applying any
    connected uniforms and textures first."""
    nodeName = "drluke.openglbase.RenderVAO"

    name = "Render VAO"
    desc = "Render shader program with VAO"
    category = "Shaders"
    placable = True

    def init(self):
        pass

    def run(self):
        """Bind the shader program, upload uniforms, bind textures and
        issue the draw call for the connected VAO."""
        glEnable(GL_TEXTURE_2D)
        #glEnable(GL_BLEND)
        #glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        self.shaderprogram = self.getInput(1)
        self.vao = self.getInput(2)
        self.uniformsContainer = self.getInput(4)
        self.textureContainer = self.getInput(5)

        glUseProgram(self.shaderprogram)
        if self.uniformsContainer is not None:
            for uniformName in self.uniformsContainer:
                uniformLoc = glGetUniformLocation(self.shaderprogram, uniformName)
                values = self.uniformsContainer[uniformName]
                if uniformLoc != -1 and None not in values:  # Location is valid
                    # BUG FIX: the original tested `len(...) == 2` for the
                    # 3- and 4-component cases as well, so vec3/vec4
                    # uniforms were silently never uploaded.
                    if len(values) == 1:
                        glUniform1f(uniformLoc, values[0])
                    elif len(values) == 2:
                        glUniform2f(uniformLoc, values[0], values[1])
                    elif len(values) == 3:
                        glUniform3f(uniformLoc, values[0], values[1], values[2])
                    elif len(values) == 4:
                        glUniform4f(uniformLoc, values[0], values[1], values[2], values[3])

        if self.textureContainer is not None:
            for key in self.textureContainer:
                if 0 <= key <= 3:
                    # GL_TEXTUREi constants are consecutive, so the texture
                    # unit can be computed instead of the old if/elif chain.
                    glActiveTexture(GL_TEXTURE0 + key)
                    glBindTexture(GL_TEXTURE_2D, self.textureContainer[key])

        glBindVertexArray(self.vao)
        glDrawArrays(GL_TRIANGLES, 0, self.getInput(3) * 3)
        glBindVertexArray(0)

        #glDisable(GL_BLEND)
        glDisable(GL_TEXTURE_2D)

    inputDefs = [
        Pin("exec", "exec", run, "Start render"),
        Pin("Shader Program", "shaderprogram", None),
        Pin("VAO", "vao", None),
        Pin("Tricount", "int", None, "Number of Tris to render"),
        Pin("Uniforms", "uniformscontainer", None),
        Pin("Textures", "texturecontainer", None)
    ]

    outputDefs = [
    ]
class UniformsContainer(BaseNode):
nodeName = "drluke.openglbase.UniformsContainer"
name = "Uniforms Container"
desc = "Add Uniforms to a container, and then pass them into a render node!"
category = "Shaders"
placable = True
def init(self):
self.uniformsContainer = None
def run(self):
self.uniformsContainer = self.getInput(1)
if self.uniformsContainer is None:
self.uniformsContainer = {}
if isinstance(self.getInput(2), str) and isinstance(self.getInput(3), list):
self.uniformsContainer[self.getInput(2)] = self.getInput(3)
self.fireExec(0)
def getContainer(self):
return self.uniformsContainer
inputDefs = [
Pin("Add", "exec", run, "Add Uniform to Container"),
Pin("Container", "uniformscontainer", None, "Leave this unconnected to create new container"),
Pin("Uniform Name", "string", None),
Pin("Uniform", "list", None)
]
outputDefs = [
Pin("exec", "exec", None),
Pin("Container", "uniformscontainer", getContainer),
]
class TextureContainer(BaseNode):
    """Node that loads up to four image files into GL textures, keyed
    0-3, for use by RenderVAO."""
    nodeName = "drluke.openglbase.TextureContainer"

    name = "Texture Container"
    desc = "Add Uniforms to a container, and then pass them into a render node!"
    category = "Shaders"
    placable = True

    def init(self):
        self.textureContainer = None

    def run(self):
        """(Re)build the texture container from the four path inputs.

        BUG FIX: the original only ever read "Path 0" (input 1); the
        declared "Path 1".."Path 3" pins were silently ignored.  It also
        never fired its exec output, unlike UniformsContainer.
        """
        self.textureContainer = {}
        for unit in range(4):
            path = self.getInput(1 + unit)
            if path is not None and os.path.exists(path):
                img = Image.open(path)
                # Flip vertically: image rows are top-down, GL expects bottom-up.
                img = img.convert('RGBA').transpose(PIL.Image.FLIP_TOP_BOTTOM)
                img_data = np.array(list(img.getdata()), 'B')
                texture = glGenTextures(1)
                self.textureContainer[unit] = texture
                glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
                glBindTexture(GL_TEXTURE_2D, texture)
                glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
                glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
                glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
                glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, img.size[0], img.size[1], 0, GL_RGBA, GL_UNSIGNED_BYTE, img_data)
                glGenerateMipmap(GL_TEXTURE_2D)
        self.fireExec(0)

    def getContainer(self):
        return self.textureContainer

    inputDefs = [
        Pin("Create", "exec", run, "Add Uniform to Container"),
        Pin("Path 0", "string", None),
        Pin("Path 1", "string", None),
        Pin("Path 2", "string", None),
        Pin("Path 3", "string", None)
    ]

    outputDefs = [
        Pin("exec", "exec", None),
        Pin("Textures", "texturecontainer", getContainer),
    ]
ygenc/onlineLDA | refs/heads/master | onlineldavb_new/build/scipy/scipy/fftpack/setup.py | 11 | #!/usr/bin/env python
# Created by Pearu Peterson, August 2002
from os.path import join
def configuration(parent_package='',top_path=None):
    """Return the numpy.distutils Configuration for scipy.fftpack.

    Registers the tests/benchmarks data directories, the Fortran helper
    libraries (dfftpack, fftpack), and the _fftpack and convolve
    extension modules that link against them.
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration('fftpack',parent_package, top_path)
    config.add_data_dir('tests')
    config.add_data_dir('benchmarks')
    # Fortran sources are compiled into static helper libraries the
    # extensions below link against.
    config.add_library('dfftpack',
                       sources=[join('src/dfftpack','*.f')])
    config.add_library('fftpack',
                       sources=[join('src/fftpack','*.f')])
    sources = ['fftpack.pyf','src/zfft.c','src/drfft.c','src/zrfft.c',
               'src/zfftnd.c', 'src/dct.c.src']
    config.add_extension('_fftpack',
                         sources=sources,
                         libraries=['dfftpack', 'fftpack'],
                         include_dirs=['src'])
    config.add_extension('convolve',
                         sources=['convolve.pyf','src/convolve.c'],
                         libraries=['dfftpack'],
                         )
    return config
if __name__ == '__main__':
from numpy.distutils.core import setup
from fftpack_version import fftpack_version
setup(version=fftpack_version,
description='fftpack - Discrete Fourier Transform package',
author='Pearu Peterson',
author_email = 'pearu@cens.ioc.ee',
maintainer_email = 'scipy-dev@scipy.org',
license = 'SciPy License (BSD Style)',
**configuration(top_path='').todict())
|
NeuralEnsemble/neuroConstruct | refs/heads/master | lib/jython/Lib/distutils/command/build_py.py | 176 | """distutils.command.build_py
Implements the Distutils 'build_py' command."""
__revision__ = "$Id$"
import os
import sys
from glob import glob
from distutils.core import Command
from distutils.errors import DistutilsOptionError, DistutilsFileError
from distutils.util import convert_path
from distutils import log
class build_py(Command):
description = "\"build\" pure Python modules (copy to build directory)"
user_options = [
('build-lib=', 'd', "directory to \"build\" (copy) to"),
('compile', 'c', "compile .py to .pyc"),
('no-compile', None, "don't compile .py files [default]"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
]
boolean_options = ['compile', 'force']
negative_opt = {'no-compile' : 'compile'}
def initialize_options(self):
    """Set every user-settable option to its pre-parse default."""
    for attr in ('build_lib', 'py_modules', 'package',
                 'package_data', 'package_dir', 'force'):
        setattr(self, attr, None)
    # Bytecode compilation is off and unoptimized by default.
    self.compile = 0
    self.optimize = 0
def finalize_options(self):
self.set_undefined_options('build',
('build_lib', 'build_lib'),
('force', 'force'))
# Get the distribution options that are aliases for build_py
# options -- list of packages and list of modules.
self.packages = self.distribution.packages
self.py_modules = self.distribution.py_modules
self.package_data = self.distribution.package_data
self.package_dir = {}
if self.distribution.package_dir:
for name, path in self.distribution.package_dir.items():
self.package_dir[name] = convert_path(path)
self.data_files = self.get_data_files()
# Ick, copied straight from install_lib.py (fancy_getopt needs a
# type system! Hell, *everything* needs a type system!!!)
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
assert 0 <= self.optimize <= 2
except (ValueError, AssertionError):
raise DistutilsOptionError("optimize must be 0, 1, or 2")
def run(self):
# XXX copy_file by default preserves atime and mtime. IMHO this is
# the right thing to do, but perhaps it should be an option -- in
# particular, a site administrator might want installed files to
# reflect the time of installation rather than the last
# modification time before the installed release.
# XXX copy_file by default preserves mode, which appears to be the
# wrong thing to do: if a file is read-only in the working
# directory, we want it to be installed read/write so that the next
# installation of the same module distribution can overwrite it
# without problems. (This might be a Unix-specific issue.) Thus
# we turn off 'preserve_mode' when copying to the build directory,
# since the build directory is supposed to be exactly what the
# installation will look like (ie. we preserve mode when
# installing).
# Two options control which modules will be installed: 'packages'
# and 'py_modules'. The former lets us work with whole packages, not
# specifying individual modules at all; the latter is for
# specifying modules one-at-a-time.
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
self.byte_compile(self.get_outputs(include_bytecode=0))
def get_data_files(self):
"""Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
data = []
if not self.packages:
return data
for package in self.packages:
# Locate package source directory
src_dir = self.get_package_dir(package)
# Compute package build directory
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
# Length of path to strip from found files
plen = 0
if src_dir:
plen = len(src_dir)+1
# Strip directory from globbed filenames
filenames = [
file[plen:] for file in self.find_data_files(package, src_dir)
]
data.append((package, src_dir, build_dir, filenames))
return data
def find_data_files(self, package, src_dir):
"""Return filenames for package's data files in 'src_dir'"""
globs = (self.package_data.get('', [])
+ self.package_data.get(package, []))
files = []
for pattern in globs:
# Each pattern has to be converted to a platform-specific path
filelist = glob(os.path.join(src_dir, convert_path(pattern)))
# Files that match more than one pattern are only added once
files.extend([fn for fn in filelist if fn not in files])
return files
def build_package_data(self):
"""Copy data files into build directory"""
for package, src_dir, build_dir, filenames in self.data_files:
for filename in filenames:
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
self.copy_file(os.path.join(src_dir, filename), target,
preserve_mode=False)
def get_package_dir(self, package):
"""Return the directory, relative to the top of the source
distribution, where package 'package' should be found
(at least according to the 'package_dir' option, if any)."""
path = package.split('.')
if not self.package_dir:
if path:
return os.path.join(*path)
else:
return ''
else:
tail = []
while path:
try:
pdir = self.package_dir['.'.join(path)]
except KeyError:
tail.insert(0, path[-1])
del path[-1]
else:
tail.insert(0, pdir)
return os.path.join(*tail)
else:
# Oops, got all the way through 'path' without finding a
# match in package_dir. If package_dir defines a directory
# for the root (nameless) package, then fallback on it;
# otherwise, we might as well have not consulted
# package_dir at all, as we just use the directory implied
# by 'tail' (which should be the same as the original value
# of 'path' at this point).
pdir = self.package_dir.get('')
if pdir is not None:
tail.insert(0, pdir)
if tail:
return os.path.join(*tail)
else:
return ''
def check_package(self, package, package_dir):
# Empty dir name means current directory, which we can probably
# assume exists. Also, os.path.exists and isdir don't know about
# my "empty string means current dir" convention, so we have to
# circumvent them.
if package_dir != "":
if not os.path.exists(package_dir):
raise DistutilsFileError(
"package directory '%s' does not exist" % package_dir)
if not os.path.isdir(package_dir):
raise DistutilsFileError(
"supposed package directory '%s' exists, "
"but is not a directory" % package_dir)
# Require __init__.py for all but the "root package"
if package:
init_py = os.path.join(package_dir, "__init__.py")
if os.path.isfile(init_py):
return init_py
else:
log.warn(("package init file '%s' not found " +
"(or not a regular file)"), init_py)
# Either not in a package at all (__init__.py not expected), or
# __init__.py doesn't exist -- so don't return the filename.
return None
def check_module(self, module, module_file):
if not os.path.isfile(module_file):
log.warn("file %s (for module %s) not found", module_file, module)
return False
else:
return True
def find_package_modules(self, package, package_dir):
self.check_package(package, package_dir)
module_files = glob(os.path.join(package_dir, "*.py"))
modules = []
setup_script = os.path.abspath(self.distribution.script_name)
for f in module_files:
abs_f = os.path.abspath(f)
if abs_f != setup_script:
module = os.path.splitext(os.path.basename(f))[0]
modules.append((package, module, f))
else:
self.debug_print("excluding %s" % setup_script)
return modules
def find_modules(self):
"""Finds individually-specified Python modules, ie. those listed by
module name in 'self.py_modules'. Returns a list of tuples (package,
module_base, filename): 'package' is a tuple of the path through
package-space to the module; 'module_base' is the bare (no
packages, no dots) module name, and 'filename' is the path to the
".py" file (relative to the distribution root) that implements the
module.
"""
# Map package names to tuples of useful info about the package:
# (package_dir, checked)
# package_dir - the directory where we'll find source files for
# this package
# checked - true if we have checked that the package directory
# is valid (exists, contains __init__.py, ... ?)
packages = {}
# List of (package, module, filename) tuples to return
modules = []
# We treat modules-in-packages almost the same as toplevel modules,
# just the "package" for a toplevel is empty (either an empty
# string or empty list, depending on context). Differences:
# - don't check for __init__.py in directory for empty package
for module in self.py_modules:
path = module.split('.')
package = '.'.join(path[0:-1])
module_base = path[-1]
try:
(package_dir, checked) = packages[package]
except KeyError:
package_dir = self.get_package_dir(package)
checked = 0
if not checked:
init_py = self.check_package(package, package_dir)
packages[package] = (package_dir, 1)
if init_py:
modules.append((package, "__init__", init_py))
# XXX perhaps we should also check for just .pyc files
# (so greedy closed-source bastards can distribute Python
# modules too)
module_file = os.path.join(package_dir, module_base + ".py")
if not self.check_module(module, module_file):
continue
modules.append((package, module_base, module_file))
return modules
def find_all_modules(self):
"""Compute the list of all modules that will be built, whether
they are specified one-module-at-a-time ('self.py_modules') or
by whole packages ('self.packages'). Return a list of tuples
(package, module, module_file), just like 'find_modules()' and
'find_package_modules()' do."""
modules = []
if self.py_modules:
modules.extend(self.find_modules())
if self.packages:
for package in self.packages:
package_dir = self.get_package_dir(package)
m = self.find_package_modules(package, package_dir)
modules.extend(m)
return modules
def get_source_files(self):
return [module[-1] for module in self.find_all_modules()]
def get_module_outfile(self, build_dir, package, module):
outfile_path = [build_dir] + list(package) + [module + ".py"]
return os.path.join(*outfile_path)
def get_outputs(self, include_bytecode=1):
modules = self.find_all_modules()
outputs = []
for (package, module, module_file) in modules:
package = package.split('.')
filename = self.get_module_outfile(self.build_lib, package, module)
outputs.append(filename)
if include_bytecode:
if self.compile:
outputs.append(filename + "c")
if self.optimize > 0:
outputs.append(filename + "o")
outputs += [
os.path.join(build_dir, filename)
for package, src_dir, build_dir, filenames in self.data_files
for filename in filenames
]
return outputs
def build_module(self, module, module_file, package):
if isinstance(package, str):
package = package.split('.')
elif not isinstance(package, (list, tuple)):
raise TypeError(
"'package' must be a string (dot-separated), list, or tuple")
# Now put the module source file into the "build" area -- this is
# easy, we just copy it somewhere under self.build_lib (the build
# directory for Python source).
outfile = self.get_module_outfile(self.build_lib, package, module)
dir = os.path.dirname(outfile)
self.mkpath(dir)
return self.copy_file(module_file, outfile, preserve_mode=0)
def build_modules(self):
modules = self.find_modules()
for (package, module, module_file) in modules:
# Now "build" the module -- ie. copy the source file to
# self.build_lib (the build directory for Python source).
# (Actually, it gets copied to the directory for this package
# under self.build_lib.)
self.build_module(module, module_file, package)
def build_packages(self):
for package in self.packages:
# Get list of (package, module, module_file) tuples based on
# scanning the package directory. 'package' is only included
# in the tuple so that 'find_modules()' and
# 'find_package_tuples()' have a consistent interface; it's
# ignored here (apart from a sanity check). Also, 'module' is
# the *unqualified* module name (ie. no dots, no package -- we
# already know its package!), and 'module_file' is the path to
# the .py file, relative to the current directory
# (ie. including 'package_dir').
package_dir = self.get_package_dir(package)
modules = self.find_package_modules(package, package_dir)
# Now loop over the modules we found, "building" each one (just
# copy it to self.build_lib).
for (package_, module, module_file) in modules:
assert package == package_
self.build_module(module, module_file, package)
def byte_compile(self, files):
if sys.dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
prefix = self.build_lib
if prefix[-1] != os.sep:
prefix = prefix + os.sep
# XXX this code is essentially the same as the 'byte_compile()
# method of the "install_lib" command, except for the determination
# of the 'prefix' string. Hmmm.
if self.compile:
byte_compile(files, optimize=0,
force=self.force, prefix=prefix, dry_run=self.dry_run)
if self.optimize > 0:
byte_compile(files, optimize=self.optimize,
force=self.force, prefix=prefix, dry_run=self.dry_run)
|
manipopopo/tensorflow | refs/heads/master | tensorflow/contrib/hooks/python/__init__.py | 140 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental `SessionRunHooks` for use with `MonitoredSession`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.hooks.python.training import *
# pylint: enable=wildcard-import
|
Cynerva/cxmanage | refs/heads/master | cxmanage_api/tests/fabric_test.py | 4 | # pylint: disable=protected-access
# pylint: disable=too-many-public-methods
# Copyright (c) 2012-2013, Calxeda Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Calxeda Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""Calxeda: fabric_test.py """
import random
import unittest
from mock import call
from cxmanage_api.fabric import Fabric
from cxmanage_api.tftp import InternalTftp, ExternalTftp
from cxmanage_api.firmware_package import FirmwarePackage
from cxmanage_api.cx_exceptions import CommandFailedError
from cxmanage_api.tests import DummyNode, DummyFailNode
class FabricTest(unittest.TestCase):
    """ Test the various Fabric commands """
    def setUp(self):
        # Set up the controller and add targets
        self.fabric = Fabric(DummyNode.ip_addresses[0], node=DummyNode)
        self.nodes = [DummyNode(i) for i in DummyNode.ip_addresses]
        self.fabric._nodes = dict((i, self.nodes[i])
                                  for i in xrange(len(self.nodes)))
    def test_tftp(self):
        """ Test the tftp property """
        tftp = InternalTftp()
        self.fabric.tftp = tftp
        self.assertTrue(self.fabric.tftp is tftp)
        for node in self.nodes:
            self.assertTrue(node.tftp is tftp)
        tftp = ExternalTftp("127.0.0.1")
        self.fabric.tftp = tftp
        self.assertTrue(self.fabric.tftp is tftp)
        for node in self.nodes:
            self.assertTrue(node.tftp is tftp)
    def test_get_mac_addresses(self):
        """ Test get_mac_addresses command """
        self.fabric.get_mac_addresses()
        self.assertEqual(
            self.nodes[0].method_calls,
            [call.get_fabric_macaddrs()]
        )
        for node in self.nodes[1:]:
            self.assertEqual(node.method_calls, [])
    def test_get_uplink_info(self):
        """ Test get_uplink_info command """
        self.fabric.get_uplink_info()
        for node in self.nodes:
            self.assertEqual(node.method_calls, [call.get_uplink_info()])
    def test_get_uplink_speed(self):
        """ Test get_uplink_speed command """
        self.fabric.get_uplink_speed()
        for node in self.nodes:
            self.assertEqual(node.method_calls, [call.get_uplink_speed()])
    def test_get_uplink(self):
        """ Test get_uplink command """
        self.assertEqual(self.fabric.get_uplink(iface=0), 0)
    def test_set_uplink(self):
        """ Test set_uplink command """
        iface, uplink = 0, 0
        self.fabric.set_uplink(iface=iface, uplink=uplink)
        self.assertEqual(
            self.nodes[0].bmc.method_calls,
            [call.fabric_config_set_uplink(iface=iface, uplink=uplink)]
        )
    def test_get_sensors(self):
        """ Test get_sensors command """
        self.fabric.get_sensors()
        self.fabric.get_sensors("Node Power")
        for node in self.nodes:
            self.assertEqual(node.method_calls, [
                call.get_sensors(""), call.get_sensors("Node Power")
            ])
    def test_get_firmware_info(self):
        """ Test get_firmware_info command """
        self.fabric.get_firmware_info()
        for node in self.nodes:
            self.assertEqual(node.method_calls, [call.get_firmware_info()])
    def test_is_updatable(self):
        """ Test is_updatable command """
        package = FirmwarePackage()
        self.fabric.is_updatable(package)
        for node in self.nodes:
            self.assertEqual(node.method_calls, [
                call.is_updatable(package, "INACTIVE", None)
            ])
    def test_update_firmware(self):
        """ Test update_firmware command """
        package = FirmwarePackage()
        self.fabric.update_firmware(package)
        for node in self.nodes:
            self.assertEqual(node.method_calls, [
                call.update_firmware(package, "INACTIVE", None)
            ])
    def test_config_reset(self):
        """ Test config_reset command """
        self.fabric.config_reset()
        for node in self.nodes:
            self.assertEqual(node.method_calls, [call.config_reset()])
    def test_set_boot_order(self):
        """ Test set_boot_order command """
        boot_args = "disk0,pxe,retry"
        self.fabric.set_boot_order(boot_args)
        for node in self.nodes:
            self.assertEqual(
                node.method_calls, [call.set_boot_order(boot_args)]
            )
    def test_get_boot_order(self):
        """ Test get_boot_order command """
        self.fabric.get_boot_order()
        for node in self.nodes:
            self.assertEqual(node.method_calls, [call.get_boot_order()])
    def test_set_pxe_interface(self):
        """ Test set_pxe_interface command """
        self.fabric.set_pxe_interface("eth0")
        for node in self.nodes:
            self.assertEqual(
                node.method_calls, [call.set_pxe_interface("eth0")]
            )
    def test_get_pxe_interface(self):
        """ Test get_pxe_interface command """
        self.fabric.get_pxe_interface()
        for node in self.nodes:
            self.assertEqual(node.method_calls, [call.get_pxe_interface()])
    def test_get_versions(self):
        """ Test get_versions command """
        self.fabric.get_versions()
        for node in self.nodes:
            self.assertEqual(node.method_calls, [call.get_versions()])
    def test_get_ubootenv(self):
        """ Test get_ubootenv command """
        self.fabric.get_ubootenv()
        for node in self.nodes:
            self.assertEqual(node.method_calls, [call.get_ubootenv()])
    def test_ipmitool_command(self):
        """ Test ipmitool_command command """
        ipmitool_args = "power status"
        self.fabric.ipmitool_command(ipmitool_args)
        for node in self.nodes:
            self.assertEqual(
                node.method_calls, [call.ipmitool_command(ipmitool_args)]
            )
    def test_get_server_ip(self):
        """ Test get_server_ip command """
        self.fabric.get_server_ip("interface", "ipv6", "aggressive")
        for node in self.nodes:
            self.assertEqual(node.method_calls,
                [call.get_server_ip("interface", "ipv6", "aggressive")]
            )
    def test_failed_command(self):
        """ Test a failed command """
        fail_nodes = [DummyFailNode(i) for i in DummyNode.ip_addresses]
        self.fabric._nodes = dict(
            (i, fail_nodes[i]) for i in xrange(len(self.nodes))
        )
        try:
            self.fabric.get_power()
            self.fail()
        except CommandFailedError:
            for node in fail_nodes:
                self.assertEqual(node.method_calls, [call.get_power()])
    def test_primary_node(self):
        """Test the primary_node property
        Currently it should always return node 0.
        """
        self.assertEqual(self.fabric.primary_node, self.nodes[0])
    def test_get_ipsrc(self):
        """Test the get_ipsrc method
        """
        self.fabric.get_ipsrc()
        self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
            call.fabric_config_get_ip_src()
        ])
    def test_set_ipsrc(self):
        """Test the set_ipsrc method"""
        ipsrc = random.randint(1, 2)
        self.fabric.set_ipsrc(ipsrc)
        self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
            call.fabric_config_set_ip_src(ipsrc)
        ])
    def test_apply_fdc(self):
        """Test the apply_factory_default_config method"""
        self.fabric.apply_factory_default_config()
        self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
            call.fabric_config_factory_default()
        ])
    def test_get_ipaddr_base(self):
        """Test the get_ipaddr_base method"""
        self.fabric.get_ipaddr_base()
        self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
            call.fabric_config_get_ip_addr_base()
        ])
    def test_update_config(self):
        """Test the update_config method
        """
        self.fabric.update_config()
        self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
            call.fabric_config_update_config()
        ])
    def test_get_linkspeed(self):
        """Test the get_linkspeed method
        """
        self.fabric.get_linkspeed()
        self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
            call.fabric_config_get_linkspeed()
        ])
    def test_set_linkspeed(self):
        """Test the set_linkspeed method"""
        valid_linkspeeds = [1, 2.5, 5, 7.5, 10]
        linkspeed = random.choice(valid_linkspeeds)
        self.fabric.set_linkspeed(linkspeed)
        self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
            call.fabric_config_set_linkspeed(linkspeed)
        ])
    def test_get_linkspeed_policy(self):
        """Test the get_linkspeed_policy method
        """
        self.fabric.get_linkspeed_policy()
        # Fixed: this previously used assertTrue(x, [...]), which treats
        # the expected call list as the assertion *message* and therefore
        # always passed. Use assertEqual like the sibling tests so the
        # BMC call is actually verified.
        self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
            call.fabric_config_get_linkspeed_policy()
        ])
    def test_set_linkspeed_policy(self):
        """Test the set_linkspeed_policy method"""
        ls_policy = random.randint(0, 1)
        self.fabric.set_linkspeed_policy(ls_policy)
        self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
            call.fabric_config_set_linkspeed_policy(ls_policy)
        ])
    def test_get_link_stats(self):
        """Test the get_link_stats() method."""
        for i in range(0, 5):
            self.fabric.get_link_stats(i)
            for node in self.fabric.nodes.values():
                node.get_link_stats.assert_called_with(i)
    def test_get_linkmap(self):
        """Test the get_linkmap method"""
        self.fabric.get_linkmap()
        for node in self.fabric.nodes.values():
            self.assertTrue(node.get_linkmap.called)
    def test_get_routing_table(self):
        """Test the get_routing_table method"""
        self.fabric.get_routing_table()
        for node in self.fabric.nodes.values():
            self.assertTrue(node.get_routing_table.called)
    def test_get_depth_chart(self):
        """Test the depth_chart method"""
        self.fabric.get_depth_chart()
        for node in self.fabric.nodes.values():
            self.assertTrue(node.get_depth_chart.called)
    def test_get_link_users_factor(self):
        """Test the get_link_users_factor method
        """
        self.fabric.get_link_users_factor()
        self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
            call.fabric_config_get_link_users_factor()
        ])
    def test_set_link_users_factor(self):
        """Test the set_link_users_factor method"""
        lu_factor = random.randint(5, 50)
        self.fabric.set_link_users_factor(lu_factor)
        self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
            call.fabric_config_set_link_users_factor(lu_factor)
        ])
    def test_add_macaddr (self):
        """Test the add_macaddr method"""
        valid_nodeids = [0, 1, 2, 3]
        t_nodeid = random.choice(valid_nodeids)
        valid_ifaces = [0, 1, 2]
        t_iface = random.choice(valid_ifaces)
        t_macaddr = "66:55:44:33:22:11"
        self.fabric.add_macaddr (t_nodeid, t_iface, t_macaddr)
        self.assertTrue(self.fabric.primary_node.bmc.fabric_add_macaddr.called)
    def test_rm_macaddr (self):
        """Test the rm_macaddr method"""
        valid_nodeids = [0, 1, 2, 3]
        t_nodeid = random.choice(valid_nodeids)
        valid_ifaces = [0, 1, 2]
        t_iface = random.choice(valid_ifaces)
        t_macaddr = "66:55:44:33:22:11"
        self.fabric.rm_macaddr (t_nodeid, t_iface, t_macaddr)
        self.assertTrue(self.fabric.primary_node.bmc.fabric_rm_macaddr.called)
    def test_set_macaddr_base(self):
        """Test the set_macaddr_base method"""
        self.fabric.set_macaddr_base("00:11:22:33:44:55")
        for node in self.fabric.nodes.values():
            if node == self.fabric.primary_node:
                self.assertEqual(node.bmc.method_calls, [
                    call.fabric_config_set_macaddr_base(
                        macaddr="00:11:22:33:44:55"
                    )
                ])
            else:
                self.assertEqual(node.bmc.method_calls, [])
    def test_get_macaddr_base(self):
        """Test the get_macaddr_base method"""
        self.assertEqual(self.fabric.get_macaddr_base(), "00:00:00:00:00:00")
        for node in self.fabric.nodes.values():
            if node == self.fabric.primary_node:
                self.assertEqual(
                    node.bmc.method_calls,
                    [call.fabric_config_get_macaddr_base()]
                )
            else:
                self.assertEqual(node.bmc.method_calls, [])
    def test_set_macaddr_mask(self):
        """Test the set_macaddr_mask method"""
        self.fabric.set_macaddr_mask("00:11:22:33:44:55")
        for node in self.fabric.nodes.values():
            if node == self.fabric.primary_node:
                self.assertEqual(node.bmc.method_calls, [
                    call.fabric_config_set_macaddr_mask(
                        mask="00:11:22:33:44:55"
                    )
                ])
            else:
                self.assertEqual(node.bmc.method_calls, [])
    def test_get_macaddr_mask(self):
        """Test the get_macaddr_mask method"""
        self.assertEqual(self.fabric.get_macaddr_mask(), "00:00:00:00:00:00")
        for node in self.fabric.nodes.values():
            if node == self.fabric.primary_node:
                self.assertEqual(
                    node.bmc.method_calls,
                    [call.fabric_config_get_macaddr_mask()]
                )
            else:
                self.assertEqual(node.bmc.method_calls, [])
    def test_composite_bmc(self):
        """ Test the CompositeBMC member """
        with self.assertRaises(AttributeError):
            self.fabric.cbmc.fake_method()
        self.fabric.cbmc.set_chassis_power("off")
        results = self.fabric.cbmc.get_chassis_status()
        self.assertEqual(len(results), len(self.fabric.nodes))
        for node_id in self.fabric.nodes:
            self.assertFalse(results[node_id].power_on)
        for node in self.fabric.nodes.values():
            self.assertEqual(node.bmc.method_calls, [
                call.set_chassis_power("off"),
                call.get_chassis_status()
            ])
|
pixelrebel/st2 | refs/heads/master | st2common/st2common/util/url.py | 13 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'get_url_without_trailing_slash'
]
def get_url_without_trailing_slash(value):
    """
    Function which strips a trailing slash from the provided url if one is present.

    :param value: URL to format.
    :type value: ``str``

    :rtype: ``str``
    """
    if value.endswith('/'):
        return value[:-1]
    return value
|
bellwethers-in-se/defects | refs/heads/master | src/old/Test Oracle/dtree.py | 2 | from __future__ import division
from lib import *
from demos import *
from table import *
from fi import *
from abcd import *
# from learn import *
from settings import *
from dtree import *
import numpy as np
import sys
sys.dont_write_bytecode = True
from pdb import set_trace
def nl():
    """Emit a single blank line (Python 2 print statement)."""
    print ""
def rankedFeatures(rows, t, features=None):
    """Rank candidate split features by expected class entropy (ascending).

    For each feature, partitions `rows` by the feature's value, counts the
    class symbols per partition, and scores the feature with the weighted
    average entropy of its partitions. Returns a sorted list of
    (entropy, feature, {value: Sym}, {value: rows}) tuples; the first
    entry is the most informative split.
    """
    features = features if features else t.indep
    klass = t.klass[0].col
    def ranked(f):
        # syms[v]: class-symbol counter for rows where f == v;
        # at[v]: the rows themselves.
        syms, at, n = {}, {}, len(rows)
        for x in f.counts.keys():
            syms[x] = Sym()
        for row in rows:
            key = row.cells[f.col]
            val = row.cells[klass]
            syms[key] + val
            at[key] = at.get(key, []) + [row]
        # Weighted mean of the per-partition class entropies.
        e = 0
        for val in syms.values():
            if val.n:
                e += val.n / n * val.ent()
        return e, f, syms, at
    # Fixed: removed a leftover pdb set_trace() breakpoint that halted
    # every call to this function.
    return sorted(ranked(f) for f in features)
def infogain(t, opt=The.tree):
    """Mark and return the most informative features of table `t`.

    Clears every header's `selected` flag, ranks features via
    rankedFeatures(), then flags and returns the chosen ones.
    """
    # Fixed: removed a dead inner helper `norm` that referenced undefined
    # names `lo`/`hi` and would have raised NameError if ever called.
    for f in t.headers:
        f.selected = False
    lst = rankedFeatures(t._rows, t)
    # Keep all ranked features, but never fewer than one.
    n = max(int(len(lst)), 1)
    for _, f, _, _ in lst[:n]:
        f.selected = True
    return [f for e, f, syms, at in lst[:n]]
def tdiv1(t, rows, lvl=-1, asIs=10 ** 32, up=None, features=None, branch=None,
          f=None, val=None, opt=None):
    """Recursively grow one decision-tree node over `rows`.

    `asIs` is the entropy of this split (recursion stops when it reaches
    zero or depth exceeds 10); `branch` records the (feature, value)
    decisions taken to reach this node.
    """
    # Fixed: `branch` used a mutable default argument ([]); use None as
    # the sentinel instead.
    branch = [] if branch is None else branch
    here = Thing(t=t, kids=[], f=f, val=val, up=up, lvl=lvl, rows=rows, modes={},
                 branch=branch)
    if f and opt.debug:
        print ('|.. ' * lvl) + f.name, "=", val, len(rows)
    here.mode = classStats(here).mode()
    # Stop on depth cap or on a pure (zero-entropy) split.
    if lvl > 10:
        return here
    if asIs == 0:
        return here
    _, splitter, syms, splits = rankedFeatures(rows, t, features)[0]
    for key in sorted(splits.keys()):
        someRows = splits[key]
        toBe = syms[key].ent()
        # Optionally skip sub-splits that do not reduce entropy.
        if opt.variancePrune and lvl > 1 and toBe >= asIs:
            continue
        if opt.min <= len(someRows) < len(rows):
            here.kids += [tdiv1(t, someRows, lvl=lvl + 1, asIs=toBe, features=features,
                                up=here, f=splitter,
                                val=key, branch=branch + [(splitter, key)], opt=opt)]
    return here
def tdiv(tbl, rows=None, opt=The.tree):
    """Grow (and optionally prune) a decision tree for table `tbl`."""
    if not opt:
        opt = The.tree
    if not rows:
        rows = tbl._rows
    chosen = infogain(tbl, opt)
    root = tdiv1(tbl, rows, opt=opt, features=chosen, branch=[])
    if opt.prune:
        modes(root)
        prune(root)
    return root
def modes(n):
    """Fill in and return n.modes: every class mode seen in n's subtree,
    as a {mode: True} dict (memoized on the node)."""
    if not n.modes:
        seen = {n.mode: True}
        for child in n.kids:
            for mode in modes(child):
                seen[mode] = True
        n.modes = seen
    return n.modes
def nmodes(n):
    """Number of distinct class modes recorded under node `n`."""
    return len(n.modes)
def prune(n):
    """Collapse any subtree whose nodes all predict a single mode."""
    if nmodes(n) == 1:
        n.kids = []
    for child in n.kids:
        prune(child)
def classStats(n):
    """Summarize the class-column symbols of node `n`'s rows as a Sym."""
    klass_col = n.t.klass[0].col
    return Sym(row.cells[klass_col] for row in n.rows)
def showTdiv(n, lvl=-1):
    """Recursively pretty-print the tree rooted at `n`.

    Internal nodes print 'feature=value : mode #modes'; leaves print the
    percentage support of the majority class. (Python 2 print statement;
    `/` is true division via the __future__ import at the top of this
    file.)
    """
    if n.f:
        say(('|..' * lvl) + str(n.f.name) + "=" + str(n.val) +
            "\t:" + str(n.mode) + " #" + str(nmodes(n)))
    if n.kids:
        nl()
        for k in n.kids:
            showTdiv(k, lvl + 1)
    else:
        # Leaf: report the purity and the row support of this leaf.
        s = classStats(n)
        print ' ' + str(int(100 * s.counts[s.mode()] / len(n.rows))) + '% * ' + str(len(n.rows))
def dtnodes(tree, lvl=0):
    """Yield (node, depth) pairs for every node of `tree`, pre-order."""
    if not tree:
        return
    yield tree, lvl
    for child in tree.kids:
        for pair in dtnodes(child, lvl + 1):
            yield pair
def dtleaves(tree):
    """Yield only the terminal (childless) nodes of `tree`."""
    for node, _ in dtnodes(tree):
        if not node.kids:
            yield node
def xval(tbl, m=None, n=None, opt=The.tree):
    """Generate m repeats of n-way cross-validation splits of `tbl`.

    Yields (test_rows, train_table) pairs; the row cells are reshuffled
    before each repeat. (Python 2: `map` returns a list here, which the
    slicing below relies on.)
    """
    m = m or The.tree.m
    n = n or The.tree.n
    cells = map(lambda row: opt.cells(row), tbl._rows)
    # NOTE(review): `all` shadows the builtin; it is just a countdown used
    # to size the progress banner printed below.
    all = m * n
    for i in range(m):
        print "*" * all
        cells = shuffle(cells)
        div = len(cells) // n
        for j in range(n):
            all -= 1
            # Fold j holds rows [lo, hi) out as the test set.
            lo = j * div
            hi = lo + div
            train = clone(tbl, cells[:lo] + cells[hi:])
            test = map(Row, cells[lo:hi])
            yield test, train
def last(lst):
    """Return the final element of `lst`."""
    final = lst[-1]
    return final
def apex(test, tree, opt=The.tree):
    """apex= leaf at end of biggest (most supported)
    branch that is selected by test in a tree"""
    def equals(val, span):
        # A test value matches a branch when it is the missing marker,
        # equal to the span, inside a (lo, hi) range span, or equal to a
        # discrete span value.
        if val == opt.missing or val == span:
            return True
        else:
            if isinstance(span, tuple):
                lo, hi = span
                return lo <= val <= hi  # <hi
            else:
                return span == val
    def apex1(cells, tree):
        # Descend into every kid whose split matches the test's cells;
        # if none match, this node itself is an apex candidate.
        found = False
        for kid in tree.kids:
            val = cells[kid.f.col]
            if equals(val, kid.val):
                for leaf in apex1(cells, kid):
                    found = True
                    yield leaf
        if not found:
            yield tree
    # Of all matching leaves, prefer the one supported by the most rows.
    leaves = [(len(leaf.rows), leaf)
              for leaf in apex1(opt.cells(test), tree)]
    return second(last(sorted(leaves)))
def classify(test, tree, opt=The.tree):
    """Return the predicted class (mode) of the leaf `test` falls into."""
    # Fixed: previously hard-wired `opt=The.tree` in the apex() call,
    # silently ignoring the caller's `opt` argument.
    return apex(test, tree, opt=opt).mode
def improve(test, tree, opt=The.tree):
    """Apply the opt.better directive to `test`'s leaf (see change)."""
    direction = opt.better
    return change(test, tree, direction, opt)
def degrade(test, tree, opt=The.tree):
    """Apply the opt.worse directive to `test`'s leaf (see change)."""
    direction = opt.worse
    return change(test, tree, direction, opt)
def change(test, tree, how, opt=The.tree):
    """Apply the `how` directive at `test`'s leaf and reclassify.

    Returns (old_mode, new_mode); the two are equal when `how` proposes
    no changes for the leaf.
    """
    # Fixed: the original assigned the leaf to `leaf1` but then read an
    # undefined name `leaf`, raising NameError on every call.
    leaf = apex(test, tree, opt)
    new = old = leaf.mode
    delta = how(leaf)
    if delta:
        copy = opt.cells(test)[:]
        for col, val in delta.items():
            copy[col] = val
        new = classify(Row(copy), tree, opt)
    return old, new
def jumpUp(test, tree, opt=The.tree):
    """Jump from `test`'s leaf toward a better one (see jump)."""
    direction = opt.better
    return jump(test, tree, direction, opt)
def jumpDown(test, tree, opt=The.tree):
    """Jump from `test`'s leaf toward a worse one (see jump)."""
    direction = opt.worse
    return jump(test, tree, direction, opt)
def jump(test, tree, how, opt=The.tree):
    """Return (current_leaf, leaf_reached_after_applying_how) for `test`."""
    asIs = apex(test, tree, opt)
    toBe = asIs
    if how(asIs):
        cells = opt.cells(test)[:]
        for col, val in how(asIs).items():
            cells[col] = val
        toBe = apex(Row(cells), tree, opt)
    return asIs, toBe
def rows1(row, tbl, cells=lambda r: r.cells):
    """Debug-print one row as '<col> ) <name> <value>' lines, one per
    header. `cells` lets callers supply an accessor for storage other
    than the default `.cells` attribute. (Python 2 print statements.)"""
    print ""
    for h, cell in zip(tbl.headers, cells(row)):
        print h.col, ") ", h.name, cell
def snakesAndLadders(tree, train, w):
    """Link every tree node to a better 'ladder' and worse 'snake' node.

    Each node gets a cloned sub-table of its rows; node pairs are scored
    by centroid overlap, and the closest differently-scoring node (per
    the weight/score `w`) becomes this node's ladder (better) or snake
    (worse), together with the branch deltas needed to move there.

    NOTE(review): dtnodes() yields (node, depth) tuples, yet the loops
    below treat each yielded value as a bare node — confirm dtnodes'
    contract before relying on this function.
    """
    def klass(x):
        return x.cells[train.klass[0].col]
    def l2t(l):
        return l.tbl
    def xpect(tbl):
        return tbl.klass[0].centroid()
    def score(l):
        # `w` may be a callable scorer, a {centroid: weight} dict, or
        # absent (in which case the node itself is the score).
        if callable(w):
            return w(l)
        if isinstance(w, dict):
            return w[xpect(l2t(l))]
        return l
    # Give every node its own sub-table plus a centroid summary.
    for node in dtnodes(tree):
        node.tbl = clone(train,
                         rows=map(lambda x: x.cells, node.rows),
                         keepSelections=True)
        node.tbl.centroid = centroid(node.tbl, selections=True)
    # Score the overlap between every pair of node centroids.
    for node1 in dtnodes(tree):
        id1 = node1._id
        node1.far = []
        node1.snake = None
        node1.worse = []
        node1.ladder = None
        node1.better = []
        for node2 in dtnodes(tree):
            # if id1 > node2._id:
            sames = overlap(node1.tbl.centroid, node2.tbl.centroid)
            node1.far += [(sames, node2)]
            # node2.far += [(sames,node1)]
    for node1 in dtnodes(tree):
        # sorted in reverse order of distance
        node1.far = sorted(node1.far,
                           key=lambda x: first(x))
        # at end of this loop, the last ladder, snakes are closest
        for _, node2 in node1.far:
            delta = prefer(node2.branch, node1.branch, key=lambda x: x.col)
            if delta:
                if score(node2) > score(node1):
                    node1.ladder = node2
                    node1.better = delta
                if score(node2) < score(node1):
                    node1.snake = node2
                    node1.worse = delta
    # NOTE(review): this final loop computes snake/ladder ids but never
    # uses them — it looks like a leftover from a removed print/debug.
    for node in dtnodes(tree):
        snake = node.snake._id if node.snake else None
        ladder = node.ladder._id if node.ladder else None
@demo
def tdived(file='data/diabetes.csv'):
  # Demo: build a decision tree from a discretized table and display it.
  tbl = discreteTable(file)
  # exit()
  tree = tdiv(tbl)
  showTdiv(tree)
@demo
def cross(file='data/housingD.csv', rseed=1):
  """Demo: cross-validation of the tdiv tree learner, reporting Abcd
  classification stats plus per-fold node/leaf counts."""
  def klass(test):
    # Class value of a row (column taken from the fold's training table).
    return test.cells[train.klass[0].col]
  seed(rseed)
  tbl = discreteTable(file)
  n = 0
  abcd = Abcd()
  nLeaves = Num()
  nNodes = Num()
  for tests, train in xval(tbl):
    tree = tdiv(train)
    for node in dtnodes(tree):
      print node.branch
    # Num "+" accumulates a sample (per-fold leaf/node counts).
    nLeaves + len([n for n in dtleaves(tree)])
    nNodes + len([n for n in dtnodes(tree)])
    for test in tests:
      want = klass(test)
      got = classify(test, tree)
      abcd(want, got)
    # NOTE(review): exit() here terminates after the first fold, so the
    # reports below never run -- presumably leftover debugging; confirm.
    exit()
  nl()
  abcd.header()
  abcd.report()
  print ":nodes", sorted(nNodes.some.all())
  print ":leaves", sorted(nLeaves.some.all())
# Sentinel used to mark "missing" numeric cells.
ninf = -float("inf")
@demo
def snl(file='data/poi-1.5D.csv', rseed=1, w=dict(_1=0, _0=1)):
  """Demo: snakes-and-ladders planning on cross-validated tdiv trees,
  comparing the tree classifier against rf/logreg/dt/nb baselines and
  tallying the class changes suggested by improve()/degrade()."""
  def klass(x):
    # Class value of a row (column taken from the fold's training table).
    return x.cells[train.klass[0].col]
  def val(xxx_todo_changeme):
    # Parameter name left behind by 2to3's tuple-argument fixer: takes
    # an (x, y) pair and prefers x unless it is the -inf marker.
    (x, y) = xxx_todo_changeme
    return y if x == ninf else x
  seed(rseed)
  nl()
  print "#", file
  tbl = discreteTable(file)
  tree0 = tdiv(tbl)
  showTdiv(tree0)
  nl()
  # Tallies of class labels before/after planned changes.
  old, better, worse = Sym(), Sym(), Sym()
  abcd1, abcd2 = Abcd(db=file, rx="where"), Abcd(db=file, rx="ranfor")
  abcd3 = Abcd(db=file, rx="logref")
  abcd4 = Abcd(db=file, rx="dt")
  abcd5 = Abcd(db=file, rx="nb")
  for tests, train in xval(tbl):
    # Baseline learners score the same folds into abcd2..abcd5.
    learns(tests, train._rows,
           indep=lambda row: map(val, row.cells[:-2]),
           dep=lambda row: row.cells[-1],
           rf=abcd2,
           lg=abcd3,
           dt=abcd4,
           nb=abcd5),
    tree = tdiv(train)
    snakesAndLadders(tree, train, w)
    for test in tests:
      abcd1(actual=klass(test),
            predicted=classify(test, tree))
      a, b = improve(test, tree)
      old + a
      better + b
      _, c = degrade(test, tree)
      worse + c
  print "\n:asIs", old.counts
  print ":plan", better.counts
  print ":warn", worse.counts
  abcd1.header()
  abcd1.report()
  abcd2.report()
  abcd3.report()
  abcd4.report()
  abcd5.report()
def plot(x, y, title, xlabel, ylabel, fname):
  """Draw a line plot of y against x, label it, and save it to
  '<fname>.jpg'.

  Matplotlib is imported lazily so the dependency is only needed when a
  plot is actually requested.  (Removed the unused matplotlib.mlab
  import from the original.)
  """
  import matplotlib.pyplot as plt
  plt.plot(x, y)
  plt.xlabel(xlabel)
  plt.ylabel(ylabel)
  plt.title(title)
  # Widen the left margin so the y-axis label is not clipped.
  plt.subplots_adjust(left=0.15)
  plt.savefig(fname + '.jpg')
  plt.close()
# cross()
if __name__ == '__main__':
  # NOTE(review): eval() executes whatever expression cmd() returns from
  # the command line -- acceptable for a research script, but unsafe if
  # ever exposed to untrusted input.
  eval(cmd())
|
airtrick/zxing | refs/heads/master | cpp/scons/scons-local-2.0.0.final.0/SCons/Options/EnumOption.py | 34 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/EnumOption.py 5023 2010/06/14 22:05:46 scons"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
# One-shot flag so the deprecation warning is emitted at most once.
warned = False

def EnumOption(*args, **kw):
    """Deprecated alias for SCons.Variables.EnumVariable.

    Emits a DeprecatedOptionsWarning on first use, then forwards all
    arguments unchanged.
    """
    global warned
    if not warned:
        SCons.Warnings.warn(
            SCons.Warnings.DeprecatedOptionsWarning,
            "The EnumOption() function is deprecated; use the EnumVariable() function instead.")
        warned = True
    return SCons.Variables.EnumVariable(*args, **kw)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
coberger/DIRAC | refs/heads/integration | ConfigurationSystem/scripts/dirac-admin-add-resources.py | 2 | #!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-add-resources
# Author : Andrei Tsaregorodtsev
########################################################################
"""
Add resources from the BDII database for a given VO
"""
__RCSID__ = "$Id$"
import signal
import re
import os
from urlparse import urlparse
from DIRAC.Core.Base import Script
def processScriptSwitches():
  """Register and parse the command-line switches, storing the results
  in the module globals vo, dry, doCEs and doSEs."""
  global vo, dry, doCEs, doSEs

  Script.registerSwitch( "V:", "vo=", "Virtual Organization" )
  Script.registerSwitch( "D", "dry", "Dry run" )
  Script.registerSwitch( "C", "ce", "Process Computing Elements" )
  Script.registerSwitch( "S", "se", "Process Storage Elements" )
  usage = [ __doc__.split( '\n' )[1],
            'Usage:',
            ' %s [option|cfgfile]' % Script.scriptName ]
  Script.setUsageMessage( '\n'.join( usage ) )
  Script.parseCommandLine( ignoreErrors = True )

  vo = ''
  dry = False
  doCEs = False
  doSEs = False
  for switch, value in Script.getUnprocessedSwitches():
    if switch in ( "V", "vo" ):
      vo = value
    elif switch in ( "D", "dry" ):
      dry = True
    elif switch in ( "C", "ce" ):
      doCEs = True
    elif switch in ( "S", "se" ):
      doSEs = True
from DIRAC import gLogger, exit as DIRACExit, S_OK
from DIRAC.ConfigurationSystem.Client.Utilities import getGridCEs, getSiteUpdates, getCEsFromCS, \
getGridSRMs, getSRMUpdates
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getDIRACSiteName
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOs, getVOOption
ceBdiiDict = None
def checkUnusedCEs():
  """Query the BDII for CEs not yet known to the DIRAC CS and add them
  interactively, site by site, via the dirac-admin-add-site command.

  Uses module globals vo, dry and ceBdiiDict; exits via DIRACExit on
  fatal errors or when the user declines to continue.

  BUGFIX: the "add sites?" prompt tested `not inp and
  inp.lower().startswith('n')`, which can never be true, so answering
  "no" was ignored; the condition is now `inp and ...`.
  """
  global vo, dry, ceBdiiDict

  gLogger.notice( 'looking for new computing resources in the BDII database...' )

  result = getCEsFromCS()
  if not result['OK']:
    gLogger.error( 'ERROR: failed to get CEs from CS', result['Message'] )
    DIRACExit( -1 )
  knownCEs = result['Value']

  result = getGridCEs( vo, ceBlackList = knownCEs )
  if not result['OK']:
    gLogger.error( 'ERROR: failed to get CEs from BDII', result['Message'] )
    DIRACExit( -1 )
  ceBdiiDict = result['BdiiInfo']

  siteDict = result['Value']
  if siteDict:
    gLogger.notice( 'New resources available:\n' )
    for site in siteDict:
      diracSite = 'Unknown'
      result = getDIRACSiteName( site )
      if result['OK']:
        diracSite = ','.join( result['Value'] )
      ces = siteDict[site].keys()
      if ces:
        gLogger.notice( " %s, DIRAC site %s" % ( site, diracSite) )
        for ce in ces:
          gLogger.notice( ' '*4+ce )
          gLogger.notice( ' %s, %s' % ( siteDict[site][ce]['CEType'], '%s_%s_%s' % siteDict[site][ce]['System'] ) )
  else:
    gLogger.notice( 'No new resources available, exiting' )
    DIRACExit( 0 )

  inp = raw_input( "\nDo you want to add sites ? [default=yes] [yes|no]: ")
  inp = inp.strip()
  # BUGFIX (see docstring): a "no"/"n" answer must abort here.
  if inp and inp.lower().startswith( 'n' ):
    gLogger.notice( 'Nothing else to be done, exiting' )
    DIRACExit( 0 )

  gLogger.notice( '\nAdding new sites/CEs interactively\n' )

  sitesAdded = []

  for site in siteDict:
    # Derive a two-letter country code from the CE host names.
    country = ''
    ces = siteDict[site].keys()
    for ce in ces:
      country = ce.strip().split('.')[-1].lower()
      if len( country ) == 2:
        break
      if country == 'gov':
        country = 'us'
        break
    if not country or len( country ) != 2:
      country = 'xx'
    result = getDIRACSiteName( site )
    if not result['OK']:
      # Site unknown to DIRAC: ask the operator for a conventional name.
      gLogger.notice( '\nThe site %s is not yet in the CS, give it a name' % site )
      diracSite = raw_input( '[help|skip|<domain>.<name>.%s]: ' % country )
      if diracSite.lower() == "skip":
        continue
      if diracSite.lower() == "help":
        gLogger.notice( '%s site details:' % site )
        for k,v in ceBdiiDict[site].items():
          if k != "CEs":
            gLogger.notice( '%s\t%s' % (k,v) )
        gLogger.notice( '\nEnter DIRAC site name in the form <domain>.<name>.%s\n' % country )
        diracSite = raw_input( '[<domain>.<name>.%s]: ' % country )
      try:
        _, _, _ = diracSite.split( '.' )
      except ValueError:
        gLogger.error( 'ERROR: DIRAC site name does not follow convention: %s' % diracSite )
        continue
      diracSites = [diracSite]
    else:
      diracSites = result['Value']

    if len( diracSites ) > 1:
      gLogger.notice( 'Attention! GOC site %s corresponds to more than one DIRAC sites:' % site )
      gLogger.notice( str( diracSites ) )
      gLogger.notice( 'Please, pay attention which DIRAC site the new CEs will join\n' )

    # Ask, per CE and per candidate DIRAC site, whether to add it.
    newCEs = {}
    addedCEs = []
    for ce in ces:
      ceType = siteDict[site][ce]['CEType']
      for diracSite in diracSites:
        if ce in addedCEs:
          continue
        yn = raw_input( "Add CE %s of type %s to %s? [default yes] [yes|no]: " % ( ce, ceType, diracSite ) )
        if yn == '' or yn.lower() == 'y':
          newCEs.setdefault( diracSite, [] )
          newCEs[diracSite].append( ce )
          addedCEs.append( ce )

    for diracSite in diracSites:
      if diracSite in newCEs:
        cmd = "dirac-admin-add-site %s %s %s" % ( diracSite, site, ' '.join( newCEs[diracSite] ) )
        gLogger.notice( "\nNew site/CEs will be added with command:\n%s" % cmd )
        yn = raw_input( "Add it ? [default yes] [yes|no]: " )
        if not ( yn == '' or yn.lower() == 'y' ) :
          continue

        if dry:
          gLogger.notice( "Command is skipped in the dry run" )
        else:
          result = shellCall( 0, cmd )
          if not result['OK']:
            gLogger.error( 'Error while executing dirac-admin-add-site command' )
            yn = raw_input( "Do you want to continue ? [default no] [yes|no]: " )
            if yn == '' or yn.lower().startswith( 'n' ):
              if sitesAdded:
                gLogger.notice( 'CEs were added at the following sites:' )
                for site, diracSite in sitesAdded:
                  gLogger.notice( "%s\t%s" % ( site, diracSite ) )
              DIRACExit( 0 )
          else:
            exitStatus, stdData, errData = result[ 'Value' ]
            if exitStatus:
              gLogger.error( 'Error while executing dirac-admin-add-site command\n', '\n'.join( [stdData, errData] ) )
              yn = raw_input( "Do you want to continue ? [default no] [yes|no]: " )
              if yn == '' or yn.lower().startswith( 'n' ):
                if sitesAdded:
                  gLogger.notice( 'CEs were added at the following sites:' )
                  for site, diracSite in sitesAdded:
                    gLogger.notice( "%s\t%s" % ( site, diracSite ) )
                DIRACExit( 0 )
            else:
              sitesAdded.append( ( site, diracSite ) )
              gLogger.notice( stdData )

  if sitesAdded:
    gLogger.notice( 'CEs were added at the following sites:' )
    for site, diracSite in sitesAdded:
      gLogger.notice( "%s\t%s" % ( site, diracSite ) )
  else:
    gLogger.notice( 'No new CEs were added this time' )
def updateCS( changeSet ):
  """Display the accumulated (section, option, old, new) changes and,
  unless running dry, commit them to the CS after confirmation."""
  global vo, dry, ceBdiiDict

  changeList = list( changeSet )
  changeList.sort()
  if dry:
    gLogger.notice( 'The following needed changes are detected:\n' )
  else:
    gLogger.notice( 'We are about to make the following changes to CS:\n' )
  for entry in changeList:
    gLogger.notice( "%s/%s %s -> %s" % entry )

  if not dry:
    csAPI = CSAPI()
    csAPI.initialize()
    result = csAPI.downloadCSData()
    if not result['OK']:
      gLogger.error( 'Failed to initialize CSAPI object', result['Message'] )
      DIRACExit( -1 )
    for section, option, value, new_value in changeSet:
      # Unset or placeholder values are created; real values are modified.
      if value == 'Unknown' or not value:
        csAPI.setOption( cfgPath( section, option ), new_value )
      else:
        csAPI.modifyValue( cfgPath( section, option ), new_value )
    yn = raw_input( 'Do you want to commit changes to CS ? [default yes] [yes|no]: ' )
    if yn == '' or yn.lower().startswith( 'y' ):
      result = csAPI.commit()
      if not result['OK']:
        gLogger.error( "Error while commit to CS", result['Message'] )
      else:
        gLogger.notice( "Successfully committed %d changes to CS" % len( changeSet ) )
def updateSites():
  """Fetch pending CE/site parameter updates from the BDII and apply
  them to the CS via updateCS()."""
  global vo, dry, ceBdiiDict
  result = getSiteUpdates( vo, bdiiInfo = ceBdiiDict )
  if not result['OK']:
    gLogger.error( 'Failed to get site updates', result['Message'] )
    DIRACExit( -1 )
  updateCS( result['Value'] )
def checkUnusedSEs():
  """Find SRM storage elements known to the BDII but unused by DIRAC and
  interactively add them to the CS.

  Uses module globals vo, dry and diracVO.  Returns S_OK().
  """
  global vo, dry

  result = getGridSRMs( vo, unUsed = True )
  if not result['OK']:
    gLogger.error( 'Failed to look up SRMs in BDII', result['Message'] )
  siteSRMDict = result['Value']

  # Evaluate VOs
  result = getVOs()
  if result['OK']:
    csVOs = set( result['Value'] )
  else:
    csVOs = {vo}

  changeSetFull = set()

  for site in siteSRMDict:
    for gridSE in siteSRMDict[site]:
      changeSet = set()
      seDict = siteSRMDict[site][gridSE]['SE']
      srmDict = siteSRMDict[site][gridSE]['SRM']
      # Check the SRM version: only SRM v2 services are considered.
      version = srmDict.get( 'GlueServiceVersion', '' )
      if not ( version and version.startswith( '2' ) ):
        gLogger.debug( 'Skipping SRM service with version %s' % version )
        continue
      result = getDIRACSiteName( site )
      if not result['OK']:
        gLogger.notice( 'Unused se %s is detected at unused site %s' % ( gridSE, site ) )
        gLogger.notice( 'Consider adding site %s to the DIRAC CS' % site )
        continue
      diracSites = result['Value']
      yn = raw_input( '\nDo you want to add new SRM SE %s at site(s) %s ? default yes [yes|no]: ' % ( gridSE, str( diracSites ) ) )
      if not yn or yn.lower().startswith( 'y' ):
        # Pick one DIRAC site when the GOC site maps to several.
        if len( diracSites ) > 1:
          prompt = 'Which DIRAC site the new SE should be attached to ?'
          for i, s in enumerate( diracSites ):
            prompt += '\n[%d] %s' % ( i, s )
          prompt += '\nEnter your choice number: '
          inp = raw_input( prompt )
          try:
            ind = int( inp )
          except:
            gLogger.notice( 'Can not interpret your choice: %s, try again later' % inp )
            continue
          diracSite = diracSites[ind]
        else:
          diracSite = diracSites[0]
        domain, siteName, country = diracSite.split( '.' )
        recName = '%s-disk' % siteName
        inp = raw_input( 'Give a DIRAC name to the grid SE %s, default %s : ' % ( gridSE, recName ) )
        diracSEName = inp
        if not inp:
          diracSEName = recName
        gLogger.notice( 'Adding new SE %s at site %s' % ( diracSEName, diracSite ) )
        # Accumulate the (section, option, value) triples for this SE.
        seSection = cfgPath( '/Resources/StorageElements', diracSEName )
        changeSet.add( ( seSection, 'BackendType', seDict.get( 'GlueSEImplementationName', 'Unknown' ) ) )
        changeSet.add( ( seSection, 'Description', seDict.get( 'GlueSEName', 'Unknown' ) ) )
        bdiiVOs = set( [ re.sub( '^VO:', '', rule ) for rule in srmDict.get( 'GlueServiceAccessControlBaseRule', [] ) ] )
        seVOs = csVOs.intersection( bdiiVOs )
        changeSet.add( ( seSection, 'VO', ','.join( seVOs ) ) )
        accessSection = cfgPath( seSection, 'AccessProtocol.1' )
        changeSet.add( ( accessSection, 'Protocol', 'srm' ) )
        changeSet.add( ( accessSection, 'ProtocolName', 'SRM2' ) )
        endPoint = srmDict.get( 'GlueServiceEndpoint', '' )
        host = urlparse( endPoint ).hostname
        # NOTE(review): `result` here is the getDIRACSiteName() return,
        # which should not carry a 'Port' key -- this looks like it was
        # meant to be urlparse( endPoint ).port; confirm before relying
        # on the Port value written to the CS.
        port = result['Value']['Port']
        changeSet.add( ( accessSection, 'Host', host ) )
        changeSet.add( ( accessSection, 'Port', port ) )
        changeSet.add( ( accessSection, 'Access', 'remote' ) )
        voPathSection = cfgPath( accessSection, 'VOPath' )
        if 'VOPath' in seDict:
          path = seDict['VOPath']
          voFromPath = os.path.basename( path )
          if voFromPath != diracVO:
            gLogger.notice( '\n!!! Warning: non-conventional VO path: %s\n' % path )
            changeSet.add( ( voPathSection, diracVO, path ) )
          path = os.path.dirname( path )
        else:
          # Try to guess the Path
          domain = '.'.join( host.split( '.' )[-2:] )
          path = '/dpm/%s/home' % domain
        changeSet.add( ( accessSection, 'Path', path ) )
        changeSet.add( ( accessSection, 'SpaceToken', '' ) )
        changeSet.add( ( accessSection, 'WSUrl', '/srm/managerv2?SFN=' ) )
        gLogger.notice( 'SE %s will be added with the following parameters' % diracSEName )
        changeList = list( changeSet )
        changeList.sort()
        for entry in changeList:
          gLogger.notice( entry )
        yn = raw_input( 'Do you want to add new SE %s ? default yes [yes|no]: ' % diracSEName )
        if not yn or yn.lower().startswith( 'y' ):
          changeSetFull = changeSetFull.union( changeSet )

  if dry:
    if changeSetFull:
      gLogger.notice( 'Skipping commit of the new SE data in a dry run' )
    else:
      gLogger.notice( "No new SE to be added" )
    return S_OK()

  if changeSetFull:
    csAPI = CSAPI()
    csAPI.initialize()
    result = csAPI.downloadCSData()
    if not result['OK']:
      gLogger.error( 'Failed to initialize CSAPI object', result['Message'] )
      DIRACExit( -1 )
    changeList = list( changeSetFull )
    changeList.sort()
    for section, option, value in changeList:
      csAPI.setOption( cfgPath( section, option ), value )
    yn = raw_input( 'New SE data is accumulated\n Do you want to commit changes to CS ? default yes [yes|no]: ' )
    if not yn or yn.lower().startswith( 'y' ):
      result = csAPI.commit()
      if not result['OK']:
        gLogger.error( "Error while commit to CS", result['Message'] )
      else:
        gLogger.notice( "Successfully committed %d changes to CS" % len( changeSetFull ) )
  else:
    gLogger.notice( "No new SE to be added" )
  return S_OK()
def updateSEs():
  """Fetch pending SRM SE parameter updates from the BDII and apply
  them to the CS via updateCS()."""
  global vo, dry
  result = getSRMUpdates( vo )
  if not result['OK']:
    gLogger.error( 'Failed to get SRM updates', result['Message'] )
    DIRACExit( -1 )
  updateCS( result['Value'] )
def handler( signum, frame ):
  # Signal handler for SIGTERM/SIGINT: announce and terminate.
  gLogger.notice( '\nExit is forced, bye...' )
  DIRACExit( -1 )
if __name__ == "__main__":
  # Ensure a clean exit message on Ctrl-C or termination.
  signal.signal( signal.SIGTERM, handler )
  signal.signal( signal.SIGINT, handler )

  # Module-level state shared with the worker functions above.
  vo = ''
  dry = False
  doCEs = False
  doSEs = False
  ceBdiiDict = None

  processScriptSwitches()
  if not vo:
    gLogger.error( 'No VO specified' )
    DIRACExit( -1 )
  # Keep the DIRAC VO name; `vo` becomes the VOMS name used in BDII queries.
  diracVO = vo
  vo = getVOOption( vo, 'VOMSName', vo )

  if doCEs:
    yn = raw_input( 'Do you want to check/add new sites to CS ? [default yes] [yes|no]: ' )
    yn = yn.strip()
    if yn == '' or yn.lower().startswith( 'y' ):
      checkUnusedCEs()
    yn = raw_input( 'Do you want to update CE details in the CS ? [default yes] [yes|no]: ' )
    yn = yn.strip()
    if yn == '' or yn.lower().startswith( 'y' ):
      updateSites()
  if doSEs:
    yn = raw_input( 'Do you want to check/add new storage elements to CS ? [default yes] [yes|no]: ' )
    yn = yn.strip()
    if yn == '' or yn.lower().startswith( 'y' ):
      result = checkUnusedSEs()
    yn = raw_input( 'Do you want to update SE details in the CS ? [default yes] [yes|no]: ' )
    yn = yn.strip()
    if yn == '' or yn.lower().startswith( 'y' ):
      updateSEs()
|
cetic/ansible | refs/heads/devel | lib/ansible/modules/network/cloudengine/ce_bgp_neighbor.py | 26 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_bgp_neighbor
version_added: "2.4"
short_description: Manages BGP peer configuration on HUAWEI CloudEngine switches.
description:
- Manages BGP peer configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
vrf_name:
description:
- Name of a BGP instance. The name is a case-sensitive string of characters.
The BGP instance can be used only after the corresponding VPN instance is created.
required: true
peer_addr:
description:
- Connection address of a peer, which can be an IPv4 or IPv6 address.
required: true
remote_as:
description:
- AS number of a peer.
The value is a string of 1 to 11 characters.
required: true
description:
description:
- Description of a peer, which can be letters or digits.
The value is a string of 1 to 80 characters.
required: false
default: null
fake_as:
description:
- Fake AS number that is specified for a local peer.
The value is a string of 1 to 11 characters.
required: false
default: null
dual_as:
description:
- If the value is true, the EBGP peer can use either a fake AS number or the actual AS number.
If the value is false, the EBGP peer can only use a fake AS number.
required: false
choices: ['no_use','true','false']
default: no_use
conventional:
description:
- If the value is true, the router has all extended capabilities.
If the value is false, the router does not have all extended capabilities.
required: false
choices: ['no_use','true','false']
default: no_use
route_refresh:
description:
- If the value is true, BGP is enabled to advertise REFRESH packets.
If the value is false, the route refresh function is disabled.
required: false
choices: ['no_use','true','false']
default: no_use
is_ignore:
description:
- If the value is true, the session with a specified peer is torn down and all related
routing entries are cleared.
If the value is false, the session with a specified peer is retained.
required: false
choices: ['no_use','true','false']
default: no_use
local_if_name:
description:
- Name of a source interface that sends BGP packets.
The value is a string of 1 to 63 characters.
required: false
default: null
ebgp_max_hop:
description:
- Maximum number of hops in an indirect EBGP connection.
The value is an integer ranging from 1 to 255.
required: false
default: null
valid_ttl_hops:
description:
- Enable GTSM on a peer or peer group.
The valid-TTL-Value parameter is used to specify the number of TTL hops to be detected.
The value is an integer ranging from 1 to 255.
required: false
default: null
connect_mode:
description:
- The value can be Connect-only, Listen-only, or Both.
required: false
default: null
is_log_change:
description:
- If the value is true, BGP is enabled to record peer session status and event information.
If the value is false, BGP is disabled from recording peer session status and event information.
required: false
choices: ['no_use','true','false']
default: no_use
pswd_type:
description:
- Enable BGP peers to establish a TCP connection and perform the Message Digest 5 (MD5)
authentication for BGP messages.
required: false
choices: ['null','cipher','simple']
default: null
pswd_cipher_text:
description:
- The character string in a password identifies the contents of the password, spaces not supported.
The value is a string of 1 to 255 characters.
required: false
default: null
keep_alive_time:
description:
- Specify the Keepalive time of a peer or peer group.
The value is an integer ranging from 0 to 21845. The default value is 60.
required: false
default: null
hold_time:
description:
- Specify the Hold time of a peer or peer group.
The value is 0 or an integer ranging from 3 to 65535.
required: false
default: null
min_hold_time:
description:
- Specify the Min hold time of a peer or peer group.
required: false
default: null
key_chain_name:
description:
- Specify the Keychain authentication name used when BGP peers establish a TCP connection.
The value is a string of 1 to 47 case-insensitive characters.
required: false
default: null
conn_retry_time:
description:
- ConnectRetry interval.
The value is an integer ranging from 1 to 65535.
required: false
default: null
tcp_MSS:
description:
- Maximum TCP MSS value used for TCP connection establishment for a peer.
The value is an integer ranging from 176 to 4096.
required: false
default: null
mpls_local_ifnet_disable:
description:
- If the value is true, peer create MPLS Local IFNET disable.
If the value is false, peer create MPLS Local IFNET enable.
required: false
choices: ['no_use','true','false']
default: no_use
prepend_global_as:
description:
- Add the global AS number to the Update packets to be advertised.
required: false
choices: ['no_use','true','false']
default: no_use
prepend_fake_as:
description:
- Add the Fake AS number to received Update packets.
required: false
choices: ['no_use','true','false']
default: no_use
is_bfd_block:
description:
- If the value is true, peers are enabled to inherit the BFD function from the peer group.
If the value is false, peers are disabled to inherit the BFD function from the peer group.
required: false
choices: ['no_use','true','false']
default: no_use
multiplier:
description:
- Specify the detection multiplier. The default value is 3.
The value is an integer ranging from 3 to 50.
required: false
default: null
is_bfd_enable:
description:
- If the value is true, BFD is enabled.
If the value is false, BFD is disabled.
required: false
choices: ['no_use','true','false']
default: no_use
rx_interval:
description:
- Specify the minimum interval at which BFD packets are received.
The value is an integer ranging from 50 to 1000, in milliseconds.
required: false
default: null
tx_interval:
description:
- Specify the minimum interval at which BFD packets are sent.
The value is an integer ranging from 50 to 1000, in milliseconds.
required: false
default: null
is_single_hop:
description:
- If the value is true, the system is enabled to preferentially use the single-hop mode for
BFD session setup between IBGP peers.
If the value is false, the system is disabled from preferentially using the single-hop
mode for BFD session setup between IBGP peers.
required: false
choices: ['no_use','true','false']
default: no_use
'''
EXAMPLES = '''
- name: CloudEngine BGP neighbor test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config bgp peer"
ce_bgp_neighbor:
state: present
vrf_name: js
peer_addr: 192.168.10.10
remote_as: 500
provider: "{{ cli }}"
- name: "Config bgp route id"
ce_bgp_neighbor:
state: absent
vrf_name: js
peer_addr: 192.168.10.10
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"peer_addr": "192.168.10.10", "remote_as": "500", "state": "present", "vrf_name": "js"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {"bgp peer": []}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"bgp peer": [["192.168.10.10", "500"]]}
updates:
description: command sent to the device
returned: always
type: list
sample: ["peer 192.168.10.10 as-number 500"]
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
# get bgp peer
CE_GET_BGP_PEER_HEADER = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpPeers>
<bgpPeer>
<peerAddr></peerAddr>
"""
CE_GET_BGP_PEER_TAIL = """
</bgpPeer>
</bgpPeers>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</filter>
"""
# merge bgp peer
CE_MERGE_BGP_PEER_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpPeers>
<bgpPeer operation="merge">
<peerAddr>%s</peerAddr>
"""
CE_MERGE_BGP_PEER_TAIL = """
</bgpPeer>
</bgpPeers>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# create bgp peer
CE_CREATE_BGP_PEER_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpPeers>
<bgpPeer operation="create">
<peerAddr>%s</peerAddr>
"""
CE_CREATE_BGP_PEER_TAIL = """
</bgpPeer>
</bgpPeers>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# delete bgp peer
CE_DELETE_BGP_PEER_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpPeers>
<bgpPeer operation="delete">
<peerAddr>%s</peerAddr>
"""
CE_DELETE_BGP_PEER_TAIL = """
</bgpPeer>
</bgpPeers>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# get peer bfd
CE_GET_PEER_BFD_HEADER = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpPeers>
<bgpPeer>
<peerAddr>%s</peerAddr>
<peerBfd>
"""
CE_GET_PEER_BFD_TAIL = """
</peerBfd>
</bgpPeer>
</bgpPeers>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</filter>
"""
# merge peer bfd
CE_MERGE_PEER_BFD_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpPeers>
<bgpPeer>
<peerAddr>%s</peerAddr>
<peerBfd operation="merge">
"""
CE_MERGE_PEER_BFD_TAIL = """
</peerBfd>
</bgpPeer>
</bgpPeers>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# delete peer bfd
CE_DELETE_PEER_BFD_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpPeers>
<bgpPeer>
<peerAddr>%s</peerAddr>
<peerBfd operation="delete">
"""
CE_DELETE_PEER_BFD_TAIL = """
</peerBfd>
</bgpPeer>
</bgpPeers>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
class BgpNeighbor(object):
""" Manages BGP peer configuration """
def netconf_get_config(self, **kwargs):
""" netconf_get_config """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = get_nc_config(module, conf_str)
return xml_str
def netconf_set_config(self, **kwargs):
""" netconf_set_config """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = set_nc_config(module, conf_str)
return xml_str
def check_bgp_peer_args(self, **kwargs):
""" check_bgp_peer_args """
module = kwargs["module"]
result = dict()
need_cfg = False
vrf_name = module.params['vrf_name']
if vrf_name:
if len(vrf_name) > 31 or len(vrf_name) == 0:
module.fail_json(
msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name)
peer_addr = module.params['peer_addr']
if peer_addr:
if not check_ip_addr(ipaddr=peer_addr):
module.fail_json(
msg='Error: The peer_addr %s is invalid.' % peer_addr)
need_cfg = True
remote_as = module.params['remote_as']
if remote_as:
if len(remote_as) > 11 or len(remote_as) < 1:
module.fail_json(
msg='Error: The len of remote_as %s is out of [1 - 11].' % remote_as)
need_cfg = True
result["need_cfg"] = need_cfg
return result
def check_bgp_peer_other_args(self, **kwargs):
""" check_bgp_peer_other_args """
module = kwargs["module"]
result = dict()
need_cfg = False
vrf_name = module.params['vrf_name']
if vrf_name:
if len(vrf_name) > 31 or len(vrf_name) == 0:
module.fail_json(
msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name)
description = module.params['description']
if description:
if len(description) > 80 or len(description) < 1:
module.fail_json(
msg='Error: The len of description %s is out of [1 - 80].' % description)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<description></description>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<description>(.*)</description>.*', recv_xml)
if re_find:
result["description"] = re_find
if re_find[0] != description:
need_cfg = True
else:
need_cfg = True
fake_as = module.params['fake_as']
if fake_as:
if len(fake_as) > 11 or len(fake_as) < 1:
module.fail_json(
msg='Error: The len of fake_as %s is out of [1 - 11].' % fake_as)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<fakeAs></fakeAs>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<fakeAs>(.*)</fakeAs>.*', recv_xml)
if re_find:
result["fake_as"] = re_find
if re_find[0] != fake_as:
need_cfg = True
else:
need_cfg = True
dual_as = module.params['dual_as']
if dual_as != 'no_use':
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<dualAs></dualAs>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<dualAs>(.*)</dualAs>.*', recv_xml)
if re_find:
result["dual_as"] = re_find
if re_find[0] != fake_as:
need_cfg = True
else:
need_cfg = True
conventional = module.params['conventional']
if conventional != 'no_use':
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<conventional></conventional>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<conventional>(.*)</conventional>.*', recv_xml)
if re_find:
result["conventional"] = re_find
if re_find[0] != conventional:
need_cfg = True
else:
need_cfg = True
route_refresh = module.params['route_refresh']
if route_refresh != 'no_use':
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<routeRefresh></routeRefresh>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeRefresh>(.*)</routeRefresh>.*', recv_xml)
if re_find:
result["route_refresh"] = re_find
if re_find[0] != route_refresh:
need_cfg = True
else:
need_cfg = True
four_byte_as = module.params['four_byte_as']
if four_byte_as != 'no_use':
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<fourByteAs></fourByteAs>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<fourByteAs>(.*)</fourByteAs>.*', recv_xml)
if re_find:
result["four_byte_as"] = re_find
if re_find[0] != four_byte_as:
need_cfg = True
else:
need_cfg = True
is_ignore = module.params['is_ignore']
if is_ignore != 'no_use':
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<isIgnore></isIgnore>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<isIgnore>(.*)</isIgnore>.*', recv_xml)
if re_find:
result["is_ignore"] = re_find
if re_find[0] != is_ignore:
need_cfg = True
else:
need_cfg = True
local_if_name = module.params['local_if_name']
if local_if_name:
if len(local_if_name) > 63 or len(local_if_name) < 1:
module.fail_json(
msg='Error: The len of local_if_name %s is out of [1 - 63].' % local_if_name)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<localIfName></localIfName>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<localIfName>(.*)</localIfName>.*', recv_xml)
if re_find:
result["local_if_name"] = re_find
if re_find[0] != local_if_name:
need_cfg = True
else:
need_cfg = True
ebgp_max_hop = module.params['ebgp_max_hop']
if ebgp_max_hop:
if int(ebgp_max_hop) > 255 or int(ebgp_max_hop) < 1:
module.fail_json(
msg='Error: The value of ebgp_max_hop %s is out of [1 - 255].' % ebgp_max_hop)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<ebgpMaxHop></ebgpMaxHop>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<ebgpMaxHop>(.*)</ebgpMaxHop>.*', recv_xml)
if re_find:
result["ebgp_max_hop"] = re_find
if re_find[0] != ebgp_max_hop:
need_cfg = True
else:
need_cfg = True
valid_ttl_hops = module.params['valid_ttl_hops']
if valid_ttl_hops:
if int(valid_ttl_hops) > 255 or int(valid_ttl_hops) < 1:
module.fail_json(
msg='Error: The value of valid_ttl_hops %s is out of [1 - 255].' % valid_ttl_hops)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<validTtlHops></validTtlHops>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<validTtlHops>(.*)</validTtlHops>.*', recv_xml)
if re_find:
result["valid_ttl_hops"] = re_find
if re_find[0] != valid_ttl_hops:
need_cfg = True
else:
need_cfg = True
connect_mode = module.params['connect_mode']
if connect_mode:
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<connectMode></connectMode>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<connectMode>(.*)</connectMode>.*', recv_xml)
if re_find:
result["connect_mode"] = re_find
if re_find[0] != connect_mode:
need_cfg = True
else:
need_cfg = True
is_log_change = module.params['is_log_change']
if is_log_change != 'no_use':
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<isLogChange></isLogChange>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<isLogChange>(.*)</isLogChange>.*', recv_xml)
if re_find:
result["is_log_change"] = re_find
if re_find[0] != is_log_change:
need_cfg = True
else:
need_cfg = True
pswd_type = module.params['pswd_type']
if pswd_type:
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<pswdType></pswdType>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<pswdType>(.*)</pswdType>.*', recv_xml)
if re_find:
result["pswd_type"] = re_find
if re_find[0] != pswd_type:
need_cfg = True
else:
need_cfg = True
pswd_cipher_text = module.params['pswd_cipher_text']
if pswd_cipher_text:
if len(pswd_cipher_text) > 255 or len(pswd_cipher_text) < 1:
module.fail_json(
msg='Error: The len of pswd_cipher_text %s is out of [1 - 255].' % pswd_cipher_text)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<pswdCipherText></pswdCipherText>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<pswdCipherText>(.*)</pswdCipherText>.*', recv_xml)
if re_find:
result["pswd_cipher_text"] = re_find
if re_find[0] != pswd_cipher_text:
need_cfg = True
else:
need_cfg = True
keep_alive_time = module.params['keep_alive_time']
if keep_alive_time:
if int(keep_alive_time) > 21845 or len(keep_alive_time) < 0:
module.fail_json(
msg='Error: The len of keep_alive_time %s is out of [0 - 21845].' % keep_alive_time)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<keepAliveTime></keepAliveTime>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<keepAliveTime>(.*)</keepAliveTime>.*', recv_xml)
if re_find:
result["keep_alive_time"] = re_find
if re_find[0] != keep_alive_time:
need_cfg = True
else:
need_cfg = True
hold_time = module.params['hold_time']
if hold_time:
if int(hold_time) != 0 and (int(hold_time) > 65535 or int(hold_time) < 3):
module.fail_json(
msg='Error: The value of hold_time %s is out of [0 or 3 - 65535].' % hold_time)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<holdTime></holdTime>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<holdTime>(.*)</holdTime>.*', recv_xml)
if re_find:
result["hold_time"] = re_find
if re_find[0] != hold_time:
need_cfg = True
else:
need_cfg = True
min_hold_time = module.params['min_hold_time']
if min_hold_time:
if int(min_hold_time) != 0 and (int(min_hold_time) > 65535 or int(min_hold_time) < 20):
module.fail_json(
msg='Error: The value of min_hold_time %s is out of [0 or 20 - 65535].' % min_hold_time)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<minHoldTime></minHoldTime>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<minHoldTime>(.*)</minHoldTime>.*', recv_xml)
if re_find:
result["min_hold_time"] = re_find
if re_find[0] != min_hold_time:
need_cfg = True
else:
need_cfg = True
key_chain_name = module.params['key_chain_name']
if key_chain_name:
if len(key_chain_name) > 47 or len(key_chain_name) < 1:
module.fail_json(
msg='Error: The len of key_chain_name %s is out of [1 - 47].' % key_chain_name)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<keyChainName></keyChainName>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<keyChainName>(.*)</keyChainName>.*', recv_xml)
if re_find:
result["key_chain_name"] = re_find
if re_find[0] != key_chain_name:
need_cfg = True
else:
need_cfg = True
conn_retry_time = module.params['conn_retry_time']
if conn_retry_time:
if int(conn_retry_time) > 65535 or int(conn_retry_time) < 1:
module.fail_json(
msg='Error: The value of conn_retry_time %s is out of [1 - 65535].' % conn_retry_time)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<connRetryTime></connRetryTime>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<connRetryTime>(.*)</connRetryTime>.*', recv_xml)
if re_find:
result["conn_retry_time"] = re_find
if re_find[0] != conn_retry_time:
need_cfg = True
else:
need_cfg = True
tcp_mss = module.params['tcp_MSS']
if tcp_mss:
if int(tcp_mss) > 4096 or int(tcp_mss) < 176:
module.fail_json(
msg='Error: The value of tcp_mss %s is out of [176 - 4096].' % tcp_mss)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<tcpMSS></tcpMSS>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<tcpMSS>(.*)</tcpMSS>.*', recv_xml)
if re_find:
result["tcp_MSS"] = re_find
if re_find[0] != tcp_mss:
need_cfg = True
else:
need_cfg = True
mpls_local_ifnet_disable = module.params['mpls_local_ifnet_disable']
if mpls_local_ifnet_disable != 'no_use':
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<mplsLocalIfnetDisable></mplsLocalIfnetDisable>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<mplsLocalIfnetDisable>(.*)</mplsLocalIfnetDisable>.*', recv_xml)
if re_find:
result["mpls_local_ifnet_disable"] = re_find
if re_find[0] != mpls_local_ifnet_disable:
need_cfg = True
else:
need_cfg = True
prepend_global_as = module.params['prepend_global_as']
if prepend_global_as != 'no_use':
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<prependGlobalAs></prependGlobalAs>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<prependGlobalAs>(.*)</prependGlobalAs>.*', recv_xml)
if re_find:
result["prepend_global_as"] = re_find
if re_find[0] != prepend_global_as:
need_cfg = True
else:
need_cfg = True
prepend_fake_as = module.params['prepend_fake_as']
if prepend_fake_as != 'no_use':
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<prependFakeAs></prependFakeAs>" + CE_GET_BGP_PEER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<prependFakeAs>(.*)</prependFakeAs>.*', recv_xml)
if re_find:
result["prepend_fake_as"] = re_find
if re_find[0] != prepend_fake_as:
need_cfg = True
else:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def check_peer_bfd_merge_args(self, **kwargs):
""" check_peer_bfd_merge_args """
module = kwargs["module"]
result = dict()
need_cfg = False
state = module.params['state']
if state == "absent":
result["need_cfg"] = need_cfg
return result
vrf_name = module.params['vrf_name']
if vrf_name:
if len(vrf_name) > 31 or len(vrf_name) == 0:
module.fail_json(
msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name)
peer_addr = module.params['peer_addr']
is_bfd_block = module.params['is_bfd_block']
if is_bfd_block != 'no_use':
conf_str = CE_GET_PEER_BFD_HEADER % (
vrf_name, peer_addr) + "<isBfdBlock></isBfdBlock>" + CE_GET_PEER_BFD_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<isBfdBlock>(.*)</isBfdBlock>.*', recv_xml)
if re_find:
result["is_bfd_block"] = re_find
if re_find[0] != is_bfd_block:
need_cfg = True
else:
need_cfg = True
multiplier = module.params['multiplier']
if multiplier:
if int(multiplier) > 50 or int(multiplier) < 3:
module.fail_json(
msg='Error: The value of multiplier %s is out of [3 - 50].' % multiplier)
conf_str = CE_GET_PEER_BFD_HEADER % (
vrf_name, peer_addr) + "<multiplier></multiplier>" + CE_GET_PEER_BFD_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<multiplier>(.*)</multiplier>.*', recv_xml)
if re_find:
result["multiplier"] = re_find
if re_find[0] != multiplier:
need_cfg = True
else:
need_cfg = True
is_bfd_enable = module.params['is_bfd_enable']
if is_bfd_enable != 'no_use':
conf_str = CE_GET_PEER_BFD_HEADER % (
vrf_name, peer_addr) + "<isBfdEnable></isBfdEnable>" + CE_GET_PEER_BFD_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<isBfdEnable>(.*)</isBfdEnable>.*', recv_xml)
if re_find:
result["is_bfd_enable"] = re_find
if re_find[0] != is_bfd_enable:
need_cfg = True
else:
need_cfg = True
rx_interval = module.params['rx_interval']
if rx_interval:
if int(rx_interval) > 1000 or int(rx_interval) < 50:
module.fail_json(
msg='Error: The value of rx_interval %s is out of [50 - 1000].' % rx_interval)
conf_str = CE_GET_PEER_BFD_HEADER % (
vrf_name, peer_addr) + "<rxInterval></rxInterval>" + CE_GET_PEER_BFD_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<rxInterval>(.*)</rxInterval>.*', recv_xml)
if re_find:
result["rx_interval"] = re_find
if re_find[0] != rx_interval:
need_cfg = True
else:
need_cfg = True
tx_interval = module.params['tx_interval']
if tx_interval:
if int(tx_interval) > 1000 or int(tx_interval) < 50:
module.fail_json(
msg='Error: The value of tx_interval %s is out of [50 - 1000].' % tx_interval)
conf_str = CE_GET_PEER_BFD_HEADER % (
vrf_name, peer_addr) + "<txInterval></txInterval>" + CE_GET_PEER_BFD_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<txInterval>(.*)</txInterval>.*', recv_xml)
if re_find:
result["tx_interval"] = re_find
if re_find[0] != tx_interval:
need_cfg = True
else:
need_cfg = True
is_single_hop = module.params['is_single_hop']
if is_single_hop != 'no_use':
conf_str = CE_GET_PEER_BFD_HEADER % (
vrf_name, peer_addr) + "<isSingleHop></isSingleHop>" + CE_GET_PEER_BFD_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<isSingleHop>(.*)</isSingleHop>.*', recv_xml)
if re_find:
result["is_single_hop"] = re_find
if re_find[0] != is_single_hop:
need_cfg = True
else:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def check_peer_bfd_delete_args(self, **kwargs):
""" check_peer_bfd_delete_args """
module = kwargs["module"]
result = dict()
need_cfg = False
state = module.params['state']
if state == "present":
result["need_cfg"] = need_cfg
return result
vrf_name = module.params['vrf_name']
if vrf_name:
if len(vrf_name) > 31 or len(vrf_name) == 0:
module.fail_json(
msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name)
peer_addr = module.params['peer_addr']
is_bfd_block = module.params['is_bfd_block']
if is_bfd_block != 'no_use':
conf_str = CE_GET_PEER_BFD_HEADER % (
vrf_name, peer_addr) + "<isBfdBlock></isBfdBlock>" + CE_GET_PEER_BFD_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<isBfdBlock>(.*)</isBfdBlock>.*', recv_xml)
if re_find:
result["is_bfd_block"] = re_find
if re_find[0] == is_bfd_block:
need_cfg = True
multiplier = module.params['multiplier']
if multiplier:
if int(multiplier) > 50 or int(multiplier) < 3:
module.fail_json(
msg='Error: The value of multiplier %s is out of [3 - 50].' % multiplier)
conf_str = CE_GET_PEER_BFD_HEADER % (
vrf_name, peer_addr) + "<multiplier></multiplier>" + CE_GET_PEER_BFD_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<multiplier>(.*)</multiplier>.*', recv_xml)
if re_find:
result["multiplier"] = re_find
if re_find[0] == multiplier:
need_cfg = True
is_bfd_enable = module.params['is_bfd_enable']
if is_bfd_enable != 'no_use':
conf_str = CE_GET_PEER_BFD_HEADER % (
vrf_name, peer_addr) + "<isBfdEnable></isBfdEnable>" + CE_GET_PEER_BFD_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<isBfdEnable>(.*)</isBfdEnable>.*', recv_xml)
if re_find:
result["is_bfd_enable"] = re_find
if re_find[0] == is_bfd_enable:
need_cfg = True
rx_interval = module.params['rx_interval']
if rx_interval:
if int(rx_interval) > 1000 or int(rx_interval) < 50:
module.fail_json(
msg='Error: The value of rx_interval %s is out of [50 - 1000].' % rx_interval)
conf_str = CE_GET_PEER_BFD_HEADER % (
vrf_name, peer_addr) + "<rxInterval></rxInterval>" + CE_GET_PEER_BFD_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<rxInterval>(.*)</rxInterval>.*', recv_xml)
if re_find:
result["rx_interval"] = re_find
if re_find[0] == rx_interval:
need_cfg = True
tx_interval = module.params['tx_interval']
if tx_interval:
if int(tx_interval) > 1000 or int(tx_interval) < 50:
module.fail_json(
msg='Error: The value of tx_interval %s is out of [50 - 1000].' % tx_interval)
conf_str = CE_GET_PEER_BFD_HEADER % (
vrf_name, peer_addr) + "<txInterval></txInterval>" + CE_GET_PEER_BFD_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<txInterval>(.*)</txInterval>.*', recv_xml)
if re_find:
result["tx_interval"] = re_find
if re_find[0] == tx_interval:
need_cfg = True
is_single_hop = module.params['is_single_hop']
if is_single_hop != 'no_use':
conf_str = CE_GET_PEER_BFD_HEADER % (
vrf_name, peer_addr) + "<isSingleHop></isSingleHop>" + CE_GET_PEER_BFD_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<isSingleHop>(.*)</isSingleHop>.*', recv_xml)
if re_find:
result["is_single_hop"] = re_find
if re_find[0] == is_single_hop:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def get_bgp_peer(self, **kwargs):
""" get_bgp_peer """
module = kwargs["module"]
vrf_name = module.params['vrf_name']
if vrf_name:
if len(vrf_name) > 31 or len(vrf_name) == 0:
module.fail_json(
msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + \
"<remoteAs></remoteAs>" + CE_GET_BGP_PEER_TAIL
xml_str = self.netconf_get_config(module=module, conf_str=conf_str)
result = list()
if "<data/>" in xml_str:
return result
else:
re_find = re.findall(
r'.*<peerAddr>(.*)</peerAddr>.*\s.*<remoteAs>(.*)</remoteAs>.*', xml_str)
if re_find:
return re_find
else:
return result
def get_bgp_del_peer(self, **kwargs):
""" get_bgp_del_peer """
module = kwargs["module"]
vrf_name = module.params['vrf_name']
if vrf_name:
if len(vrf_name) > 31 or len(vrf_name) == 0:
module.fail_json(
msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name)
conf_str = CE_GET_BGP_PEER_HEADER % vrf_name + CE_GET_BGP_PEER_TAIL
xml_str = self.netconf_get_config(module=module, conf_str=conf_str)
result = list()
if "<data/>" in xml_str:
return result
else:
re_find = re.findall(
r'.*<peerAddr>(.*)</peerAddr>.*', xml_str)
if re_find:
return re_find
else:
return result
def merge_bgp_peer(self, **kwargs):
""" merge_bgp_peer """
module = kwargs["module"]
vrf_name = module.params['vrf_name']
peer_addr = module.params['peer_addr']
remote_as = module.params['remote_as']
conf_str = CE_MERGE_BGP_PEER_HEADER % (
vrf_name, peer_addr) + "<remoteAs>%s</remoteAs>" % remote_as + CE_MERGE_BGP_PEER_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Merge bgp peer failed.')
cmds = []
cmd = "peer %s as-number %s" % (peer_addr, remote_as)
cmds.append(cmd)
return cmds
def create_bgp_peer(self, **kwargs):
""" create_bgp_peer """
module = kwargs["module"]
vrf_name = module.params['vrf_name']
peer_addr = module.params['peer_addr']
remote_as = module.params['remote_as']
conf_str = CE_CREATE_BGP_PEER_HEADER % (
vrf_name, peer_addr) + "<remoteAs>%s</remoteAs>" % remote_as + CE_CREATE_BGP_PEER_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Create bgp peer failed.')
cmds = []
cmd = "peer %s as-number %s" % (peer_addr, remote_as)
cmds.append(cmd)
return cmds
def delete_bgp_peer(self, **kwargs):
""" delete_bgp_peer """
module = kwargs["module"]
vrf_name = module.params['vrf_name']
peer_addr = module.params['peer_addr']
conf_str = CE_DELETE_BGP_PEER_HEADER % (
vrf_name, peer_addr) + CE_DELETE_BGP_PEER_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Delete bgp peer failed.')
cmds = []
cmd = "undo peer %s" % peer_addr
cmds.append(cmd)
return cmds
def merge_bgp_peer_other(self, **kwargs):
""" merge_bgp_peer """
module = kwargs["module"]
vrf_name = module.params['vrf_name']
peer_addr = module.params['peer_addr']
conf_str = CE_MERGE_BGP_PEER_HEADER % (vrf_name, peer_addr)
cmds = []
description = module.params['description']
if description:
conf_str += "<description>%s</description>" % description
cmd = "peer %s description %s" % (peer_addr, description)
cmds.append(cmd)
fake_as = module.params['fake_as']
if fake_as:
conf_str += "<fakeAs>%s</fakeAs>" % fake_as
cmd = "peer %s local-as %s" % (peer_addr, fake_as)
cmds.append(cmd)
dual_as = module.params['dual_as']
if dual_as != 'no_use':
conf_str += "<dualAs>%s</dualAs>" % dual_as
if dual_as == "true":
cmd = "peer %s local-as %s dual-as" % (peer_addr, fake_as)
else:
cmd = "peer %s local-as %s" % (peer_addr, fake_as)
cmds.append(cmd)
conventional = module.params['conventional']
if conventional != 'no_use':
conf_str += "<conventional>%s</conventional>" % conventional
if conventional == "true":
cmd = "peer %s capability-advertise conventional" % peer_addr
else:
cmd = "undo peer %s capability-advertise conventional" % peer_addr
cmds.append(cmd)
route_refresh = module.params['route_refresh']
if route_refresh != 'no_use':
conf_str += "<routeRefresh>%s</routeRefresh>" % route_refresh
if route_refresh == "true":
cmd = "peer %s capability-advertise route-refresh" % peer_addr
else:
cmd = "undo peer %s capability-advertise route-refresh" % peer_addr
cmds.append(cmd)
four_byte_as = module.params['four_byte_as']
if four_byte_as != 'no_use':
conf_str += "<fourByteAs>%s</fourByteAs>" % four_byte_as
if four_byte_as == "true":
cmd = "peer %s capability-advertise 4-byte-as" % peer_addr
else:
cmd = "undo peer %s capability-advertise 4-byte-as" % peer_addr
cmds.append(cmd)
is_ignore = module.params['is_ignore']
if is_ignore != 'no_use':
conf_str += "<isIgnore>%s</isIgnore>" % is_ignore
if is_ignore == "true":
cmd = "peer %s ignore" % peer_addr
else:
cmd = "undo peer %s ignore" % peer_addr
cmds.append(cmd)
local_if_name = module.params['local_if_name']
if local_if_name:
conf_str += "<localIfName>%s</localIfName>" % local_if_name
cmd = "peer %s connect-interface local_if_name" % peer_addr
cmds.append(cmd)
ebgp_max_hop = module.params['ebgp_max_hop']
if ebgp_max_hop:
conf_str += "<ebgpMaxHop>%s</ebgpMaxHop>" % ebgp_max_hop
cmd = "peer %s ebgp-max-hop %s" % (peer_addr, ebgp_max_hop)
cmds.append(cmd)
valid_ttl_hops = module.params['valid_ttl_hops']
if valid_ttl_hops:
conf_str += "<validTtlHops>%s</validTtlHops>" % valid_ttl_hops
cmd = "peer %s valid-ttl-hops %s" % (peer_addr, valid_ttl_hops)
cmds.append(cmd)
connect_mode = module.params['connect_mode']
if connect_mode:
conf_str += "<connectMode>%s</connectMode>" % connect_mode
if connect_mode == "listenOnly":
cmd = "peer %s listen-only" % peer_addr
cmds.append(cmd)
elif connect_mode == "connectOnly":
cmd = "peer %s connect-only" % peer_addr
cmds.append(cmd)
elif connect_mode == "null":
cmd = "peer %s listen-only" % peer_addr
cmds.append(cmd)
cmd = "peer %s connect-only" % peer_addr
cmds.append(cmd)
is_log_change = module.params['is_log_change']
if is_log_change != 'no_use':
conf_str += "<isLogChange>%s</isLogChange>" % is_log_change
if is_log_change == "true":
cmd = "peer %s log-change" % peer_addr
else:
cmd = "undo peer %s log-change" % peer_addr
cmds.append(cmd)
pswd_type = module.params['pswd_type']
if pswd_type:
conf_str += "<pswdType>%s</pswdType>" % pswd_type
pswd_cipher_text = module.params['pswd_cipher_text']
if pswd_cipher_text:
conf_str += "<pswdCipherText>%s</pswdCipherText>" % pswd_cipher_text
if pswd_type == "cipher":
cmd = "peer %s password cipher %s" % (
peer_addr, pswd_cipher_text)
elif pswd_type == "simple":
cmd = "peer %s password simple %s" % (
peer_addr, pswd_cipher_text)
cmds.append(cmd)
keep_alive_time = module.params['keep_alive_time']
if keep_alive_time:
conf_str += "<keepAliveTime>%s</keepAliveTime>" % keep_alive_time
cmd = "peer %s timer keepalive %s" % (peer_addr, keep_alive_time)
cmds.append(cmd)
hold_time = module.params['hold_time']
if hold_time:
conf_str += "<holdTime>%s</holdTime>" % hold_time
cmd = "peer %s timer hold %s" % (peer_addr, hold_time)
cmds.append(cmd)
min_hold_time = module.params['min_hold_time']
if min_hold_time:
conf_str += "<minHoldTime>%s</minHoldTime>" % min_hold_time
cmd = "peer %s timer min-holdtime %s" % (peer_addr, min_hold_time)
cmds.append(cmd)
key_chain_name = module.params['key_chain_name']
if key_chain_name:
conf_str += "<keyChainName>%s</keyChainName>" % key_chain_name
cmd = "peer %s keychain %s" % (peer_addr, key_chain_name)
cmds.append(cmd)
conn_retry_time = module.params['conn_retry_time']
if conn_retry_time:
conf_str += "<connRetryTime>%s</connRetryTime>" % conn_retry_time
cmd = "peer %s timer connect-retry %s" % (
peer_addr, conn_retry_time)
cmds.append(cmd)
tcp_mss = module.params['tcp_MSS']
if tcp_mss:
conf_str += "<tcpMSS>%s</tcpMSS>" % tcp_mss
cmd = "peer %s tcp-mss %s" % (peer_addr, tcp_mss)
cmds.append(cmd)
mpls_local_ifnet_disable = module.params['mpls_local_ifnet_disable']
if mpls_local_ifnet_disable != 'no_use':
conf_str += "<mplsLocalIfnetDisable>%s</mplsLocalIfnetDisable>" % mpls_local_ifnet_disable
prepend_global_as = module.params['prepend_global_as']
if prepend_global_as != 'no_use':
conf_str += "<prependGlobalAs>%s</prependGlobalAs>" % prepend_global_as
if prepend_global_as == "true":
cmd = "peer %s public-as-only" % peer_addr
else:
cmd = "undo peer %s public-as-only" % peer_addr
cmds.append(cmd)
prepend_fake_as = module.params['prepend_fake_as']
if prepend_fake_as != 'no_use':
conf_str += "<prependFakeAs>%s</prependFakeAs>" % prepend_fake_as
conf_str += CE_MERGE_BGP_PEER_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Merge bgp peer other failed.')
return cmds
def merge_peer_bfd(self, **kwargs):
""" merge_peer_bfd """
module = kwargs["module"]
vrf_name = module.params['vrf_name']
peer_addr = module.params['peer_addr']
conf_str = CE_MERGE_PEER_BFD_HEADER % (vrf_name, peer_addr)
cmds = []
is_bfd_block = module.params['is_bfd_block']
if is_bfd_block != 'no_use':
conf_str += "<isBfdBlock>%s</isBfdBlock>" % is_bfd_block
if is_bfd_block == "true":
cmd = "peer %s bfd block" % peer_addr
else:
cmd = "undo peer %s bfd block" % peer_addr
cmds.append(cmd)
multiplier = module.params['multiplier']
if multiplier:
conf_str += "<multiplier>%s</multiplier>" % multiplier
cmd = "peer %s bfd detect-multiplier %s" % (peer_addr, multiplier)
cmds.append(cmd)
is_bfd_enable = module.params['is_bfd_enable']
if is_bfd_enable != 'no_use':
conf_str += "<isBfdEnable>%s</isBfdEnable>" % is_bfd_enable
if is_bfd_enable == "true":
cmd = "peer %s bfd enable" % peer_addr
else:
cmd = "undo peer %s bfd enable" % peer_addr
cmds.append(cmd)
rx_interval = module.params['rx_interval']
if rx_interval:
conf_str += "<rxInterval>%s</rxInterval>" % rx_interval
cmd = "peer %s bfd min-rx-interval %s" % (peer_addr, rx_interval)
cmds.append(cmd)
tx_interval = module.params['tx_interval']
if tx_interval:
conf_str += "<txInterval>%s</txInterval>" % tx_interval
cmd = "peer %s bfd min-tx-interval %s" % (peer_addr, tx_interval)
cmds.append(cmd)
is_single_hop = module.params['is_single_hop']
if is_single_hop != 'no_use':
conf_str += "<isSingleHop>%s</isSingleHop>" % is_single_hop
if is_single_hop == "true":
cmd = "peer %s bfd enable single-hop-prefer" % peer_addr
else:
cmd = "undo peer %s bfd enable single-hop-prefer" % peer_addr
cmds.append(cmd)
conf_str += CE_MERGE_PEER_BFD_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Merge peer bfd failed.')
return cmds
def delete_peer_bfd(self, **kwargs):
""" delete_peer_bfd """
module = kwargs["module"]
vrf_name = module.params['vrf_name']
peer_addr = module.params['peer_addr']
conf_str = CE_DELETE_PEER_BFD_HEADER % (vrf_name, peer_addr)
cmds = []
is_bfd_block = module.params['is_bfd_block']
if is_bfd_block != 'no_use':
conf_str += "<isBfdBlock>%s</isBfdBlock>" % is_bfd_block
cmd = "undo peer %s bfd block" % peer_addr
cmds.append(cmd)
multiplier = module.params['multiplier']
if multiplier:
conf_str += "<multiplier>%s</multiplier>" % multiplier
cmd = "undo peer %s bfd detect-multiplier %s" % (
peer_addr, multiplier)
cmds.append(cmd)
is_bfd_enable = module.params['is_bfd_enable']
if is_bfd_enable != 'no_use':
conf_str += "<isBfdEnable>%s</isBfdEnable>" % is_bfd_enable
cmd = "undo peer %s bfd enable" % peer_addr
cmds.append(cmd)
rx_interval = module.params['rx_interval']
if rx_interval:
conf_str += "<rxInterval>%s</rxInterval>" % rx_interval
cmd = "undo peer %s bfd min-rx-interval %s" % (
peer_addr, rx_interval)
cmds.append(cmd)
tx_interval = module.params['tx_interval']
if tx_interval:
conf_str += "<txInterval>%s</txInterval>" % tx_interval
cmd = "undo peer %s bfd min-tx-interval %s" % (
peer_addr, tx_interval)
cmds.append(cmd)
is_single_hop = module.params['is_single_hop']
if is_single_hop != 'no_use':
conf_str += "<isSingleHop>%s</isSingleHop>" % is_single_hop
cmd = "undo peer %s bfd enable single-hop-prefer" % peer_addr
cmds.append(cmd)
conf_str += CE_DELETE_PEER_BFD_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Delete peer bfd failed.')
return cmds
def main():
    """Entry point: gather parameters, apply BGP peer config, report."""

    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        vrf_name=dict(type='str', required=True),
        peer_addr=dict(type='str', required=True),
        remote_as=dict(type='str', required=True),
        description=dict(type='str'),
        fake_as=dict(type='str'),
        dual_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        conventional=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        route_refresh=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        four_byte_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        is_ignore=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        local_if_name=dict(type='str'),
        ebgp_max_hop=dict(type='str'),
        valid_ttl_hops=dict(type='str'),
        connect_mode=dict(choices=['listenOnly', 'connectOnly', 'null']),
        is_log_change=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        pswd_type=dict(choices=['null', 'cipher', 'simple']),
        pswd_cipher_text=dict(type='str', no_log=True),
        keep_alive_time=dict(type='str'),
        hold_time=dict(type='str'),
        min_hold_time=dict(type='str'),
        key_chain_name=dict(type='str'),
        conn_retry_time=dict(type='str'),
        tcp_MSS=dict(type='str'),
        mpls_local_ifnet_disable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        prepend_global_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        prepend_fake_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        is_bfd_block=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        multiplier=dict(type='str'),
        is_bfd_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        rx_interval=dict(type='str'),
        tx_interval=dict(type='str'),
        is_single_hop=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']))

    argument_spec.update(ce_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Result accumulators reported through exit_json() at the end.
    changed = False
    proposed = dict()
    existing = dict()
    end_state = dict()
    updates = []

    state = module.params['state']
    vrf_name = module.params['vrf_name']
    peer_addr = module.params['peer_addr']
    remote_as = module.params['remote_as']
    description = module.params['description']
    fake_as = module.params['fake_as']
    dual_as = module.params['dual_as']
    conventional = module.params['conventional']
    route_refresh = module.params['route_refresh']
    four_byte_as = module.params['four_byte_as']
    is_ignore = module.params['is_ignore']
    local_if_name = module.params['local_if_name']
    ebgp_max_hop = module.params['ebgp_max_hop']
    valid_ttl_hops = module.params['valid_ttl_hops']
    connect_mode = module.params['connect_mode']
    is_log_change = module.params['is_log_change']
    pswd_type = module.params['pswd_type']
    pswd_cipher_text = module.params['pswd_cipher_text']
    keep_alive_time = module.params['keep_alive_time']
    hold_time = module.params['hold_time']
    min_hold_time = module.params['min_hold_time']
    key_chain_name = module.params['key_chain_name']
    conn_retry_time = module.params['conn_retry_time']
    tcp_mss = module.params['tcp_MSS']
    mpls_local_ifnet_disable = module.params['mpls_local_ifnet_disable']
    prepend_global_as = module.params['prepend_global_as']
    prepend_fake_as = module.params['prepend_fake_as']
    is_bfd_block = module.params['is_bfd_block']
    multiplier = module.params['multiplier']
    is_bfd_enable = module.params['is_bfd_enable']
    rx_interval = module.params['rx_interval']
    tx_interval = module.params['tx_interval']
    is_single_hop = module.params['is_single_hop']

    ce_bgp_peer_obj = BgpNeighbor()

    # get proposed: only include parameters the user actually supplied
    # (tri-state flags use the 'no_use' sentinel for "not set").
    proposed["state"] = state
    if vrf_name:
        proposed["vrf_name"] = vrf_name
    if peer_addr:
        proposed["peer_addr"] = peer_addr
    if remote_as:
        proposed["remote_as"] = remote_as
    if description:
        proposed["description"] = description
    if fake_as:
        proposed["fake_as"] = fake_as
    if dual_as != 'no_use':
        proposed["dual_as"] = dual_as
    if conventional != 'no_use':
        proposed["conventional"] = conventional
    if route_refresh != 'no_use':
        proposed["route_refresh"] = route_refresh
    if four_byte_as != 'no_use':
        proposed["four_byte_as"] = four_byte_as
    if is_ignore != 'no_use':
        proposed["is_ignore"] = is_ignore
    if local_if_name:
        proposed["local_if_name"] = local_if_name
    if ebgp_max_hop:
        proposed["ebgp_max_hop"] = ebgp_max_hop
    if valid_ttl_hops:
        proposed["valid_ttl_hops"] = valid_ttl_hops
    if connect_mode:
        proposed["connect_mode"] = connect_mode
    if is_log_change != 'no_use':
        proposed["is_log_change"] = is_log_change
    if pswd_type:
        proposed["pswd_type"] = pswd_type
    if pswd_cipher_text:
        proposed["pswd_cipher_text"] = pswd_cipher_text
    if keep_alive_time:
        proposed["keep_alive_time"] = keep_alive_time
    if hold_time:
        proposed["hold_time"] = hold_time
    if min_hold_time:
        proposed["min_hold_time"] = min_hold_time
    if key_chain_name:
        proposed["key_chain_name"] = key_chain_name
    if conn_retry_time:
        proposed["conn_retry_time"] = conn_retry_time
    if tcp_mss:
        proposed["tcp_MSS"] = tcp_mss
    if mpls_local_ifnet_disable != 'no_use':
        proposed["mpls_local_ifnet_disable"] = mpls_local_ifnet_disable
    if prepend_global_as != 'no_use':
        proposed["prepend_global_as"] = prepend_global_as
    if prepend_fake_as != 'no_use':
        proposed["prepend_fake_as"] = prepend_fake_as
    if is_bfd_block != 'no_use':
        proposed["is_bfd_block"] = is_bfd_block
    if multiplier:
        proposed["multiplier"] = multiplier
    if is_bfd_enable != 'no_use':
        proposed["is_bfd_enable"] = is_bfd_enable
    if rx_interval:
        proposed["rx_interval"] = rx_interval
    if tx_interval:
        proposed["tx_interval"] = tx_interval
    if is_single_hop != 'no_use':
        proposed["is_single_hop"] = is_single_hop

    if not ce_bgp_peer_obj:
        module.fail_json(msg='Error: Init module failed.')

    # Query the device to decide which configuration areas need changes.
    need_bgp_peer_enable = ce_bgp_peer_obj.check_bgp_peer_args(module=module)
    need_bgp_peer_other_rst = ce_bgp_peer_obj.check_bgp_peer_other_args(
        module=module)
    need_peer_bfd_merge_rst = ce_bgp_peer_obj.check_peer_bfd_merge_args(
        module=module)
    need_peer_bfd_del_rst = ce_bgp_peer_obj.check_peer_bfd_delete_args(
        module=module)

    # bgp peer config
    if need_bgp_peer_enable["need_cfg"]:

        if state == "present":

            if remote_as:

                bgp_peer_exist = ce_bgp_peer_obj.get_bgp_peer(module=module)
                existing["bgp peer"] = bgp_peer_exist

                bgp_peer_new = (peer_addr, remote_as)

                if len(bgp_peer_exist) == 0:
                    # peer does not exist yet: create it
                    cmd = ce_bgp_peer_obj.create_bgp_peer(module=module)
                    changed = True
                    for item in cmd:
                        updates.append(item)
                elif bgp_peer_new in bgp_peer_exist:
                    # already configured as requested: nothing to do
                    pass
                else:
                    # peer exists with a different remote AS: merge
                    cmd = ce_bgp_peer_obj.merge_bgp_peer(module=module)
                    changed = True
                    for item in cmd:
                        updates.append(item)

                bgp_peer_end = ce_bgp_peer_obj.get_bgp_peer(module=module)
                end_state["bgp peer"] = bgp_peer_end

        else:

            bgp_peer_exist = ce_bgp_peer_obj.get_bgp_del_peer(module=module)
            existing["bgp peer"] = bgp_peer_exist

            # NOTE(review): parentheses do not make a tuple here --
            # bgp_peer_new is just peer_addr; the membership test relies
            # on get_bgp_del_peer() returning plain addresses. Confirm.
            bgp_peer_new = (peer_addr)

            if len(bgp_peer_exist) == 0:
                pass
            elif bgp_peer_new in bgp_peer_exist:
                cmd = ce_bgp_peer_obj.delete_bgp_peer(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)

            bgp_peer_end = ce_bgp_peer_obj.get_bgp_del_peer(module=module)
            end_state["bgp peer"] = bgp_peer_end

    # bgp peer other args
    exist_tmp = dict()
    for item in need_bgp_peer_other_rst:
        if item != "need_cfg":
            exist_tmp[item] = need_bgp_peer_other_rst[item]
    if exist_tmp:
        existing["bgp peer other"] = exist_tmp

    if need_bgp_peer_other_rst["need_cfg"]:

        if state == "present":
            cmd = ce_bgp_peer_obj.merge_bgp_peer_other(module=module)
            changed = True
            for item in cmd:
                updates.append(item)

    # Re-check after the change to capture the resulting state.
    need_bgp_peer_other_rst = ce_bgp_peer_obj.check_bgp_peer_other_args(
        module=module)
    end_tmp = dict()
    for item in need_bgp_peer_other_rst:
        if item != "need_cfg":
            end_tmp[item] = need_bgp_peer_other_rst[item]
    if end_tmp:
        end_state["bgp peer other"] = end_tmp

    # peer bfd args
    if state == "present":
        exist_tmp = dict()
        for item in need_peer_bfd_merge_rst:
            if item != "need_cfg":
                exist_tmp[item] = need_peer_bfd_merge_rst[item]
        if exist_tmp:
            existing["peer bfd"] = exist_tmp

        if need_peer_bfd_merge_rst["need_cfg"]:
            cmd = ce_bgp_peer_obj.merge_peer_bfd(module=module)
            changed = True
            for item in cmd:
                updates.append(item)

        need_peer_bfd_merge_rst = ce_bgp_peer_obj.check_peer_bfd_merge_args(
            module=module)
        end_tmp = dict()
        for item in need_peer_bfd_merge_rst:
            if item != "need_cfg":
                end_tmp[item] = need_peer_bfd_merge_rst[item]
        if end_tmp:
            end_state["peer bfd"] = end_tmp
    else:
        exist_tmp = dict()
        for item in need_peer_bfd_del_rst:
            if item != "need_cfg":
                exist_tmp[item] = need_peer_bfd_del_rst[item]
        if exist_tmp:
            existing["peer bfd"] = exist_tmp

        # has already delete with bgp peer
        need_peer_bfd_del_rst = ce_bgp_peer_obj.check_peer_bfd_delete_args(
            module=module)
        end_tmp = dict()
        for item in need_peer_bfd_del_rst:
            if item != "need_cfg":
                end_tmp[item] = need_peer_bfd_del_rst[item]
        if end_tmp:
            end_state["peer bfd"] = end_tmp

    results = dict()
    results['proposed'] = proposed
    results['existing'] = existing
    results['changed'] = changed
    results['end_state'] = end_state
    results['updates'] = updates

    module.exit_json(**results)


if __name__ == '__main__':
    main()
|
yesudeep/greatship | refs/heads/master | app/console/app/pygments/lexers/__init__.py | 27 | # -*- coding: utf-8 -*-
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: 2006-2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import fnmatch
import types
from os.path import basename
try:
    set
except NameError:
    # Python < 2.4 compatibility: fall back to the sets module.
    from sets import Set as set

from pygments.lexers._mapping import LEXERS
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound

__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
           'guess_lexer'] + LEXERS.keys()

# Maps lexer name -> lexer class; filled lazily by _load_lexers().
_lexer_cache = {}
def _load_lexers(module_name):
    """
    Import *module_name* and register every lexer it exports
    (and therefore all others in the same module) in the cache.
    """
    module = __import__(module_name, None, None, ['__all__'])
    for exported_name in module.__all__:
        lexer_cls = getattr(module, exported_name)
        _lexer_cache[lexer_cls.name] = lexer_cls
def get_all_lexers():
    """
    Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all known lexers.
    """
    # Builtin lexers come straight from the mapping table; the leading
    # module name field is dropped.
    for _, name, aliases, filenames, mimetypes in LEXERS.itervalues():
        yield name, aliases, filenames, mimetypes
    # Then any lexers registered through setuptools entrypoints.
    for plugin in find_plugin_lexers():
        yield plugin.name, plugin.aliases, plugin.filenames, plugin.mimetypes
def find_lexer_class(name):
    """
    Lookup a lexer class by name. Return None if not found.
    """
    try:
        return _lexer_cache[name]
    except KeyError:
        pass
    # Builtin lexers are loaded lazily from their defining module.
    for module_name, lexer_name, _, _, _ in LEXERS.itervalues():
        if lexer_name == name:
            _load_lexers(module_name)
            return _lexer_cache[name]
    # Fall back to lexers registered via setuptools entrypoints.
    for plugin in find_plugin_lexers():
        if plugin.name == name:
            return plugin
def get_lexer_by_name(_alias, **options):
    """
    Get a lexer instance by one of its aliases.
    """
    # Builtin lexers first.
    for module_name, name, aliases, _, _ in LEXERS.itervalues():
        if _alias not in aliases:
            continue
        if name not in _lexer_cache:
            _load_lexers(module_name)
        return _lexer_cache[name](**options)
    # Then lexers registered via setuptools entrypoints.
    for plugin in find_plugin_lexers():
        if _alias in plugin.aliases:
            return plugin(**options)
    raise ClassNotFound('no lexer for alias %r found' % _alias)
def get_lexer_for_filename(_fn, **options):
    """
    Get a lexer instance for a filename.
    """
    fn = basename(_fn)
    # Builtin lexers: first glob pattern match wins.
    for modname, name, _, patterns, _ in LEXERS.itervalues():
        for pattern in patterns:
            if not fnmatch.fnmatch(fn, pattern):
                continue
            if name not in _lexer_cache:
                _load_lexers(modname)
            return _lexer_cache[name](**options)
    # Then lexers registered via setuptools entrypoints.
    for plugin in find_plugin_lexers():
        for pattern in plugin.filenames:
            if fnmatch.fnmatch(fn, pattern):
                return plugin(**options)
    raise ClassNotFound('no lexer for filename %r found' % _fn)
def get_lexer_for_mimetype(_mime, **options):
    """
    Get a lexer instance for a mimetype.
    """
    # Builtin lexers first.
    for modname, name, _, _, mimetypes in LEXERS.itervalues():
        if _mime not in mimetypes:
            continue
        if name not in _lexer_cache:
            _load_lexers(modname)
        return _lexer_cache[name](**options)
    # Then lexers registered via setuptools entrypoints.
    for plugin in find_plugin_lexers():
        if _mime in plugin.mimetypes:
            return plugin(**options)
    raise ClassNotFound('no lexer for mimetype %r found' % _mime)
def _iter_lexerclasses():
    """
    Return an iterator over all lexer classes, loading builtin
    modules on demand.
    """
    for entry in LEXERS.itervalues():
        module_name, name = entry[0], entry[1]
        if name not in _lexer_cache:
            _load_lexers(module_name)
        yield _lexer_cache[name]
    for plugin_lexer in find_plugin_lexers():
        yield plugin_lexer
def guess_lexer_for_filename(_fn, _text, **options):
    """
    Lookup all lexers that handle those filenames primary (``filenames``)
    or secondary (``alias_filenames``). Then run a text analysis for those
    lexers and choose the best result.

    usage::

        >>> from pygments.lexers import guess_lexer_for_filename
        >>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
        <pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
        >>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
        <pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
        >>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
        <pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
    """
    fn = basename(_fn)
    # Collect every lexer whose primary or secondary filename patterns
    # match; 'primary' remembers the last primary-pattern match.
    primary = None
    matching_lexers = set()
    for lexer in _iter_lexerclasses():
        for filename in lexer.filenames:
            if fnmatch.fnmatch(fn, filename):
                matching_lexers.add(lexer)
                primary = lexer
        for filename in lexer.alias_filenames:
            if fnmatch.fnmatch(fn, filename):
                matching_lexers.add(lexer)
    if not matching_lexers:
        raise ClassNotFound('no lexer for filename %r found' % fn)
    if len(matching_lexers) == 1:
        return matching_lexers.pop()(**options)
    # More than one candidate: let text analysis pick the best one.
    result = []
    for lexer in matching_lexers:
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            # perfect confidence -- stop immediately
            return lexer(**options)
        result.append((rv, lexer))
    result.sort()
    # If no lexer is confident at all, prefer the primary-filename match.
    if not result[-1][0] and primary is not None:
        return primary(**options)
    return result[-1][1](**options)
def guess_lexer(_text, **options):
    """
    Guess a lexer by strong distinctions in the text (eg, shebang).
    """
    best_score = 0.0
    best_cls = None
    for lexer_cls in _iter_lexerclasses():
        score = lexer_cls.analyse_text(_text)
        if score == 1.0:
            # perfect match -- no need to look any further
            return lexer_cls(**options)
        if score > best_score:
            best_score = score
            best_cls = lexer_cls
    if not best_score or best_cls is None:
        raise ClassNotFound('no lexer matching the text found')
    return best_cls(**options)
class _automodule(types.ModuleType):
    """Automatically import lexers."""

    def __getattr__(self, name):
        # Only called for attributes not found the normal way, i.e.
        # lexer classes that have not been loaded yet.
        info = LEXERS.get(name)
        if info:
            _load_lexers(info[0])
            cls = _lexer_cache[info[1]]
            # Cache the class on the module so __getattr__ is not
            # triggered again for this name.
            setattr(self, name, cls)
            return cls
        raise AttributeError(name)
import sys

# Replace this module in sys.modules with an _automodule instance so
# that lexer classes can be imported lazily on attribute access.
oldmod = sys.modules['pygments.lexers']
newmod = _automodule('pygments.lexers')
newmod.__dict__.update(oldmod.__dict__)
sys.modules['pygments.lexers'] = newmod
# Remove temporaries that were copied onto the new module's dict.
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
|
mohamedhagag/dvit-odoo | refs/heads/10.0 | dvit_sale_line_no/__init__.py | 10 | # -*- coding: utf-8 -*-
from . import models |
SuperStarPL/mopidy | refs/heads/develop | tests/m3u/test_playlists.py | 10 | # encoding: utf-8
from __future__ import absolute_import, unicode_literals
import os
import platform
import shutil
import tempfile
import unittest
import urllib
import pykka
from mopidy import core
from mopidy.internal import deprecation
from mopidy.m3u import actor
from mopidy.m3u.translator import playlist_uri_to_path
from mopidy.models import Playlist, Track
from tests import dummy_audio, path_to_data_dir
from tests.m3u import generate_song
class M3UPlaylistsProviderTest(unittest.TestCase):
    """Tests for the M3U backend's playlists provider.

    Every test runs against a fresh temporary playlists directory
    created in setUp() and removed in tearDown().
    """

    backend_class = actor.M3UBackend
    config = {
        'm3u': {
            'playlists_dir': path_to_data_dir(''),
        }
    }

    def setUp(self):  # noqa: N802
        # Point the backend at a throwaway directory for this test.
        self.config['m3u']['playlists_dir'] = tempfile.mkdtemp()
        self.playlists_dir = self.config['m3u']['playlists_dir']

        audio = dummy_audio.create_proxy()
        backend = actor.M3UBackend.start(
            config=self.config, audio=audio).proxy()
        self.core = core.Core(backends=[backend])

    def tearDown(self):  # noqa: N802
        pykka.ActorRegistry.stop_all()

        if os.path.exists(self.playlists_dir):
            shutil.rmtree(self.playlists_dir)

    def test_created_playlist_is_persisted(self):
        uri = 'm3u:test.m3u'
        path = playlist_uri_to_path(uri, self.playlists_dir)
        self.assertFalse(os.path.exists(path))

        playlist = self.core.playlists.create('test')
        self.assertEqual('test', playlist.name)
        self.assertEqual(uri, playlist.uri)
        self.assertTrue(os.path.exists(path))

    def test_create_sanitizes_playlist_name(self):
        # Path separators and surrounding whitespace must not leak into
        # the on-disk filename.
        playlist = self.core.playlists.create(' ../../test FOO baR ')
        self.assertEqual('..|..|test FOO baR', playlist.name)
        path = playlist_uri_to_path(playlist.uri, self.playlists_dir)
        self.assertEqual(self.playlists_dir, os.path.dirname(path))
        self.assertTrue(os.path.exists(path))

    def test_saved_playlist_is_persisted(self):
        uri1 = 'm3u:test1.m3u'
        uri2 = 'm3u:test2.m3u'

        path1 = playlist_uri_to_path(uri1, self.playlists_dir)
        path2 = playlist_uri_to_path(uri2, self.playlists_dir)

        playlist = self.core.playlists.create('test1')
        self.assertEqual('test1', playlist.name)
        self.assertEqual(uri1, playlist.uri)
        self.assertTrue(os.path.exists(path1))
        self.assertFalse(os.path.exists(path2))

        # Renaming the playlist moves the backing file.
        playlist = self.core.playlists.save(playlist.replace(name='test2'))
        self.assertEqual('test2', playlist.name)
        self.assertEqual(uri2, playlist.uri)
        self.assertFalse(os.path.exists(path1))
        self.assertTrue(os.path.exists(path2))

    def test_deleted_playlist_is_removed(self):
        uri = 'm3u:test.m3u'
        path = playlist_uri_to_path(uri, self.playlists_dir)

        self.assertFalse(os.path.exists(path))

        playlist = self.core.playlists.create('test')
        self.assertEqual('test', playlist.name)
        self.assertEqual(uri, playlist.uri)
        self.assertTrue(os.path.exists(path))

        self.core.playlists.delete(playlist.uri)
        self.assertFalse(os.path.exists(path))

    def test_playlist_contents_is_written_to_disk(self):
        track = Track(uri=generate_song(1))
        playlist = self.core.playlists.create('test')
        playlist = self.core.playlists.save(playlist.replace(tracks=[track]))
        path = playlist_uri_to_path(playlist.uri, self.playlists_dir)

        with open(path) as f:
            contents = f.read()

        self.assertEqual(track.uri, contents.strip())

    def test_extended_playlist_contents_is_written_to_disk(self):
        track = Track(uri=generate_song(1), name='Test', length=60000)
        playlist = self.core.playlists.create('test')
        playlist = self.core.playlists.save(playlist.replace(tracks=[track]))
        path = playlist_uri_to_path(playlist.uri, self.playlists_dir)

        with open(path) as f:
            m3u = f.read().splitlines()

        self.assertEqual(['#EXTM3U', '#EXTINF:60,Test', track.uri], m3u)

    def test_latin1_playlist_contents_is_written_to_disk(self):
        track = Track(uri=generate_song(1), name='Test\x9f', length=60000)
        playlist = self.core.playlists.create('test')
        # Use replace() for consistency; Playlist.copy() is deprecated.
        playlist = self.core.playlists.save(playlist.replace(tracks=[track]))
        path = playlist_uri_to_path(playlist.uri, self.playlists_dir)

        with open(path, 'rb') as f:
            m3u = f.read().splitlines()

        self.assertEqual([b'#EXTM3U', b'#EXTINF:60,Test\x9f', track.uri], m3u)

    def test_utf8_playlist_contents_is_replaced_and_written_to_disk(self):
        track = Track(uri=generate_song(1), name='Test\u07b4', length=60000)
        playlist = self.core.playlists.create('test')
        # Use replace() for consistency; Playlist.copy() is deprecated.
        playlist = self.core.playlists.save(playlist.replace(tracks=[track]))
        path = playlist_uri_to_path(playlist.uri, self.playlists_dir)

        with open(path, 'rb') as f:
            m3u = f.read().splitlines()

        # Characters outside latin-1 are replaced with '?' on write.
        self.assertEqual([b'#EXTM3U', b'#EXTINF:60,Test?', track.uri], m3u)

    def test_playlists_are_loaded_at_startup(self):
        track = Track(uri='dummy:track:path2')
        playlist = self.core.playlists.create('test')
        playlist = playlist.replace(tracks=[track])
        playlist = self.core.playlists.save(playlist)

        self.assertEqual(len(self.core.playlists.as_list()), 1)

        result = self.core.playlists.lookup(playlist.uri)
        self.assertEqual(playlist.uri, result.uri)
        self.assertEqual(playlist.name, result.name)
        self.assertEqual(track.uri, result.tracks[0].uri)

    def test_load_playlist_with_nonfilesystem_encoding_of_filename(self):
        uri = 'm3u:%s.m3u' % urllib.quote('øæå'.encode('latin-1'))
        path = playlist_uri_to_path(uri, self.playlists_dir)
        with open(path, 'wb+') as f:
            f.write(b'#EXTM3U\n')

        self.core.playlists.refresh()

        self.assertEqual(len(self.core.playlists.as_list()), 1)
        result = self.core.playlists.as_list()
        if platform.system() == 'Darwin':
            self.assertEqual('%F8%E6%E5', result[0].name)
        else:
            self.assertEqual('\ufffd\ufffd\ufffd', result[0].name)

    # unittest.SkipTest is an exception class, not a decorator; use
    # unittest.skip() so the placeholder is reported as skipped.
    @unittest.skip('playlists_dir creation is not implemented yet')
    def test_playlists_dir_is_created(self):
        pass

    def test_create_returns_playlist_with_name_set(self):
        playlist = self.core.playlists.create('test')
        self.assertEqual(playlist.name, 'test')

    def test_create_returns_playlist_with_uri_set(self):
        playlist = self.core.playlists.create('test')
        # assertTrue replaces the deprecated assert_ alias.
        self.assertTrue(playlist.uri)

    def test_create_adds_playlist_to_playlists_collection(self):
        playlist = self.core.playlists.create('test')
        playlists = self.core.playlists.as_list()
        self.assertIn(playlist.uri, [ref.uri for ref in playlists])

    def test_as_list_empty_to_start_with(self):
        self.assertEqual(len(self.core.playlists.as_list()), 0)

    def test_delete_non_existant_playlist(self):
        # Deleting an unknown URI must not raise.
        self.core.playlists.delete('m3u:unknown')

    def test_delete_playlist_removes_it_from_the_collection(self):
        playlist = self.core.playlists.create('test')
        self.assertEqual(playlist, self.core.playlists.lookup(playlist.uri))

        self.core.playlists.delete(playlist.uri)
        self.assertIsNone(self.core.playlists.lookup(playlist.uri))

    def test_delete_playlist_without_file(self):
        playlist = self.core.playlists.create('test')
        self.assertEqual(playlist, self.core.playlists.lookup(playlist.uri))

        path = playlist_uri_to_path(playlist.uri, self.playlists_dir)
        self.assertTrue(os.path.exists(path))

        # Remove the backing file behind the provider's back.
        os.remove(path)
        self.assertFalse(os.path.exists(path))

        self.core.playlists.delete(playlist.uri)
        self.assertIsNone(self.core.playlists.lookup(playlist.uri))

    def test_lookup_finds_playlist_by_uri(self):
        original_playlist = self.core.playlists.create('test')

        looked_up_playlist = self.core.playlists.lookup(original_playlist.uri)

        self.assertEqual(original_playlist, looked_up_playlist)

    def test_refresh(self):
        playlist = self.core.playlists.create('test')
        self.assertEqual(playlist, self.core.playlists.lookup(playlist.uri))

        self.core.playlists.refresh()
        self.assertEqual(playlist, self.core.playlists.lookup(playlist.uri))

    def test_save_replaces_existing_playlist_with_updated_playlist(self):
        playlist1 = self.core.playlists.create('test1')
        self.assertEqual(playlist1, self.core.playlists.lookup(playlist1.uri))

        playlist2 = playlist1.replace(name='test2')
        playlist2 = self.core.playlists.save(playlist2)
        self.assertIsNone(self.core.playlists.lookup(playlist1.uri))
        self.assertEqual(playlist2, self.core.playlists.lookup(playlist2.uri))

    def test_create_replaces_existing_playlist_with_updated_playlist(self):
        track = Track(uri=generate_song(1))
        playlist1 = self.core.playlists.create('test')
        playlist1 = self.core.playlists.save(playlist1.replace(tracks=[track]))
        self.assertEqual(playlist1, self.core.playlists.lookup(playlist1.uri))

        playlist2 = self.core.playlists.create('test')
        self.assertEqual(playlist1.uri, playlist2.uri)
        self.assertNotEqual(
            playlist1, self.core.playlists.lookup(playlist1.uri))
        self.assertEqual(playlist2, self.core.playlists.lookup(playlist1.uri))

    def test_save_playlist_with_new_uri(self):
        uri = 'm3u:test.m3u'
        # Saving a playlist that was never created must fail and must
        # not create a file.
        with self.assertRaises(AssertionError):
            self.core.playlists.save(Playlist(uri=uri))
        path = playlist_uri_to_path(uri, self.playlists_dir)
        self.assertFalse(os.path.exists(path))

    def test_playlist_with_unknown_track(self):
        track = Track(uri='file:///dev/null')
        playlist = self.core.playlists.create('test')
        playlist = playlist.replace(tracks=[track])
        playlist = self.core.playlists.save(playlist)

        self.assertEqual(len(self.core.playlists.as_list()), 1)

        result = self.core.playlists.lookup('m3u:test.m3u')
        self.assertEqual('m3u:test.m3u', result.uri)
        self.assertEqual(playlist.name, result.name)
        self.assertEqual(track.uri, result.tracks[0].uri)

    def test_playlist_sort_order(self):
        def check_order(playlists, names):
            self.assertEqual(names, [playlist.name for playlist in playlists])

        self.core.playlists.create('c')
        self.core.playlists.create('a')
        self.core.playlists.create('b')

        check_order(self.core.playlists.as_list(), ['a', 'b', 'c'])

        self.core.playlists.refresh()

        check_order(self.core.playlists.as_list(), ['a', 'b', 'c'])

        playlist = self.core.playlists.lookup('m3u:a.m3u')
        playlist = playlist.replace(name='d')
        playlist = self.core.playlists.save(playlist)

        check_order(self.core.playlists.as_list(), ['b', 'c', 'd'])

        self.core.playlists.delete('m3u:c.m3u')

        check_order(self.core.playlists.as_list(), ['b', 'd'])

    def test_get_items_returns_item_refs(self):
        track = Track(uri='dummy:a', name='A', length=60000)
        playlist = self.core.playlists.create('test')
        playlist = self.core.playlists.save(playlist.replace(tracks=[track]))

        item_refs = self.core.playlists.get_items(playlist.uri)

        self.assertEqual(len(item_refs), 1)
        self.assertEqual(item_refs[0].type, 'track')
        self.assertEqual(item_refs[0].uri, 'dummy:a')
        self.assertEqual(item_refs[0].name, 'A')

    def test_get_items_of_unknown_playlist_returns_none(self):
        item_refs = self.core.playlists.get_items('dummy:unknown')
        self.assertIsNone(item_refs)
class DeprecatedM3UPlaylistsProviderTest(M3UPlaylistsProviderTest):
    """Re-runs all provider tests plus legacy ``filter`` API tests.

    Deprecation warnings for the old playlists APIs are suppressed for
    every test in this class.
    """

    def run(self, result=None):
        # Silence warnings for the deprecated calls exercised below.
        with deprecation.ignore(ids=['core.playlists.filter',
                                     'core.playlists.filter:kwargs_criteria',
                                     'core.playlists.get_playlists']):
            return super(DeprecatedM3UPlaylistsProviderTest, self).run(result)

    def test_filter_without_criteria(self):
        # filter() with no criteria behaves like get_playlists().
        self.assertEqual(self.core.playlists.get_playlists(),
                         self.core.playlists.filter())

    def test_filter_with_wrong_criteria(self):
        self.assertEqual([], self.core.playlists.filter(name='foo'))

    def test_filter_with_right_criteria(self):
        playlist = self.core.playlists.create('test')
        playlists = self.core.playlists.filter(name='test')
        self.assertEqual([playlist], playlists)

    def test_filter_by_name_returns_single_match(self):
        self.core.playlists.create('a')
        playlist = self.core.playlists.create('b')
        self.assertEqual([playlist], self.core.playlists.filter(name='b'))

    def test_filter_by_name_returns_no_matches(self):
        self.core.playlists.create('a')
        self.core.playlists.create('b')
        self.assertEqual([], self.core.playlists.filter(name='c'))
|
sunlianqiang/kbengine | refs/heads/master | kbe/src/lib/python/Tools/freeze/makefreeze.py | 37 | import marshal
import bkfile
# Write a file containing frozen code for the modules in the dictionary.
header = """
#include "Python.h"
static struct _frozen _PyImport_FrozenModules[] = {
"""
trailer = """\
{0, 0, 0} /* sentinel */
};
"""
# if __debug__ == 0 (i.e. -O option given), set Py_OptimizeFlag in frozen app.
default_entry_point = """
int
main(int argc, char **argv)
{
extern int Py_FrozenMain(int, char **);
""" + ((not __debug__ and """
Py_OptimizeFlag++;
""") or "") + """
PyImport_FrozenModules = _PyImport_FrozenModules;
return Py_FrozenMain(argc, argv);
}
"""
def makefreeze(base, dict, debug=0, entry_point=None, fail_import=()):
    """Write C sources with frozen code for the modules in *dict*.

    Emits one ``M_<mangled>.c`` per module plus ``frozen.c`` holding the
    module table and the entry point.

    Args:
        base: directory/prefix the output files are written under.
        dict: mapping of dotted module name -> module object (must have
            ``__code__`` and ``__path__`` attributes).
        debug: if true, print progress messages.
        entry_point: C source for ``main()``; defaults to
            ``default_entry_point``.
        fail_import: module names to freeze with a NULL code pointer so
            importing them always raises ImportError.

    Returns:
        list[str]: the generated per-module file names (without *base*).
    """
    if entry_point is None:
        entry_point = default_entry_point
    done = []
    files = []
    mods = sorted(dict.keys())
    for mod in mods:
        m = dict[mod]
        # 'a.b.c' -> 'a__b__c' for use in a C identifier.
        mangled = "__".join(mod.split("."))
        if m.__code__:
            # 'filename'/'code' avoid shadowing the builtins 'file'/'str'
            # that the original locals clobbered.
            filename = 'M_' + mangled + '.c'
            outfp = bkfile.open(base + filename, 'w')
            files.append(filename)
            if debug:
                print("freezing", mod, "...")
            code = marshal.dumps(m.__code__)
            size = len(code)
            if m.__path__:
                # Indicate package by negative size
                size = -size
            done.append((mod, mangled, size))
            writecode(outfp, mangled, code)
            outfp.close()
    if debug:
        print("generating table of frozen modules")
    outfp = bkfile.open(base + 'frozen.c', 'w')
    for mod, mangled, size in done:
        outfp.write('extern unsigned char M_%s[];\n' % mangled)
    outfp.write(header)
    for mod, mangled, size in done:
        outfp.write('\t{"%s", M_%s, %d},\n' % (mod, mangled, size))
    outfp.write('\n')
    # The following modules have a NULL code pointer, indicating
    # that the frozen program should not search for them on the host
    # system. Importing them will *always* raise an ImportError.
    # The zero value size is never used.
    for mod in fail_import:
        outfp.write('\t{"%s", NULL, 0},\n' % (mod,))
    outfp.write(trailer)
    outfp.write(entry_point)
    outfp.close()
    return files
# Write a C initializer for a module containing the frozen python code.
# The array is called M_<mod>.
def writecode(outfp, mod, str):
    """Write a C ``unsigned char`` array named ``M_<mod>`` holding the
    bytes of *str* to the open file *outfp*, 16 bytes per line."""
    outfp.write('unsigned char M_%s[] = {' % mod)
    data = bytes(str)
    for start in range(0, len(data), 16):
        outfp.write('\n\t')
        chunk = data[start:start + 16]
        outfp.write(''.join('%d,' % byte for byte in chunk))
    outfp.write('\n};\n')
## def writecode(outfp, mod, str):
## outfp.write('unsigned char M_%s[%d] = "%s";\n' % (mod, len(str),
## '\\"'.join(map(lambda s: repr(s)[1:-1], str.split('"')))))
|
scottquiring/Udacity_Deeplearning | refs/heads/master | transfer-learning/tensorflow_vgg/vgg19_trainable.py | 153 | import tensorflow as tf
import numpy as np
from functools import reduce
# Mean BGR pixel values of the VGG/ImageNet training set; subtracted
# from the input before the first convolution.
VGG_MEAN = [103.939, 116.779, 123.68]
class Vgg19:
"""
A trainable version VGG19.
"""
def __init__(self, vgg19_npy_path=None, trainable=True, dropout=0.5):
if vgg19_npy_path is not None:
self.data_dict = np.load(vgg19_npy_path, encoding='latin1').item()
else:
self.data_dict = None
self.var_dict = {}
self.trainable = trainable
self.dropout = dropout
def build(self, rgb, train_mode=None):
"""
load variable from npy to build the VGG
:param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
:param train_mode: a bool tensor, usually a placeholder: if True, dropout will be turned on
"""
rgb_scaled = rgb * 255.0
# Convert RGB to BGR
red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
assert red.get_shape().as_list()[1:] == [224, 224, 1]
assert green.get_shape().as_list()[1:] == [224, 224, 1]
assert blue.get_shape().as_list()[1:] == [224, 224, 1]
bgr = tf.concat(axis=3, values=[
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
])
assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
self.conv1_1 = self.conv_layer(bgr, 3, 64, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, 64, 64, "conv1_2")
self.pool1 = self.max_pool(self.conv1_2, 'pool1')
self.conv2_1 = self.conv_layer(self.pool1, 64, 128, "conv2_1")
self.conv2_2 = self.conv_layer(self.conv2_1, 128, 128, "conv2_2")
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, 128, 256, "conv3_1")
self.conv3_2 = self.conv_layer(self.conv3_1, 256, 256, "conv3_2")
self.conv3_3 = self.conv_layer(self.conv3_2, 256, 256, "conv3_3")
self.conv3_4 = self.conv_layer(self.conv3_3, 256, 256, "conv3_4")
self.pool3 = self.max_pool(self.conv3_4, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, 256, 512, "conv4_1")
self.conv4_2 = self.conv_layer(self.conv4_1, 512, 512, "conv4_2")
self.conv4_3 = self.conv_layer(self.conv4_2, 512, 512, "conv4_3")
self.conv4_4 = self.conv_layer(self.conv4_3, 512, 512, "conv4_4")
self.pool4 = self.max_pool(self.conv4_4, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, 512, 512, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, 512, 512, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, 512, 512, "conv5_3")
self.conv5_4 = self.conv_layer(self.conv5_3, 512, 512, "conv5_4")
self.pool5 = self.max_pool(self.conv5_4, 'pool5')
self.fc6 = self.fc_layer(self.pool5, 25088, 4096, "fc6") # 25088 = ((224 // (2 ** 5)) ** 2) * 512
self.relu6 = tf.nn.relu(self.fc6)
if train_mode is not None:
self.relu6 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu6, self.dropout), lambda: self.relu6)
elif self.trainable:
self.relu6 = tf.nn.dropout(self.relu6, self.dropout)
self.fc7 = self.fc_layer(self.relu6, 4096, 4096, "fc7")
self.relu7 = tf.nn.relu(self.fc7)
if train_mode is not None:
self.relu7 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu7, self.dropout), lambda: self.relu7)
elif self.trainable:
self.relu7 = tf.nn.dropout(self.relu7, self.dropout)
self.fc8 = self.fc_layer(self.relu7, 4096, 1000, "fc8")
self.prob = tf.nn.softmax(self.fc8, name="prob")
self.data_dict = None
def avg_pool(self, bottom, name):
return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def max_pool(self, bottom, name):
return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def conv_layer(self, bottom, in_channels, out_channels, name):
with tf.variable_scope(name):
filt, conv_biases = self.get_conv_var(3, in_channels, out_channels, name)
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
bias = tf.nn.bias_add(conv, conv_biases)
relu = tf.nn.relu(bias)
return relu
def fc_layer(self, bottom, in_size, out_size, name):
    """Fully-connected layer: flatten *bottom* to (-1, in_size), then x @ W + b."""
    with tf.variable_scope(name):
        weights, biases = self.get_fc_var(in_size, out_size, name)
        flat = tf.reshape(bottom, [-1, in_size])
        return tf.nn.bias_add(tf.matmul(flat, weights), biases)
def get_conv_var(self, filter_size, in_channels, out_channels, name):
    """Create (or reload from pretrained data) a conv kernel and its biases."""
    kernel_init = tf.truncated_normal(
        [filter_size, filter_size, in_channels, out_channels], 0.0, 0.001)
    filters = self.get_var(kernel_init, name, 0, name + "_filters")
    bias_init = tf.truncated_normal([out_channels], 0.0, 0.001)
    biases = self.get_var(bias_init, name, 1, name + "_biases")
    return filters, biases
def get_fc_var(self, in_size, out_size, name):
    """Create (or reload from pretrained data) FC weights and biases."""
    weight_init = tf.truncated_normal([in_size, out_size], 0.0, 0.001)
    weights = self.get_var(weight_init, name, 0, name + "_weights")
    bias_init = tf.truncated_normal([out_size], 0.0, 0.001)
    biases = self.get_var(bias_init, name, 1, name + "_biases")
    return weights, biases
def get_var(self, initial_value, name, idx, var_name):
    """Return the variable for (name, idx), preferring pretrained weights.

    Values present in self.data_dict (loaded .npy weights) override
    *initial_value*.  Trainable networks get a tf.Variable, frozen ones a
    tf.constant.  The result is recorded in self.var_dict for save_npy().
    """
    if self.data_dict is not None and name in self.data_dict:
        value = self.data_dict[name][idx]
    else:
        value = initial_value
    if self.trainable:
        var = tf.Variable(value, name=var_name)
    else:
        var = tf.constant(value, dtype=tf.float32, name=var_name)
    self.var_dict[(name, idx)] = var
    # Loaded weights must match the shape the architecture expects.
    assert var.get_shape() == initial_value.get_shape()
    return var
def save_npy(self, sess, npy_path="./vgg19-save.npy"):
    """Evaluate every tracked variable and dump them to *npy_path* as a nested dict."""
    assert isinstance(sess, tf.Session)
    data_dict = {}
    for (name, idx), var in list(self.var_dict.items()):
        data_dict.setdefault(name, {})[idx] = sess.run(var)
    np.save(npy_path, data_dict)
    print(("file saved", npy_path))
    return npy_path
def get_var_count(self):
    """Return the total number of scalar parameters across all variables.

    Uses explicit loops instead of the bare ``reduce`` builtin, which was
    removed in Python 3 (the original raised NameError there); this keeps
    the method working on both Python 2 and 3 with no new imports.
    """
    count = 0
    for v in self.var_dict.values():
        # Product of the variable's dimensions = number of scalars in it.
        size = 1
        for dim in v.get_shape().as_list():
            size *= dim
        count += size
    return count
|
MartijnBraam/wifite-ng | refs/heads/master | wifiteng/helpers.py | 1 | import subprocess
class Color(object):
    """ANSI escape sequences for bold/bright terminal colors."""
    GRAY = "\033[1;30m"
    RED = "\033[1;31m"
    GREEN = "\033[1;32m"
    YELLOW = "\033[1;33m"
    BLUE = "\033[1;34m"
    MAGENTA = "\033[1;35m"
    CYAN = "\033[1;36m"
    WHITE = "\033[1;37m"
    # Restores the terminal's default attributes.
    RESET = "\033[0m"
class ShellTool(object):
    """Mixin that runs external commands and captures their output."""

    def call_and_communicate(self, command, timeout=None):
        """Run *command* and wait for it to finish.

        :param command: argument list passed to subprocess.Popen.
        :param timeout: optional seconds to wait before giving up.
        :returns: (returncode, stdout, stderr) with both streams decoded.
        :raises subprocess.TimeoutExpired: if *timeout* elapses; the child
            is killed and reaped before the exception propagates.
        """
        # Create subprocess and connect stdout and stderr to pipes.
        proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        try:
            # Run command until it exits or the timeout (if set) expires.
            (stdout, stderr) = proc.communicate(timeout=timeout)
            return proc.returncode, stdout.decode(), stderr.decode()
        except subprocess.TimeoutExpired:
            # Kill the child, then reap it with a final communicate() so no
            # zombie process is left behind (the pattern recommended by the
            # subprocess documentation); then bubble the exception up.
            proc.kill()
            proc.communicate()
            raise
lm-tools/sectors | refs/heads/master | web/prototype/apps/sectors/apps.py | 1 | from django.apps import AppConfig
class SectorsConfig(AppConfig):
    """Django app configuration for the prototype 'sectors' app."""
    name = 'sectors'
    # Human-readable title shown where Django displays the app name.
    verbose_name = "Job roles"
    # NOTE(review): icon_url and start_url_name are not standard AppConfig
    # attributes -- presumably consumed by this project's templates/navigation;
    # confirm against the code that reads them.
    icon_url = 'images/icons/app-icon-red.png'
    start_url_name = "start"
|
ksmit799/Toontown-Source | refs/heads/master | toontown/coghq/CashbotMintBoilerRoom_Battle00_Cogs.py | 12 | from SpecImports import *
from toontown.toonbase import ToontownGlobals
# Entity ids of the level nodes the cogs and battle cells attach to.
CogParent = 10000
LowerCogParent = 10003
BattleParent = 10002
LowerBattleParent = 10005
FrontCogParent = 10013
CenterCogParent = 10040

# One battle cell per cog group.
BattleCellId = 0
LowerBattleCellId = 1
FrontBattleCellId = 2
CenterBattleCellId = 3

BattleCells = {
    BattleCellId: {'parentEntId': BattleParent, 'pos': Point3(0, 0, 0)},
    LowerBattleCellId: {'parentEntId': LowerBattleParent, 'pos': Point3(0, 0, 0)},
    FrontBattleCellId: {'parentEntId': FrontCogParent, 'pos': Point3(0, 0, 0)},
    CenterBattleCellId: {'parentEntId': CenterCogParent, 'pos': Point3(0, 0, 0)},
}


def _cog(parentEntId, battleCell, x, skeleton):
    """Build one stationary cog spec at x-offset *x*; a skelecog when *skeleton*.

    Replaces the original hand-written list of sixteen near-identical dicts;
    the produced values are unchanged.
    """
    if skeleton:
        level = ToontownGlobals.CashbotMintSkelecogLevel
    else:
        level = ToontownGlobals.CashbotMintCogLevel
    return {'parentEntId': parentEntId,
            'boss': 0,
            'level': level,
            'battleCell': battleCell,
            'pos': Point3(x, 0, 0),
            'h': 180,
            'behavior': 'stand',
            'path': None,
            'skeleton': skeleton}


# Four cogs per group, lined up at x = -6, -2, 2, 6; the skeleton tuple
# marks which of the four positions hold skelecogs.
CogData = [
    _cog(parent, cell, x, skel)
    for parent, cell, skeletons in (
        (CogParent, BattleCellId, (1, 1, 1, 1)),
        (LowerCogParent, LowerBattleCellId, (0, 1, 1, 0)),
        (FrontCogParent, FrontBattleCellId, (0, 0, 0, 0)),
        (CenterCogParent, CenterBattleCellId, (0, 1, 1, 0)),
    )
    for x, skel in zip((-6, -2, 2, 6), skeletons)
]

ReserveCogData = []
|
jness/torrent_tracker | refs/heads/master | Providers/basic.py | 1 | # This Provider can be used for any site that
# has a list of direct download torrent links
from StringIO import StringIO
import gzip
import urllib2
from re import compile
def episodes(s):
    '''Returns a list of all episodes greater than
    or equal to your startnum from a source with direct
    downloads.

    *s* is a dict-like config with keys 'url', 'regex' and 'startnum'.
    '''
    epis = []
    request = urllib2.Request(s['url'])
    # Ask the server for gzip so large index pages transfer faster.
    request.add_header('Accept-encoding', 'gzip')
    response = urllib2.urlopen(request)
    if response.info().get('Content-Encoding') == 'gzip':
        # Server honoured the gzip request: decompress before parsing.
        buf = StringIO(response.read())
        f = gzip.GzipFile(fileobj=buf)
        data = f.read()
    else:
        data = response.read()
    # 'compile' here is re.compile (imported at module top), not the builtin.
    match = compile(s['regex']).findall(data)
    # only check for episodes higher than our startnum
    for m in match:
        for results in m:
            if results.isdigit():
                start_ep = results
                # NOTE(review): if the regex yields a tuple with no digit
                # group, start_ep is unbound at this comparison -- confirm
                # s['regex'] always captures an episode number.
                if int(start_ep) >= int(s['startnum']):
                    epis.append(m)
    # return all episodes greater than startnum
    return epis
|
Titan-C/sympy | refs/heads/master | sympy/solvers/tests/test_diophantine.py | 5 | from sympy import (Add, factor_list, igcd, Matrix, Mul, S, simplify,
Symbol, symbols, Eq, pi, factorint, oo, powsimp)
from sympy.core.function import _mexpand
from sympy.core.compatibility import range
from sympy.functions.elementary.trigonometric import sin
from sympy.solvers.diophantine import (descent, diop_bf_DN, diop_DN,
diop_solve, diophantine, divisible, equivalent, find_DN, ldescent, length,
reconstruct, partition, power_representation,
prime_as_sum_of_two_squares, square_factor, sum_of_four_squares,
sum_of_three_squares, transformation_to_DN, transformation_to_normal,
classify_diop, base_solution_linear, cornacchia, sqf_normal,
diop_ternary_quadratic_normal, _diop_ternary_quadratic_normal,
gaussian_reduce, holzer,diop_general_pythagorean,
_diop_general_sum_of_squares, _nint_or_floor, _odd, _even,
_remove_gcd, check_param, parametrize_ternary_quadratic,
diop_ternary_quadratic, diop_linear, diop_quadratic,
diop_general_sum_of_squares, sum_of_powers, sum_of_squares,
diop_general_sum_of_even_powers, _can_do_sum_of_squares)
from sympy.utilities import default_sort_key
from sympy.utilities.pytest import slow, raises, XFAIL
# Shared integer symbols used throughout the tests below.
a, b, c, d, p, q, x, y, z, w, t, u, v, X, Y, Z = symbols(
    "a, b, c, d, p, q, x, y, z, w, t, u, v, X, Y, Z", integer=True)
t_0, t_1, t_2, t_3, t_4, t_5, t_6 = symbols("t_:7", integer=True)
m1, m2, m3 = symbols('m1:4', integer=True)
n1 = symbols('n1', integer=True)
def diop_simplify(eq):
    """Expand and power-simplify *eq* into a canonical form for comparison."""
    expanded = _mexpand(eq)
    return _mexpand(powsimp(expanded))
def test_input_format():
    """Non-polynomial or non-expression input must raise TypeError."""
    raises(TypeError, lambda: diophantine(sin(x)))
    raises(TypeError, lambda: diophantine(3))
    raises(TypeError, lambda: diophantine(x/pi - 3))


def test_univariate():
    """Single-variable polynomials: only the integer roots are returned."""
    assert diop_solve((x - 1)*(x - 2)**2) == set([(1,), (2,)])
    assert diop_solve((x - 1)*(x - 2)) == set([(1,), (2,)])
def test_classify_diop():
    """classify_diop returns (free symbols, coefficient dict, equation type)."""
    raises(TypeError, lambda: classify_diop(x**2/3 - 1))
    raises(ValueError, lambda: classify_diop(1))
    raises(NotImplementedError, lambda: classify_diop(w*x*y*z - 1))
    assert classify_diop(14*x**2 + 15*x - 42) == (
        [x], {1: -42, x: 15, x**2: 14}, 'univariate')
    assert classify_diop(x*y + z) == (
        [x, y, z], {x*y: 1, z: 1}, 'inhomogeneous_ternary_quadratic')
    assert classify_diop(x*y + z + w + x**2) == (
        [w, x, y, z], {x*y: 1, w: 1, x**2: 1, z: 1}, 'inhomogeneous_general_quadratic')
    assert classify_diop(x*y + x*z + x**2 + 1) == (
        [x, y, z], {x*y: 1, x*z: 1, x**2: 1, 1: 1}, 'inhomogeneous_general_quadratic')
    assert classify_diop(x*y + z + w + 42) == (
        [w, x, y, z], {x*y: 1, w: 1, 1: 42, z: 1}, 'inhomogeneous_general_quadratic')
    assert classify_diop(x*y + z*w) == (
        [w, x, y, z], {x*y: 1, w*z: 1}, 'homogeneous_general_quadratic')
    assert classify_diop(x*y**2 + 1) == (
        [x, y], {x*y**2: 1, 1: 1}, 'cubic_thue')
def test_linear():
    """Linear Diophantine equations in 1-4 variables; None means no solution."""
    assert diop_solve(x) == (0,)
    assert diop_solve(1*x) == (0,)
    assert diop_solve(3*x) == (0,)
    assert diop_solve(x + 1) == (-1,)
    assert diop_solve(2*x + 1) == (None,)
    assert diop_solve(2*x + 4) == (-2,)
    assert diop_solve(y + x) == (t_0, -t_0)
    assert diop_solve(y + x + 0) == (t_0, -t_0)
    assert diop_solve(y + x - 0) == (t_0, -t_0)
    assert diop_solve(0*x - y - 5) == (-5,)
    assert diop_solve(3*y + 2*x - 5) == (3*t_0 - 5, -2*t_0 + 5)
    assert diop_solve(2*x - 3*y - 5) == (3*t_0 - 5, 2*t_0 - 5)
    assert diop_solve(-2*x - 3*y - 5) == (3*t_0 + 5, -2*t_0 - 5)
    assert diop_solve(7*x + 5*y) == (5*t_0, -7*t_0)
    assert diop_solve(2*x + 4*y) == (2*t_0, -t_0)
    assert diop_solve(4*x + 6*y - 4) == (3*t_0 - 2, -2*t_0 + 2)
    assert diop_solve(4*x + 6*y - 3) == (None, None)
    assert diop_solve(0*x + 3*y - 4*z + 5) == (4*t_0 + 5, 3*t_0 + 5)
    assert diop_solve(4*x + 3*y - 4*z + 5) == (t_0, 8*t_0 + 4*t_1 + 5, 7*t_0 + 3*t_1 + 5)
    assert diop_solve(4*x + 3*y - 4*z + 5, None) == (0, 5, 5)
    assert diop_solve(4*x + 2*y + 8*z - 5) == (None, None, None)
    assert diop_solve(5*x + 7*y - 2*z - 6) == (t_0, -3*t_0 + 2*t_1 + 6, -8*t_0 + 7*t_1 + 18)
    assert diop_solve(3*x - 6*y + 12*z - 9) == (2*t_0 + 3, t_0 + 2*t_1, t_1)
    assert diop_solve(6*w + 9*x + 20*y - z) == (t_0, t_1, t_1 + t_2, 6*t_0 + 29*t_1 + 20*t_2)
    # to ignore constant factors, use diophantine
    raises(TypeError, lambda: diop_solve(x/2))
def test_quadratic_simple_hyperbolic_case():
    """Binary quadratics with no square terms: A = C = 0 and B != 0."""
    # Simple Hyperbolic case: A = C = 0 and B != 0
    assert diop_solve(3*x*y + 34*x - 12*y + 1) == \
        set([(-133, -11), (5, -57)])
    assert diop_solve(6*x*y + 2*x + 3*y + 1) == set([])
    assert diop_solve(-13*x*y + 2*x - 4*y - 54) == set([(27, 0)])
    assert diop_solve(-27*x*y - 30*x - 12*y - 54) == set([(-14, -1)])
    assert diop_solve(2*x*y + 5*x + 56*y + 7) == set([(-161, -3),
        (-47, -6), (-35, -12), (-29, -69),
        (-27, 64), (-21, 7), (-9, 1),
        (105, -2)])
    assert diop_solve(6*x*y + 9*x + 2*y + 3) == set([])
    assert diop_solve(x*y + x + y + 1) == set([(-1, t), (t, -1)])
    # truthiness check only: at least one solution must be found
    assert diophantine(48*x*y)
def test_quadratic_elliptical_case():
    """Binary quadratics with negative discriminant: B**2 - 4*A*C < 0."""
    # Elliptical case: B**2 - 4AC < 0
    # Two test cases highlighted require lot of memory due to quadratic_congruence() method.
    # This above method should be replaced by Pernici's square_mod() method when his PR gets merged.
    #assert diop_solve(42*x**2 + 8*x*y + 15*y**2 + 23*x + 17*y - 4915) == set([(-11, -1)])
    assert diop_solve(4*x**2 + 3*y**2 + 5*x - 11*y + 12) == set([])
    assert diop_solve(x**2 + y**2 + 2*x + 2*y + 2) == set([(-1, -1)])
    #assert diop_solve(15*x**2 - 9*x*y + 14*y**2 - 23*x - 14*y - 4950) == set([(-15, 6)])
    assert diop_solve(10*x**2 + 12*x*y + 12*y**2 - 34) == \
        set([(1, -2), (-1, -1), (1, 1), (-1, 2)])
def test_quadratic_parabolic_case():
    """Binary quadratics with zero discriminant: B**2 - 4*A*C = 0."""
    # Parabolic case: B**2 - 4AC = 0
    assert check_solutions(8*x**2 - 24*x*y + 18*y**2 + 5*x + 7*y + 16)
    assert check_solutions(8*x**2 - 24*x*y + 18*y**2 + 6*x + 12*y - 6)
    assert check_solutions(8*x**2 + 24*x*y + 18*y**2 + 4*x + 6*y - 7)
    assert check_solutions(x**2 + 2*x*y + y**2 + 2*x + 2*y + 1)
    assert check_solutions(x**2 - 2*x*y + y**2 + 2*x + 2*y + 1)
    assert check_solutions(y**2 - 41*x + 40)


def test_quadratic_perfect_square():
    """Positive discriminant that is itself a perfect square."""
    # B**2 - 4*A*C > 0
    # B**2 - 4*A*C is a perfect square
    assert check_solutions(48*x*y)
    assert check_solutions(4*x**2 - 5*x*y + y**2 + 2)
    assert check_solutions(-2*x**2 - 3*x*y + 2*y**2 - 2*x - 17*y + 25)
    assert check_solutions(12*x**2 + 13*x*y + 3*y**2 - 2*x + 3*y - 12)
    assert check_solutions(8*x**2 + 10*x*y + 2*y**2 - 32*x - 13*y - 23)
    assert check_solutions(4*x**2 - 4*x*y - 3*y - 8*x - 3)
    assert check_solutions(-4*x*y - 4*y**2 - 3*y - 5*x - 10)
    assert check_solutions(x**2 - y**2 - 2*x - 2*y)
    assert check_solutions(x**2 - 9*y**2 - 2*x - 6*y)
    assert check_solutions(4*x**2 - 9*y**2 - 4*x - 12*y - 3)
def test_quadratic_non_perfect_square():
    """Positive discriminant that is not a perfect square (Pell-like)."""
    # B**2 - 4*A*C is not a perfect square
    # Used check_solutions() since the solutions are complex expressions involving
    # square roots and exponents
    assert check_solutions(x**2 - 2*x - 5*y**2)
    assert check_solutions(3*x**2 - 2*y**2 - 2*x - 2*y)
    assert check_solutions(x**2 - x*y - y**2 - 3*y)
    assert check_solutions(x**2 - 9*y**2 - 2*x - 6*y)


def test_issue_9106():
    """Every reported solution must actually satisfy the equation."""
    eq = -48 - 2*x*(3*x - 1) + y*(3*y - 1)
    v = (x, y)
    for sol in diophantine(eq):
        assert not diop_simplify(eq.xreplace(dict(zip(v, sol))))


@slow
def test_quadratic_non_perfect_slow():
    """Slow non-perfect-square discriminant cases (large intermediate numbers)."""
    assert check_solutions(8*x**2 + 10*x*y - 2*y**2 - 32*x - 13*y - 23)
    # This leads to very large numbers.
    # assert check_solutions(5*x**2 - 13*x*y + y**2 - 4*x - 4*y - 15)
    assert check_solutions(-3*x**2 - 2*x*y + 7*y**2 - 5*x - 7)
    assert check_solutions(-4 - x + 4*x**2 - y - 3*x*y - 4*y**2)
    assert check_solutions(1 + 2*x + 2*x**2 + 2*y + x*y - 2*y**2)
def test_DN():
    """Generalized Pell equation x**2 - D*y**2 = N (fundamental solutions)."""
    # Most of the test cases were adapted from,
    # Solving the generalized Pell equation x**2 - D*y**2 = N, John P. Robertson, July 31, 2004.
    # http://www.jpr2718.org/pell.pdf
    # others are verified using Wolfram Alpha.
    # Covers cases where D <= 0 or D > 0 and D is a square or N = 0
    # Solutions are straightforward in these cases.
    assert diop_DN(3, 0) == [(0, 0)]
    assert diop_DN(-17, -5) == []
    assert diop_DN(-19, 23) == [(2, 1)]
    assert diop_DN(-13, 17) == [(2, 1)]
    assert diop_DN(-15, 13) == []
    assert diop_DN(0, 5) == []
    assert diop_DN(0, 9) == [(3, t)]
    assert diop_DN(9, 0) == [(3*t, t)]
    assert diop_DN(16, 24) == []
    assert diop_DN(9, 180) == [(18, 4)]
    assert diop_DN(9, -180) == [(12, 6)]
    assert diop_DN(7, 0) == [(0, 0)]
    # When equation is x**2 + y**2 = N
    # Solutions are interchangeable
    assert diop_DN(-1, 5) == [(1, 2)]
    assert diop_DN(-1, 169) == [(5, 12), (0, 13)]
    # D > 0 and D is not a square
    # N = 1
    assert diop_DN(13, 1) == [(649, 180)]
    assert diop_DN(980, 1) == [(51841, 1656)]
    assert diop_DN(981, 1) == [(158070671986249, 5046808151700)]
    assert diop_DN(986, 1) == [(49299, 1570)]
    assert diop_DN(991, 1) == [(379516400906811930638014896080, 12055735790331359447442538767)]
    assert diop_DN(17, 1) == [(33, 8)]
    assert diop_DN(19, 1) == [(170, 39)]
    # N = -1
    assert diop_DN(13, -1) == [(18, 5)]
    assert diop_DN(991, -1) == []
    assert diop_DN(41, -1) == [(32, 5)]
    assert diop_DN(290, -1) == [(17, 1)]
    assert diop_DN(21257, -1) == [(13913102721304, 95427381109)]
    assert diop_DN(32, -1) == []
    # |N| > 1
    # Some tests were created using calculator at
    # http://www.numbertheory.org/php/patz.html
    assert diop_DN(13, -4) == [(3, 1), (393, 109), (36, 10)]
    # Source I referred returned (3, 1), (393, 109) and (-3, 1) as fundamental solutions
    # So (-3, 1) and (393, 109) should be in the same equivalent class
    assert equivalent(-3, 1, 393, 109, 13, -4) == True
    assert diop_DN(13, 27) == [(220, 61), (40, 11), (768, 213), (12, 3)]
    assert set(diop_DN(157, 12)) == \
        set([(13, 1), (10663, 851), (579160, 46222), \
            (483790960, 38610722), (26277068347, 2097138361), (21950079635497, 1751807067011)])
    assert diop_DN(13, 25) == [(3245, 900)]
    assert diop_DN(192, 18) == []
    assert diop_DN(23, 13) == [(-6, 1), (6, 1)]
    assert diop_DN(167, 2) == [(13, 1)]
    assert diop_DN(167, -2) == []
    assert diop_DN(123, -2) == [(11, 1)]
    # One calculator returned [(11, 1), (-11, 1)] but both of these are in
    # the same equivalence class
    assert equivalent(11, 1, -11, 1, 123, -2)
    assert diop_DN(123, -23) == [(-10, 1), (10, 1)]
    assert diop_DN(0, 0, t) == [(0, t)]
    assert diop_DN(0, -1, t) == []
def test_bf_pell():
    """Brute-force solver for the generalized Pell equation."""
    assert diop_bf_DN(13, -4) == [(3, 1), (-3, 1), (36, 10)]
    assert diop_bf_DN(13, 27) == [(12, 3), (-12, 3), (40, 11), (-40, 11)]
    assert diop_bf_DN(167, -2) == []
    assert diop_bf_DN(1729, 1) == [(44611924489705, 1072885712316)]
    assert diop_bf_DN(89, -8) == [(9, 1), (-9, 1)]
    assert diop_bf_DN(21257, -1) == [(13913102721304, 95427381109)]
    assert diop_bf_DN(340, -4) == [(756, 41)]
    assert diop_bf_DN(-1, 0, t) == [(0, 0)]
    assert diop_bf_DN(0, 0, t) == [(0, t)]
    assert diop_bf_DN(4, 0, t) == [(2*t, t), (-2*t, t)]
    assert diop_bf_DN(3, 0, t) == [(0, 0)]
    assert diop_bf_DN(1, -2, t) == []


def test_length():
    """Period length of the continued fraction expansion used by the solver."""
    assert length(2, 1, 0) == 1
    assert length(-2, 4, 5) == 3
    assert length(-5, 4, 17) == 5
    assert length(0, 4, 13) == 6
    assert length(-31, 8, 613) == 69
    assert length(7, 13, 11) == 23
    assert length(-40, 5, 23) == 4
    assert length(1, 6, 4) == 2
def is_pell_transformation_ok(eq):
    """
    Test whether X*Y, X, or Y terms are present in the equation
    after transforming the equation using the transformation returned
    by transformation_to_pell(). If they are not present we are good.
    Moreover, coefficient of X**2 should be a divisor of coefficient of
    Y**2 and the constant term.
    """
    A, B = transformation_to_DN(eq)
    # Substitute the transformed coordinates (u, v) back into eq.
    u = (A*Matrix([X, Y]) + B)[0]
    v = (A*Matrix([X, Y]) + B)[1]
    simplified = diop_simplify(eq.subs(zip((x, y), (u, v))))
    coeff = dict([reversed(t.as_independent(*[X, Y])) for t in simplified.args])
    # Cross and linear terms must have been eliminated.
    for term in [X*Y, X, Y]:
        if term in coeff.keys():
            return False
    for term in [X**2, Y**2, 1]:
        if term not in coeff.keys():
            coeff[term] = 0
    if coeff[X**2] != 0:
        return divisible(coeff[Y**2], coeff[X**2]) and \
            divisible(coeff[1], coeff[X**2])
    return True
def test_transformation_to_pell():
    """General binary quadratics must transform into Pell form cleanly."""
    assert is_pell_transformation_ok(-13*x**2 - 7*x*y + y**2 + 2*x - 2*y - 14)
    assert is_pell_transformation_ok(-17*x**2 + 19*x*y - 7*y**2 - 5*x - 13*y - 23)
    assert is_pell_transformation_ok(x**2 - y**2 + 17)
    assert is_pell_transformation_ok(-x**2 + 7*y**2 - 23)
    assert is_pell_transformation_ok(25*x**2 - 45*x*y + 5*y**2 - 5*x - 10*y + 5)
    assert is_pell_transformation_ok(190*x**2 + 30*x*y + y**2 - 3*y - 170*x - 130)
    assert is_pell_transformation_ok(x**2 - 2*x*y - 190*y**2 - 7*y - 23*x - 89)
    assert is_pell_transformation_ok(15*x**2 - 9*x*y + 14*y**2 - 23*x - 14*y - 4950)


def test_find_DN():
    """find_DN extracts the (D, N) pair of the equivalent Pell equation."""
    assert find_DN(x**2 - 2*x - y**2) == (1, 1)
    assert find_DN(x**2 - 3*y**2 - 5) == (3, 5)
    assert find_DN(x**2 - 2*x*y - 4*y**2 - 7) == (5, 7)
    assert find_DN(4*x**2 - 8*x*y - y**2 - 9) == (20, 36)
    assert find_DN(7*x**2 - 2*x*y - y**2 - 12) == (8, 84)
    assert find_DN(-3*x**2 + 4*x*y - y**2) == (1, 0)
    assert find_DN(-13*x**2 - 7*x*y + y**2 + 2*x - 2*y - 14) == (101, -7825480)
def test_ldescent():
    """Lagrange descent: find (w, x, y) with a*x**2 + b*y**2 == w**2."""
    # Equations which have solutions
    u = ([(13, 23), (3, -11), (41, -113), (4, -7), (-7, 4), (91, -3), (1, 1), (1, -1),
        (4, 32), (17, 13), (123689, 1), (19, -570)])
    for a, b in u:
        w, x, y = ldescent(a, b)
        assert a*x**2 + b*y**2 == w**2
    # Both coefficients negative: no solution exists.
    assert ldescent(-1, -1) is None


def test_diop_ternary_quadratic_normal():
    """Normal-form ternary quadratics a*x**2 + b*y**2 + c*z**2 = 0."""
    assert check_solutions(234*x**2 - 65601*y**2 - z**2)
    assert check_solutions(23*x**2 + 616*y**2 - z**2)
    assert check_solutions(5*x**2 + 4*y**2 - z**2)
    assert check_solutions(3*x**2 + 6*y**2 - 3*z**2)
    assert check_solutions(x**2 + 3*y**2 - z**2)
    assert check_solutions(4*x**2 + 5*y**2 - z**2)
    assert check_solutions(x**2 + y**2 - z**2)
    assert check_solutions(16*x**2 + y**2 - 25*z**2)
    assert check_solutions(6*x**2 - y**2 + 10*z**2)
    assert check_solutions(213*x**2 + 12*y**2 - 9*z**2)
    assert check_solutions(34*x**2 - 3*y**2 - 301*z**2)
    assert check_solutions(124*x**2 - 30*y**2 - 7729*z**2)
def is_normal_transformation_ok(eq):
    """True if transformation_to_normal removes all cross terms from *eq*."""
    A = transformation_to_normal(eq)
    X, Y, Z = A*Matrix([x, y, z])
    simplified = diop_simplify(eq.subs(zip((x, y, z), (X, Y, Z))))
    coeff = dict([reversed(t.as_independent(*[X, Y, Z])) for t in simplified.args])
    # A normal-form ternary quadratic has no mixed X*Y, Y*Z or X*Z terms.
    for term in [X*Y, Y*Z, X*Z]:
        if term in coeff.keys():
            return False
    return True


def test_transformation_to_normal():
    """Cross-term-bearing ternary quadratics must normalize cleanly."""
    assert is_normal_transformation_ok(x**2 + 3*y**2 + z**2 - 13*x*y - 16*y*z + 12*x*z)
    assert is_normal_transformation_ok(x**2 + 3*y**2 - 100*z**2)
    assert is_normal_transformation_ok(x**2 + 23*y*z)
    assert is_normal_transformation_ok(3*y**2 - 100*z**2 - 12*x*y)
    assert is_normal_transformation_ok(x**2 + 23*x*y - 34*y*z + 12*x*z)
    assert is_normal_transformation_ok(z**2 + 34*x*y - 23*y*z + x*z)
    assert is_normal_transformation_ok(x**2 + y**2 + z**2 - x*y - y*z - x*z)
    assert is_normal_transformation_ok(x**2 + 2*y*z + 3*z**2)
    assert is_normal_transformation_ok(x*y + 2*x*z + 3*y*z)
    assert is_normal_transformation_ok(2*x*z + 3*y*z)
def test_diop_ternary_quadratic():
    """General homogeneous ternary quadratics, including degenerate cases."""
    assert check_solutions(2*x**2 + z**2 + y**2 - 4*x*y)
    assert check_solutions(x**2 - y**2 - z**2 - x*y - y*z)
    assert check_solutions(3*x**2 - x*y - y*z - x*z)
    assert check_solutions(x**2 - y*z - x*z)
    assert check_solutions(5*x**2 - 3*x*y - x*z)
    assert check_solutions(4*x**2 - 5*y**2 - x*z)
    assert check_solutions(3*x**2 + 2*y**2 - z**2 - 2*x*y + 5*y*z - 7*y*z)
    assert check_solutions(8*x**2 - 12*y*z)
    assert check_solutions(45*x**2 - 7*y**2 - 8*x*y - z**2)
    assert check_solutions(x**2 - 49*y**2 - z**2 + 13*z*y - 8*x*y)
    assert check_solutions(90*x**2 + 3*y**2 + 5*x*y + 2*z*y + 5*x*z)
    assert check_solutions(x**2 + 3*y**2 + z**2 - x*y - 17*y*z)
    assert check_solutions(x**2 + 3*y**2 + z**2 - x*y - 16*y*z + 12*x*z)
    assert check_solutions(x**2 + 3*y**2 + z**2 - 13*x*y - 16*y*z + 12*x*z)
    assert check_solutions(x*y - 7*y*z + 13*x*z)
    assert diop_ternary_quadratic_normal(x**2 + y**2 + z**2) == (None, None, None)
    assert diop_ternary_quadratic_normal(x**2 + y**2) is None
    raises(ValueError, lambda:
        _diop_ternary_quadratic_normal((x, y, z),
        {x*y: 1, x**2: 2, y**2: 3, z**2: 0}))
    eq = -2*x*y - 6*x*z + 7*y**2 - 3*y*z + 4*z**2
    assert diop_ternary_quadratic(eq) == (7, 2, 0)
    assert diop_ternary_quadratic_normal(4*x**2 + 5*y**2 - z**2) == \
        (1, 0, 2)
    assert diop_ternary_quadratic(x*y + 2*y*z) == \
        (-2, 0, n1)
    eq = -5*x*y - 8*x*z - 3*y*z + 8*z**2
    assert parametrize_ternary_quadratic(eq) == \
        (64*p**2 - 24*p*q, -64*p*q + 64*q**2, 40*p*q)
    # this cannot be tested with diophantine because it will
    # factor into a product
    assert diop_solve(x*y + 2*y*z) == (-4*p*q, -2*n1*p**2 + 2*p**2, 2*p*q)


def test_square_factor():
    """square_factor(n) returns the largest f with f**2 dividing n."""
    assert square_factor(1) == square_factor(-1) == 1
    assert square_factor(0) == 1
    assert square_factor(5) == square_factor(-5) == 1
    assert square_factor(4) == square_factor(-4) == 2
    assert square_factor(12) == square_factor(-12) == 2
    assert square_factor(6) == 1
    assert square_factor(18) == 3
    assert square_factor(52) == 2
    assert square_factor(49) == 7
    assert square_factor(392) == 14
    # A factorint dict is also accepted as input.
    assert square_factor(factorint(-12)) == 2
def test_parametrize_ternary_quadratic():
    """Parametric solutions of homogeneous ternary quadratics."""
    assert check_solutions(x**2 + y**2 - z**2)
    assert check_solutions(x**2 + 2*x*y + z**2)
    assert check_solutions(234*x**2 - 65601*y**2 - z**2)
    assert check_solutions(3*x**2 + 2*y**2 - z**2 - 2*x*y + 5*y*z - 7*y*z)
    assert check_solutions(x**2 - y**2 - z**2)
    assert check_solutions(x**2 - 49*y**2 - z**2 + 13*z*y - 8*x*y)
    assert check_solutions(8*x*y + z**2)
    assert check_solutions(124*x**2 - 30*y**2 - 7729*z**2)
    assert check_solutions(236*x**2 - 225*y**2 - 11*x*y - 13*y*z - 17*x*z)
    assert check_solutions(90*x**2 + 3*y**2 + 5*x*y + 2*z*y + 5*x*z)
    assert check_solutions(124*x**2 - 30*y**2 - 7729*z**2)


def test_no_square_ternary_quadratic():
    """Ternary quadratics made of cross terms only (no squares)."""
    assert check_solutions(2*x*y + y*z - 3*x*z)
    assert check_solutions(189*x*y - 345*y*z - 12*x*z)
    assert check_solutions(23*x*y + 34*y*z)
    assert check_solutions(x*y + y*z + z*x)
    assert check_solutions(23*x*y + 23*y*z + 23*x*z)


def test_descent():
    """Full descent: solutions of a*x**2 + b*y**2 = w**2 and invalid input."""
    u = ([(13, 23), (3, -11), (41, -113), (91, -3), (1, 1), (1, -1), (17, 13), (123689, 1), (19, -570)])
    for a, b in u:
        w, x, y = descent(a, b)
        assert a*x**2 + b*y**2 == w**2
    # the docstring warns against bad input, so these are expected results
    # - can't both be negative
    raises(TypeError, lambda: descent(-1, -3))
    # A can't be zero unless B != 1
    raises(ZeroDivisionError, lambda: descent(0, 3))
    # supposed to be square-free
    raises(TypeError, lambda: descent(4, 3))
def test_diophantine():
    """Top-level diophantine() dispatcher over mixed equation types.

    Fix: the final rational-expression check lost its ``==`` -- the expected
    set literal was a bare (no-op) expression on its own line, so the
    assertion was never actually made.  The comparison is restored.
    """
    assert check_solutions((x - y)*(y - z)*(z - x))
    assert check_solutions((x - y)*(x**2 + y**2 - z**2))
    assert check_solutions((x - 3*y + 7*z)*(x**2 + y**2 - z**2))
    assert check_solutions((x**2 - 3*y**2 - 1))
    assert check_solutions(y**2 + 7*x*y)
    assert check_solutions(x**2 - 3*x*y + y**2)
    assert check_solutions(z*(x**2 - y**2 - 15))
    assert check_solutions(x*(2*y - 2*z + 5))
    assert check_solutions((x**2 - 3*y**2 - 1)*(x**2 - y**2 - 15))
    assert check_solutions((x**2 - 3*y**2 - 1)*(y - 7*z))
    assert check_solutions((x**2 + y**2 - z**2)*(x - 7*y - 3*z + 4*w))
    # Following test case caused problems in parametric representation
    # But this can be solved by factoring out y.
    # No need to use methods for ternary quadratic equations.
    assert check_solutions(y**2 - 7*x*y + 4*y*z)
    assert check_solutions(x**2 - 2*x + 1)
    assert diophantine(x - y) == diophantine(Eq(x, y))
    assert diophantine(3*x*pi - 2*y*pi) == set([(2*t_0, 3*t_0)])
    assert diophantine(x**2 + y**2 + z**2 - 14) == set([(1, 2, 3)])
    assert diophantine(x**2 + 15*x/14 - 3) == set()
    # test issue 11049
    eq = 92*x**2 - 99*y**2 - z**2
    coeff = eq.as_coefficients_dict()
    assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
        (9, 7, 51)
    assert diophantine(eq) == set([(
        891*p**2 + 9*q**2, -693*p**2 - 102*p*q + 7*q**2,
        5049*p**2 - 1386*p*q - 51*q**2)])
    eq = 2*x**2 + 2*y**2 - z**2
    coeff = eq.as_coefficients_dict()
    assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
        (1, 1, 2)
    assert diophantine(eq) == set([(
        2*p**2 - q**2, -2*p**2 + 4*p*q - q**2,
        4*p**2 - 4*p*q + 2*q**2)])
    eq = 411*x**2 + 57*y**2 - 221*z**2
    coeff = eq.as_coefficients_dict()
    assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
        (2021, 2645, 3066)
    assert diophantine(eq) == \
        set([(115197*p**2 - 446641*q**2, -150765*p**2 + 1355172*p*q -
        584545*q**2, 174762*p**2 - 301530*p*q + 677586*q**2)])
    eq = 573*x**2 + 267*y**2 - 984*z**2
    coeff = eq.as_coefficients_dict()
    assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
        (49, 233, 127)
    assert diophantine(eq) == \
        set([(4361*p**2 - 16072*q**2, -20737*p**2 + 83312*p*q - 76424*q**2,
        11303*p**2 - 41474*p*q + 41656*q**2)])
    # this produces factors during reconstruction
    eq = x**2 + 3*y**2 - 12*z**2
    coeff = eq.as_coefficients_dict()
    assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
        (0, 2, 1)
    assert diophantine(eq) == \
        set([(24*p*q, 2*p**2 - 24*q**2, p**2 + 12*q**2)])
    # solvers have not been written for every type
    raises(NotImplementedError, lambda: diophantine(x*y**2 + 1))
    # rational expressions
    assert diophantine(1/x) == set()
    # the expected set was previously a dangling expression; compare it
    assert diophantine(1/x + 1/y - S.Half) == \
        set([(6, 3), (-2, 1), (4, 4), (1, -2), (3, 6)])
def test_general_pythagorean():
    """General Pythagorean equations: sums of squares equal to a square."""
    from sympy.abc import a, b, c, d, e
    assert check_solutions(a**2 + b**2 + c**2 - d**2)
    assert check_solutions(a**2 + 4*b**2 + 4*c**2 - d**2)
    assert check_solutions(9*a**2 + 4*b**2 + 4*c**2 - d**2)
    assert check_solutions(9*a**2 + 4*b**2 - 25*d**2 + 4*c**2)
    assert check_solutions(9*a**2 - 16*d**2 + 4*b**2 + 4*c**2)
    assert check_solutions(-e**2 + 9*a**2 + 4*b**2 + 4*c**2 + 25*d**2)
    assert check_solutions(16*a**2 - b**2 + 9*c**2 + d**2 + 25*e**2)
def test_diop_general_sum_of_squares_quick():
    """x1**2 + ... + xk**2 = n for small k and n (fast cases only)."""
    for i in range(3, 10):
        assert check_solutions(sum(i**2 for i in symbols(':%i' % i)) - i)
    raises(ValueError, lambda: _diop_general_sum_of_squares((x, y), 2))
    assert _diop_general_sum_of_squares((x, y, z), -2) == set()
    eq = x**2 + y**2 + z**2 - (1 + 4 + 9)
    assert diop_general_sum_of_squares(eq) == \
        set([(1, 2, 3)])
    eq = u**2 + v**2 + x**2 + y**2 + z**2 - 1313
    assert len(diop_general_sum_of_squares(eq, 3)) == 3
    # issue 11016
    var = symbols(':5') + (symbols('6', negative=True),)
    eq = Add(*[i**2 for i in var]) - 112
    assert diophantine(eq) == set(
        [(0, 1, 1, 5, 6, -7), (1, 1, 1, 3, 6, -8), (2, 3, 3, 4,
        5, -7), (0, 1, 1, 1, 3, -10), (0, 0, 4, 4, 4, -8), (1, 2, 3,
        3, 5, -8), (0, 1, 2, 3, 7, -7), (2, 2, 4, 4, 6, -6), (1, 1,
        3, 4, 6, -7), (0, 2, 3, 3, 3, -9), (0, 0, 2, 2, 2, -10), (1,
        1, 2, 3, 4, -9), (0, 1, 1, 2, 5, -9), (0, 0, 2, 6, 6, -6),
        (1, 3, 4, 5, 5, -6), (0, 2, 2, 2, 6, -8), (0, 3, 3, 3, 6,
        -7), (0, 2, 3, 5, 5, -7), (0, 1, 5, 5, 5, -6)])
    # handle negated squares with signsimp
    assert diophantine(12 - x**2 - y**2 - z**2) == set([(2, 2, 2)])
    # diophantine handles simplification, so classify_diop should
    # not have to look for additional patterns that are removed
    # by diophantine
    eq = a**2 + b**2 + c**2 + d**2 - 4
    raises(NotImplementedError, lambda: classify_diop(-eq))
def test_diop_partition():
    """partition(n, k): k-part integer partitions of n (optionally with zeros)."""
    for n in [8, 10]:
        for k in range(1, 8):
            for p in partition(n, k):
                assert len(p) == k
    assert [p for p in partition(3, 5)] == []
    assert [list(p) for p in partition(3, 5, 1)] == [
        [0, 0, 0, 0, 3], [0, 0, 0, 1, 2], [0, 0, 1, 1, 1]]
    assert list(partition(0)) == [()]
    assert list(partition(1, 0)) == [()]
    assert [list(i) for i in partition(3)] == [[1, 1, 1], [1, 2], [3]]
def test_prime_as_sum_of_two_squares():
    """Primes p = 1 (mod 4) decompose as a**2 + b**2; others return None."""
    for i in [5, 13, 17, 29, 37, 41, 2341, 3557, 34841, 64601]:
        a, b = prime_as_sum_of_two_squares(i)
        assert a**2 + b**2 == i
    assert prime_as_sum_of_two_squares(7) is None
    ans = prime_as_sum_of_two_squares(800029)
    assert ans == (450, 773) and type(ans[0]) is int


def test_sum_of_three_squares():
    """Three-square representations; numbers of form 4**a*(8*b + 7) get None."""
    for i in [0, 1, 2, 34, 123, 34304595905, 34304595905394941, 343045959052344,
        800, 801, 802, 803, 804, 805, 806]:
        a, b, c = sum_of_three_squares(i)
        assert a**2 + b**2 + c**2 == i
    assert sum_of_three_squares(7) is None
    assert sum_of_three_squares((4**5)*15) is None
    assert sum_of_three_squares(25) == (5, 0, 0)
    assert sum_of_three_squares(4) == (0, 0, 2)


def test_sum_of_four_squares():
    """Every nonnegative integer is a sum of four squares (Lagrange)."""
    from random import randint
    # this should never fail
    n = randint(1, 100000000000000)
    assert sum(i**2 for i in sum_of_four_squares(n)) == n
    assert sum_of_four_squares(0) == (0, 0, 0, 0)
    assert sum_of_four_squares(14) == (0, 1, 2, 3)
    assert sum_of_four_squares(15) == (1, 1, 2, 3)
    assert sum_of_four_squares(18) == (1, 2, 2, 3)
    assert sum_of_four_squares(19) == (0, 1, 3, 3)
    assert sum_of_four_squares(48) == (0, 4, 4, 4)
def test_power_representation():
    """Check power_representation: each yielded tuple for (n, p, k) must
    have k entries whose p-th powers sum back to n.

    The original code consumed the generator with a manual
    ``next()``/``StopIteration`` loop; a plain ``for`` loop is the
    idiomatic equivalent.
    """
    tests = [(1729, 3, 2), (234, 2, 4), (2, 1, 2), (3, 1, 3), (5, 2, 2),
             (12352, 2, 4), (32760, 2, 3)]
    for n, p, k in tests:
        for rep in power_representation(n, p, k):
            assert len(rep) == k
            assert sum(r**p for r in rep) == n
    assert list(power_representation(20, 2, 4, True)) == \
        [(1, 1, 3, 3), (0, 0, 2, 4)]
    # invalid arguments are rejected
    raises(ValueError, lambda: list(power_representation(1.2, 2, 2)))
    raises(ValueError, lambda: list(power_representation(2, 0, 2)))
    raises(ValueError, lambda: list(power_representation(2, 2, 0)))
    assert list(power_representation(-1, 2, 2)) == []
    assert list(power_representation(1, 1, 1)) == [(1,)]
    assert list(power_representation(3, 2, 1)) == []
    assert list(power_representation(4, 2, 1)) == [(2,)]
    assert list(power_representation(3**4, 4, 6, zeros=True)) == \
        [(1, 2, 2, 2, 2, 2), (0, 0, 0, 0, 0, 3)]
    assert list(power_representation(3**4, 4, 5, zeros=False)) == []
    # odd powers admit negative bases; even powers do not
    assert list(power_representation(-2, 3, 2)) == [(-1, -1)]
    assert list(power_representation(-2, 4, 2)) == []
    assert list(power_representation(0, 3, 2, True)) == [(0, 0)]
    assert list(power_representation(0, 3, 2, False)) == []
    # when we are dealing with squares, do feasibility checks
    assert len(list(power_representation(4**10*(8*10 + 7), 2, 3))) == 0
    # there will be a recursion error if these aren't recognized
    big = 2**30
    for i in [13, 10, 7, 5, 4, 2, 1]:
        assert list(sum_of_powers(big, 2, big - i)) == []
def test_assumptions():
    """
    Test whether diophantine respects the assumptions.
    """
    # Test case taken from the StackOverflow question below regarding
    # assumptions in the diophantine module:
    # http://stackoverflow.com/questions/23301941/how-can-i-declare-natural-symbols-with-sympy
    m, n = symbols('m n', integer=True, positive=True)
    diof = diophantine(n ** 2 + m * n - 500)
    assert diof == set([(5, 20), (40, 10), (95, 5), (121, 4), (248, 2), (499, 1)])
    # with both symbols assumed non-positive, only negative solutions remain
    a, b = symbols('a b', integer=True, positive=False)
    diof = diophantine(a*b + 2*a + 3*b - 6)
    assert diof == set([(-15, -3), (-9, -4), (-7, -5), (-6, -6), (-5, -8), (-4, -14)])
def check_solutions(eq):
    """Verify that every solution returned by diophantine() satisfies *eq*.

    A solution is accepted when substituting it (symbols taken in default
    sort order) zeroes at least one multiplicative factor of the equation.
    Returns False as soon as any solution fails.
    """
    ordered_symbols = sorted(eq.free_symbols, key=default_sort_key)
    factors = Mul.make_args(eq)
    for solution in diophantine(eq):
        substitution = list(zip(ordered_symbols, solution))
        if not any(diop_simplify(f.subs(substitution)) == 0 for f in factors):
            return False
    return True
def test_diopcoverage():
    """Exercise rarely-hit helper paths of the diophantine module."""
    eq = (2*x + y + 1)**2
    assert diop_solve(eq) == set([(t_0, -2*t_0 - 1)])
    eq = 2*x**2 + 6*x*y + 12*x + 4*y**2 + 18*y + 18
    assert diop_solve(eq) == set([(t_0, -t_0 - 3), (2*t_0 - 3, -t_0)])
    assert diop_quadratic(x + y**2 - 3) == set([(-t**2 + 3, -t)])
    assert diop_linear(x + y - 3) == (t_0, 3 - t_0)
    # t=None requests the particular solution at t == 0
    assert base_solution_linear(0, 1, 2, t=None) == (0, 0)
    ans = (3*t - 1, -2*t + 1)
    assert base_solution_linear(4, 8, 12, t) == ans
    assert base_solution_linear(4, 8, 12, t=None) == tuple(_.subs(t, 0) for _ in ans)
    assert cornacchia(1, 1, 20) is None
    assert cornacchia(1, 1, 5) == set([(1, 2)])
    assert cornacchia(1, 2, 17) == set([(3, 2)])
    raises(ValueError, lambda: reconstruct(4, 20, 1))
    assert gaussian_reduce(4, 1, 3) == (1, 1)
    # general Pythagorean equation is sign-insensitive
    eq = -w**2 - x**2 - y**2 + z**2
    assert diop_general_pythagorean(eq) == \
        diop_general_pythagorean(-eq) == \
        (m1**2 + m2**2 - m3**2, 2*m1*m3,
         2*m2*m3, m1**2 + m2**2 + m3**2)
    # non-integral parameter coefficients make check_param give up
    assert check_param(S(3) + x/3, S(4) + x/2, S(2), x) == (None, None)
    assert check_param(S(3)/2, S(4) + x, S(2), x) == (None, None)
    assert check_param(S(4) + x, S(3)/2, S(2), x) == (None, None)
    assert _nint_or_floor(16, 10) == 2
    assert _odd(1) == (not _even(1)) == True
    assert _odd(0) == (not _even(0)) == False
    assert _remove_gcd(2, 4, 6) == (1, 2, 3)
    raises(TypeError, lambda: _remove_gcd((2, 4, 6)))
    assert sqf_normal(2 * 3**2 * 5, 2 * 5 * 11, 2 * 7**2 * 11) == \
        (11, 1, 5)
    # it's ok if these pass some day when the solvers are implemented
    raises(NotImplementedError, lambda: diophantine(x**2 + y**2 + x*y + 2*y*z - 12))
    raises(NotImplementedError, lambda: diophantine(x**3 + y**2))
def test_holzer():
    # arguments are (x, y, z, a, b, c) for a solution of
    # a*x**2 + b*y**2 - c*z**2 == 0 (see test_fail_holzer's `eq`)
    # if the input is good, don't let it diverge in holzer()
    # (but see test_fail_holzer below)
    assert holzer(2, 7, 13, 4, 79, 23) == (2, 7, 13)
    # None in uv condition met; solution is not Holzer reduced
    # so this will hopefully change but is here for coverage
    assert holzer(2, 6, 2, 1, 1, 10) == (2, 6, 2)
    # (2, 7, 14) is not a solution of the equation -- rejected
    raises(ValueError, lambda: holzer(2, 7, 14, 4, 79, 23))
@XFAIL
def test_fail_holzer():
    # Both xyz and ans solve the equation and satisfy the Holzer bound,
    # but holzer() currently does not reduce xyz to ans (hence XFAIL).
    eq = lambda x, y, z: a*x**2 + b*y**2 - c*z**2
    a, b, c = 4, 79, 23
    x, y, z = xyz = 26, 1, 11
    X, Y, Z = ans = 2, 7, 13
    assert eq(*xyz) == 0
    assert eq(*ans) == 0
    # both candidates are within the Holzer bound max(...) <= a*b*c
    assert max(a*x**2, b*y**2, c*z**2) <= a*b*c
    assert max(a*X**2, b*Y**2, c*Z**2) <= a*b*c
    h = holzer(x, y, z, a, b, c)
    assert h == ans # it would be nice to get the smaller soln
def test_issue_9539():
    # linear equation in four variables: a full parametric solution
    # (three free parameters t_0, t_1, t_2) is expected
    assert diophantine(6*w + 9*y + 20*x - z) == \
        set([(t_0, t_1, t_1 + t_2, 6*t_0 + 29*t_1 + 9*t_2)])
def test_issue_8943():
    # only the trivial solution is reported for this homogeneous quadratic
    assert diophantine(
        (3*(x**2 + y**2 + z**2) - 14*(x*y + y*z + z*x))) == \
        set([(0, 0, 0)])
def test_diop_sum_of_even_powers():
    eq = x**4 + y**4 + z**4 - 2673
    assert diop_solve(eq) == set([(3, 6, 6), (2, 4, 7)])
    assert diop_general_sum_of_even_powers(eq, 2) == set(
        [(3, 6, 6), (2, 4, 7)])
    # the negated form is not a supported pattern
    raises(NotImplementedError, lambda: diop_general_sum_of_even_powers(-eq, 2))
    # a symbol assumed negative picks up the sign-flipped value
    neg = symbols('neg', negative=True)
    eq = x**4 + y**4 + neg**4 - 2673
    assert diop_general_sum_of_even_powers(eq) == set([(-3, 6, 6)])
    assert diophantine(x**4 + y**4 + 2) == set()
    # limit=0 asks for no solutions at all
    assert diop_general_sum_of_even_powers(x**4 + y**4 - 2, limit=0) == set()
def test_sum_of_squares_powers():
    """Check sum_of_squares/sum_of_powers enumeration and edge cases.

    Fix: `tru` (the expected solution set) was computed but never used --
    only the length of `ans` was checked.  The exact-set assertion is now
    performed as well.
    """
    tru = set([
        (0, 0, 1, 1, 11), (0, 0, 5, 7, 7), (0, 1, 3, 7, 8), (0, 1, 4, 5, 9),
        (0, 3, 4, 7, 7), (0, 3, 5, 5, 8), (1, 1, 2, 6, 9), (1, 1, 6, 6, 7),
        (1, 2, 3, 3, 10), (1, 3, 4, 4, 9), (1, 5, 5, 6, 6), (2, 2, 3, 5, 9),
        (2, 3, 5, 6, 7), (3, 3, 4, 5, 8)])
    eq = u**2 + v**2 + x**2 + y**2 + z**2 - 123
    ans = diop_general_sum_of_squares(eq, oo) # allow oo to be used
    assert len(ans) == 14
    assert ans == tru  # previously `tru` was dead code
    raises(ValueError, lambda: list(sum_of_squares(10, -1)))
    assert list(sum_of_squares(-10, 2)) == []
    assert list(sum_of_squares(2, 3)) == []
    assert list(sum_of_squares(0, 3, True)) == [(0, 0, 0)]
    assert list(sum_of_squares(0, 3)) == []
    assert list(sum_of_squares(4, 1)) == [(2,)]
    assert list(sum_of_squares(5, 1)) == []
    assert list(sum_of_squares(50, 2)) == [(5, 5), (1, 7)]
    assert list(sum_of_squares(11, 5, True)) == [
        (1, 1, 1, 2, 2), (0, 0, 1, 1, 3)]
    assert list(sum_of_squares(8, 8)) == [(1, 1, 1, 1, 1, 1, 1, 1)]
    assert [len(list(sum_of_squares(i, 5, True))) for i in range(30)] == [
        1, 1, 1, 1, 2,
        2, 1, 1, 2, 2,
        2, 2, 2, 3, 2,
        1, 3, 3, 3, 3,
        4, 3, 3, 2, 2,
        4, 4, 4, 4, 5]
    assert [len(list(sum_of_squares(i, 5))) for i in range(30)] == [
        0, 0, 0, 0, 0,
        1, 0, 0, 1, 0,
        0, 1, 0, 1, 1,
        0, 1, 1, 0, 1,
        2, 1, 1, 1, 1,
        1, 1, 1, 1, 3]
    # every reported tuple must actually sum (of squares) to i
    for i in range(30):
        s1 = set(sum_of_squares(i, 5, True))
        assert not s1 or all(sum(j**2 for j in t) == i for t in s1)
        s2 = set(sum_of_squares(i, 5))
        assert all(sum(j**2 for j in t) == i for t in s2)
    raises(ValueError, lambda: list(sum_of_powers(2, -1, 1)))
    raises(ValueError, lambda: list(sum_of_powers(2, 1, -1)))
    assert list(sum_of_powers(-2, 3, 2)) == [(-1, -1)]
    assert list(sum_of_powers(-2, 4, 2)) == []
    assert list(sum_of_powers(2, 1, 1)) == [(2,)]
    assert list(sum_of_powers(2, 1, 3, True)) == [(0, 0, 2), (0, 1, 1)]
    assert list(sum_of_powers(5, 1, 2, True)) == [(0, 5), (1, 4), (2, 3)]
    assert list(sum_of_powers(6, 2, 2)) == []
    assert list(sum_of_powers(3**5, 3, 1)) == []
    assert list(sum_of_powers(3**6, 3, 1)) == [(9,)] and (9**3 == 3**6)
    assert list(sum_of_powers(2**1000, 5, 2)) == []
def test__can_do_sum_of_squares():
    """Feasibility pre-check for writing n as a sum of k squares."""
    # impossible: negative k, or negative n
    for n, k in ((3, -1), (-3, 1)):
        assert _can_do_sum_of_squares(n, k) is False
    # feasible cases (a truthy result suffices)
    for n, k in ((0, 1), (4, 1), (1, 2), (2, 2)):
        assert _can_do_sum_of_squares(n, k)
    # 3 is not a sum of two squares
    assert _can_do_sum_of_squares(3, 2) is False
def test_issue_9538():
    eq = x - 3*y + 2
    # the syms order controls the order of values in each solution tuple
    assert diophantine(eq, syms=[y,x]) == set([(t_0, 3*t_0 - 2)])
    # syms must be an ordered sequence, not a set
    raises(TypeError, lambda: diophantine(eq, syms=set([y,x])))
|
liikGit/MissionPlanner | refs/heads/master | Lib/site-packages/numpy/f2py/auxfuncs.py | 51 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
"""
Auxiliary functions for f2py2e.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) LICENSE.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/07/24 19:01:55 $
Pearu Peterson
"""
__version__ = "$Revision: 1.65 $"[10:-1]
import __version__
f2py_version = __version__.version
import pprint
import sys
import types
import cfuncs
errmess=sys.stderr.write
#outmess=sys.stdout.write
show=pprint.pprint
options={}
debugoptions=[]
wrapfuncs = 1
if sys.version_info[0] >= 3:
from functools import reduce
def outmess(t):
    # Write informational message `t` to stdout unless the module-level
    # f2py `options` dict disables verbose output (default: verbose on).
    if options.get('verbose',1):
        sys.stdout.write(t)
def debugcapi(var):
return 'capi' in debugoptions
def _isstring(var):
return 'typespec' in var and var['typespec']=='character' and (not isexternal(var))
def isstring(var):
return _isstring(var) and not isarray(var)
def ischaracter(var):
return isstring(var) and 'charselector' not in var
def isstringarray(var):
return isarray(var) and _isstring(var)
def isarrayofstrings(var):
# leaving out '*' for now so that
# `character*(*) a(m)` and `character a(m,*)`
# are treated differently. Luckily `character**` is illegal.
return isstringarray(var) and var['dimension'][-1]=='(*)'
def isarray(var):
return 'dimension' in var and (not isexternal(var))
def isscalar(var):
return not (isarray(var) or isstring(var) or isexternal(var))
def iscomplex(var):
return isscalar(var) and var.get('typespec') in ['complex','double complex']
def islogical(var):
return isscalar(var) and var.get('typespec')=='logical'
def isinteger(var):
return isscalar(var) and var.get('typespec')=='integer'
def isreal(var):
return isscalar(var) and var.get('typespec')=='real'
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def islong_long(var):
if not isscalar(var):
return 0
if var.get('typespec') not in ['integer','logical']:
return 0
return get_kind(var)=='8'
def isunsigned_char(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-1'
def isunsigned_short(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-2'
def isunsigned(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-4'
def isunsigned_long_long(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-8'
def isdouble(var):
if not isscalar(var):
return 0
if not var.get('typespec')=='real':
return 0
return get_kind(var)=='8'
def islong_double(var):
if not isscalar(var):
return 0
if not var.get('typespec')=='real':
return 0
return get_kind(var)=='16'
def islong_complex(var):
if not iscomplex(var):
return 0
return get_kind(var)=='32'
def iscomplexarray(var):
return isarray(var) and var.get('typespec') in ['complex','double complex']
def isint1array(var):
return isarray(var) and var.get('typespec')=='integer' \
and get_kind(var)=='1'
def isunsigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-1'
def isunsigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-2'
def isunsignedarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-4'
def isunsigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-8'
def issigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='1'
def issigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='2'
def issigned_array(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='4'
def issigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='8'
def isallocatable(var):
    # Does `var` carry the Fortran ALLOCATABLE attribute?
    return 'allocatable' in var.get('attrspec', [])
def ismutable(var):
return not (not 'dimension' in var or isstring(var))
def ismoduleroutine(rout):
return 'modulename' in rout
def ismodule(rout):
    # Does the block dict describe a Fortran MODULE?
    return rout.get('block') == 'module'
def isfunction(rout):
    # Does the block dict describe a Fortran FUNCTION (vs. subroutine)?
    return rout.get('block') == 'function'
#def isfunction_wrap(rout):
# return wrapfuncs and (iscomplexfunction(rout) or isstringfunction(rout)) and (not isexternal(rout))
def isfunction_wrap(rout):
    # Should a Fortran wrapper be generated for this function?  Not for
    # intent(c) routines or externals; gated by the module-level
    # `wrapfuncs` flag.
    if isintent_c(rout):
        return 0
    return wrapfuncs and isfunction(rout) and (not isexternal(rout))
def issubroutine(rout):
return ('block' in rout and 'subroutine'==rout['block'])
def isroutine(rout):
return isfunction(rout) or issubroutine(rout)
def islogicalfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islogical(rout['vars'][a])
return 0
def islong_longfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islong_long(rout['vars'][a])
return 0
def islong_doublefunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islong_double(rout['vars'][a])
return 0
def iscomplexfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return iscomplex(rout['vars'][a])
return 0
def iscomplexfunction_warn(rout):
if iscomplexfunction(rout):
outmess("""\
**************************************************************
Warning: code with a function returning complex value
may not work correctly with your Fortran compiler.
Run the following test before using it in your applications:
$(f2py install dir)/test-site/{b/runme_scalar,e/runme}
When using GNU gcc/g77 compilers, codes should work correctly.
**************************************************************\n""")
return 1
return 0
def isstringfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return isstring(rout['vars'][a])
return 0
def hasexternals(rout):
return 'externals' in rout and rout['externals']
def isthreadsafe(rout):
return 'f2pyenhancements' in rout and 'threadsafe' in rout['f2pyenhancements']
def hasvariables(rout):
return 'vars' in rout and rout['vars']
def isoptional(var):
return ('attrspec' in var and 'optional' in var['attrspec'] and 'required' not in var['attrspec']) and isintent_nothide(var)
def isexternal(var):
    # Does `var` carry the Fortran EXTERNAL attribute?
    return 'external' in var.get('attrspec', [])
def isrequired(var):
return not isoptional(var) and isintent_nothide(var)
def isintent_in(var):
    # Default intent is `in`.  hide/inplace suppress it unconditionally;
    # out-like intents (out/inout/outin) suppress it unless an explicit
    # 'in' is also present.  Returns 1/0 as the rest of f2py expects.
    intent = var.get('intent')
    if not intent:
        return 1
    if 'hide' in intent or 'inplace' in intent:
        return 0
    if 'in' in intent:
        return 1
    if 'out' in intent or 'inout' in intent or 'outin' in intent:
        return 0
    return 1
def isintent_inout(var):
    # inout/outin intent, unless overridden by an explicit in, hide
    # or inplace marker.
    intent = var.get('intent', [])
    has_inout = 'inout' in intent or 'outin' in intent
    return has_inout and not (
        'in' in intent or 'hide' in intent or 'inplace' in intent)
def isintent_out(var):
return 'out' in var.get('intent',[])
def isintent_hide(var):
return ('intent' in var and ('hide' in var['intent'] or ('out' in var['intent'] and 'in' not in var['intent'] and (not l_or(isintent_inout,isintent_inplace)(var)))))
def isintent_nothide(var):
return not isintent_hide(var)
def isintent_c(var):
return 'c' in var.get('intent',[])
# def isintent_f(var):
# return not isintent_c(var)
def isintent_cache(var):
return 'cache' in var.get('intent',[])
def isintent_copy(var):
return 'copy' in var.get('intent',[])
def isintent_overwrite(var):
return 'overwrite' in var.get('intent',[])
def isintent_callback(var):
return 'callback' in var.get('intent',[])
def isintent_inplace(var):
return 'inplace' in var.get('intent',[])
def isintent_aux(var):
return 'aux' in var.get('intent',[])
def isintent_aligned4(var):
return 'aligned4' in var.get('intent',[])
def isintent_aligned8(var):
return 'aligned8' in var.get('intent',[])
def isintent_aligned16(var):
return 'aligned16' in var.get('intent',[])
# Map each intent predicate to the C macro name emitted into the
# generated wrapper's argument-flag code.
isintent_dict = {isintent_in:'INTENT_IN',isintent_inout:'INTENT_INOUT',
                 isintent_out:'INTENT_OUT',isintent_hide:'INTENT_HIDE',
                 isintent_cache:'INTENT_CACHE',
                 isintent_c:'INTENT_C',isoptional:'OPTIONAL',
                 isintent_inplace:'INTENT_INPLACE',
                 isintent_aligned4:'INTENT_ALIGNED4',
                 isintent_aligned8:'INTENT_ALIGNED8',
                 isintent_aligned16:'INTENT_ALIGNED16',
                 }
def isprivate(var):
return 'attrspec' in var and 'private' in var['attrspec']
def hasinitvalue(var):
return '=' in var
def hasinitvalueasstring(var):
if not hasinitvalue(var):
return 0
return var['='][0] in ['"',"'"]
def hasnote(var):
return 'note' in var
def hasresultnote(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return hasnote(rout['vars'][a])
return 0
def hascommon(rout):
return 'common' in rout
def containscommon(rout):
    # True (1) if the routine itself, or any nested body block
    # (recursively), declares a COMMON block.
    if hascommon(rout):
        return 1
    if hasbody(rout):
        for b in rout['body']:
            if containscommon(b):
                return 1
    return 0
def containsmodule(block):
    # True (1) if `block` is a module or contains one (recursively).
    if ismodule(block):
        return 1
    if not hasbody(block):
        return 0
    for b in block['body']:
        if containsmodule(b):
            return 1
    return 0
def hasbody(rout):
return 'body' in rout
def hascallstatement(rout):
return getcallstatement(rout) is not None
def istrue(var):
return 1
def isfalse(var):
return 0
class F2PYError(Exception):
    """Base exception for f2py-specific failures (raised via throw_error)."""
    pass
class throw_error:
    """Callable that raises F2PYError with a fixed message.

    Instances are used as values in rule tables: calling one with the
    offending variable raises, embedding both the variable and message.
    """
    def __init__(self,mess):
        self.mess = mess
    def __call__(self,var):
        mess = '\n\n var = %s\n Message: %s\n' % (var,self.mess)
        # call form instead of the Python-2-only `raise Exc, msg`
        # statement (valid on both Python 2 and 3)
        raise F2PYError(mess)
def l_and(*f):
    """Combine predicate functions with short-circuit ``and``.

    Returns g such that g(v) == f[0](v) and f[1](v) and ..., preserving
    Python's `and` semantics of returning the deciding operand itself.
    Replaces the original eval() of a constructed lambda source string
    with an ordinary closure (same behavior, no eval).
    """
    def conjunction(v):
        result = f[0](v)
        for func in f[1:]:
            if not result:
                # short-circuit: return the falsy operand, as `and` does
                return result
            result = func(v)
        return result
    return conjunction
def l_or(*f):
    """Combine predicate functions with short-circuit ``or``.

    Returns g such that g(v) == f[0](v) or f[1](v) or ..., preserving
    Python's `or` semantics of returning the deciding operand itself.
    Replaces the original eval() of a constructed lambda source string
    with an ordinary closure (same behavior, no eval).
    """
    def disjunction(v):
        result = f[0](v)
        for func in f[1:]:
            if result:
                # short-circuit: return the truthy operand, as `or` does
                return result
            result = func(v)
        return result
    return disjunction
def l_not(f):
    """Return the boolean negation of predicate `f`.

    Replaces the original eval('lambda v,f=f:not f(v)') with the lambda
    itself -- identical behavior, no eval.
    """
    return lambda v, f=f: not f(v)
def isdummyroutine(rout):
    # A routine is "dummy" when its fortranname f2py enhancement is
    # present but empty; missing keys mean "not dummy" (0).
    try:
        fortranname = rout['f2pyenhancements']['fortranname']
    except KeyError:
        return 0
    return fortranname == ''
def getfortranname(rout):
    # Name used to call the Fortran routine: an explicit `fortranname`
    # f2py enhancement wins; otherwise fall back to the routine's own name.
    try:
        name = rout['f2pyenhancements']['fortranname']
        if name=='':
            # empty fortranname marks a dummy routine -> use rout['name']
            raise KeyError
        if not name:
            # falsy but non-empty (e.g. None): report and fall back
            errmess('Failed to use fortranname from %s\n'%(rout['f2pyenhancements']))
            raise KeyError
    except KeyError:
        name = rout['name']
    return name
def getmultilineblock(rout,blockname,comment=1,counter=0):
    """Return the `counter`-th multiline block `blockname` of a routine.

    Block text lives under rout['f2pyenhancements']; triple-quote
    delimiters are stripped and, when `comment` is true, replaced with C
    comments marking the block's start and end.  Returns None when the
    requested block does not exist.

    Fix: the Python-2-only backtick repr syntax is replaced with repr(),
    and type(...) comparisons with isinstance() -- both compatible with
    Python 2 and 3.
    """
    try:
        r = rout['f2pyenhancements'].get(blockname)
    except KeyError:
        return
    if not r: return
    if counter > 0 and isinstance(r, str):
        # a bare string holds only one block; higher counters are absent
        return
    if isinstance(r, list):
        if counter >= len(r): return
        r = r[counter]
    if r[:3] == "'''":
        if comment:
            r = '\t/* start ' + blockname + \
                ' multiline (' + repr(counter) + ') */\n' + r[3:]
        else:
            r = r[3:]
        if r[-3:] == "'''":
            if comment:
                r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/'
            else:
                r = r[:-3]
        else:
            errmess("%s multiline block should end with `'''`: %s\n" \
                    % (blockname, repr(r)))
    return r
def getcallstatement(rout):
return getmultilineblock(rout,'callstatement')
def getcallprotoargument(rout,cb_map={}):
    # Build the C prototype argument list used when calling the wrapped
    # routine.  An explicit `callprotoargument` multiline block wins.
    # NOTE(review): the mutable default `cb_map={}` appears to be only
    # read here, never mutated -- confirm before refactoring.
    r = getmultilineblock(rout,'callprotoargument',comment=0)
    if r: return r
    if hascallstatement(rout):
        outmess('warning: callstatement is defined without callprotoargument\n')
        return
    from capi_maps import getctype
    arg_types,arg_types2 = [],[]
    if l_and(isstringfunction,l_not(isfunction_wrap))(rout):
        # string-valued functions get a result buffer pointer + length
        arg_types.extend(['char*','size_t'])
    for n in rout['args']:
        var = rout['vars'][n]
        if isintent_callback(var):
            continue
        if n in cb_map:
            # callback arguments use their generated typedef name
            ctype = cb_map[n]+'_typedef'
        else:
            ctype = getctype(var)
        if l_and(isintent_c,l_or(isscalar,iscomplex))(var):
            # intent(c) scalars/complex are passed by value -> no '*'
            pass
        elif isstring(var):
            pass
            #ctype = 'void*'
        else:
            ctype = ctype+'*'
        if isstring(var) or isarrayofstrings(var):
            # trailing size_t length arguments for character data
            arg_types2.append('size_t')
        arg_types.append(ctype)
    proto_args = ','.join(arg_types+arg_types2)
    if not proto_args:
        proto_args = 'void'
    #print proto_args
    return proto_args
def getusercode(rout):
return getmultilineblock(rout,'usercode')
def getusercode1(rout):
return getmultilineblock(rout,'usercode',counter=1)
def getpymethoddef(rout):
return getmultilineblock(rout,'pymethoddef')
def getargs(rout):
    """Return (args, sortargs) for a routine dict.

    `args` is the declared argument list; `sortargs` lists the same names
    reordered so that any names appearing in rout['sortvars'] come first.
    Without 'sortvars' the two are identical.
    """
    if 'args' not in rout:
        return [], []
    args = rout['args']
    if 'sortvars' not in rout:
        return args, args
    sortargs = [a for a in rout['sortvars'] if a in args]
    sortargs.extend(a for a in args if a not in sortargs)
    return args, sortargs
def getargs2(rout):
    # Like getargs(), but intent(aux) variables are prepended to the
    # argument list so they are wrapped as well.
    sortargs,args=[],rout.get('args',[])
    auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])\
               and a not in args]
    args = auxvars + args
    if 'sortvars' in rout:
        # names listed in sortvars come first, preserving their order
        for a in rout['sortvars']:
            if a in args: sortargs.append(a)
        for a in args:
            if a not in sortargs:
                sortargs.append(a)
    else: sortargs=auxvars + rout['args']
    return args,sortargs
def getrestdoc(rout):
    # Return the free-form documentation block for `rout`, if any.
    # Module-level docs are keyed by (block, name); top-level docs by None.
    if 'f2pymultilines' not in rout:
        return None
    key = None
    if rout['block'] == 'python module':
        key = (rout['block'], rout['name'])
    return rout['f2pymultilines'].get(key)
def gentitle(name):
    # Render `name` as a C comment banner padded with '*' to ~80 columns.
    pad = '*' * ((80 - len(name) - 6) // 2)
    return '/*%s %s %s*/' % (pad, name, pad)
def flatlist(l):
    # Recursively flatten nested lists into one flat list; a non-list
    # becomes a single-element list.  (Python 2 idioms: types.ListType is
    # `list`; on Python 3 `reduce` is imported from functools above.)
    if type(l)==types.ListType:
        return reduce(lambda x,y,f=flatlist:x+f(y),l,[])
    return [l]
def stripcomma(s):
    # Drop one trailing comma from `s` (works for strings and sequences).
    if not s:
        return s
    return s[:-1] if s[-1] == ',' else s
def replace(str,d,defaultsep=''):
    # Substitute '#key#' placeholders in template `str` using dict `d`.
    # When either argument is a list, recurse over its elements.
    # (Python 2 only: map() returns a list and dict.keys() is a list.)
    if type(d)==types.ListType:
        return map(lambda d,f=replace,sep=defaultsep,s=str:f(s,d,sep),d)
    if type(str)==types.ListType:
        return map(lambda s,f=replace,sep=defaultsep,d=d:f(s,d,sep),str)
    # NOTE(review): the keys are iterated twice (2*d.keys()), presumably
    # so values substituted on the first pass that themselves contain
    # '#key#' patterns get expanded on the second -- confirm intent.
    for k in 2*d.keys():
        if k=='separatorsfor':
            continue
        if 'separatorsfor' in d and k in d['separatorsfor']:
            sep=d['separatorsfor'][k]
        else:
            sep=defaultsep
        if type(d[k])==types.ListType:
            # list values are flattened and joined with the separator
            str=str.replace('#%s#'%(k),sep.join(flatlist(d[k])))
        else:
            str=str.replace('#%s#'%(k),d[k])
    return str
def dictappend(rd,ar):
    # Merge rule-dict `ar` into `rd` (mutated in place and returned).
    # Existing keys accumulate: strings are promoted to lists and
    # appended to; nested dicts are merged recursively.  Keys starting
    # with '_' are skipped.  `ar` may be a list of dicts, merged in order.
    # (Python 2 only: types.ListType/types.DictType.)
    if type(ar)==types.ListType:
        for a in ar:
            rd=dictappend(rd,a)
        return rd
    for k in ar.keys():
        if k[0]=='_':
            continue
        if k in rd:
            if type(rd[k])==str:
                rd[k]=[rd[k]]
            if type(rd[k])==types.ListType:
                if type(ar[k])==types.ListType:
                    rd[k]=rd[k]+ar[k]
                else:
                    rd[k].append(ar[k])
            elif type(rd[k])==types.DictType:
                if type(ar[k])==types.DictType:
                    if k=='separatorsfor':
                        # only add separators not already present
                        for k1 in ar[k].keys():
                            if k1 not in rd[k]:
                                rd[k][k1]=ar[k][k1]
                    else:
                        rd[k]=dictappend(rd[k],ar[k])
        else:
            rd[k]=ar[k]
    return rd
def applyrules(rules,d,var={}):
    # Apply a rule dict (or list of rule dicts) against substitution
    # dict `d`, producing a result dict of expanded template text.
    # Predicate keys (functions) in nested dicts are evaluated against
    # `var` to select which rule values apply.
    # NOTE(review): the mutable default `var={}` appears to be read-only
    # here -- confirm it is never mutated by rule predicates.
    ret={}
    if type(rules)==types.ListType:
        for r in rules:
            rr=applyrules(r,d,var)
            ret=dictappend(ret,rr)
            if '_break' in rr:
                # a rule may stop further rules from being applied
                break
        return ret
    if '_check' in rules and (not rules['_check'](var)):
        return ret
    if 'need' in rules:
        res = applyrules({'needs':rules['need']},d,var)
        if 'needs' in res:
            cfuncs.append_needs(res['needs'])
    for k in rules.keys():
        if k=='separatorsfor':
            ret[k]=rules[k]; continue
        if type(rules[k])==str:
            ret[k]=replace(rules[k],d)
        elif type(rules[k])==types.ListType:
            ret[k]=[]
            for i in rules[k]:
                ar=applyrules({k:i},d,var)
                if k in ar:
                    ret[k].append(ar[k])
        elif k[0]=='_':
            continue
        elif type(rules[k])==types.DictType:
            # predicate-keyed sub-rules: apply those whose predicate
            # accepts `var`
            ret[k]=[]
            for k1 in rules[k].keys():
                if type(k1)==types.FunctionType and k1(var):
                    if type(rules[k][k1])==types.ListType:
                        for i in rules[k][k1]:
                            if type(i)==types.DictType:
                                res=applyrules({'supertext':i},d,var)
                                if 'supertext' in res:
                                    i=res['supertext']
                                else: i=''
                            ret[k].append(replace(i,d))
                    else:
                        i=rules[k][k1]
                        if type(i)==types.DictType:
                            res=applyrules({'supertext':i},d)
                            if 'supertext' in res:
                                i=res['supertext']
                            else: i=''
                        ret[k].append(replace(i,d))
        else:
            # NOTE(review): this branch leaves ret[k] unset, so the
            # type(ret[k]) check below would raise KeyError -- it seems
            # unreachable with well-formed rule tables; confirm.
            errmess('applyrules: ignoring rule %s.\n'%`rules[k]`)
        if type(ret[k])==types.ListType:
            # collapse singleton lists and drop empty results
            if len(ret[k])==1:
                ret[k]=ret[k][0]
            if ret[k]==[]:
                del ret[k]
    return ret
|
40223240/2015cdb_g3 | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/webbrowser.py | 735 | from browser import window
__all__ = ["Error", "open", "open_new", "open_new_tab"]
class Error(Exception):
    """Exception class kept for stdlib webbrowser API compatibility."""
    pass
_target = { 0: '', 1: '_blank', 2: '_new' } # hack...
def open(url, new=0, autoraise=True):
    """Open *url* via the browser's window.open().

    Whether a genuinely new window or tab is used is decided by the
    browser, not the client; *autoraise* is unavailable for the same
    reason.  Returns True when the browser reports success.
    """
    return bool(window.open(url, _target[new]))
def open_new(url):
    # Request a new browser window (new=1 maps to target '_blank').
    return open(url, 1)
def open_new_tab(url):
    # Request a new tab (new=2 maps to target '_new'); the browser may
    # still choose a window instead.
    return open(url, 2)
|
fengbaicanhe/intellij-community | refs/heads/master | python/testData/mover/multiLineSelection10_afterDown.py | 83 | if True:
pass
<caret><selection>a = 2
b = 3</selection>
|
andydandy74/ClockworkForDynamo | refs/heads/master | nodes/0.7.x/python/WallLocationLine.FinishFaceInterior.py | 12 | import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
OUT = WallLocationLine.FinishFaceInterior |
leilihh/nova | refs/heads/stable/icehouse | nova/cert/rpcapi.py | 12 | # Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the cert manager RPC API.
"""
from oslo.config import cfg
from oslo import messaging
from nova import rpc
# Configuration exposed by this module: the topic cert services listen
# on, plus an upgrade_levels cap bounding outgoing RPC message versions.
rpcapi_opts = [
    cfg.StrOpt('cert_topic',
               default='cert',
               help='The topic cert nodes listen on'),
]
CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)
rpcapi_cap_opt = cfg.StrOpt('cert',
        help='Set a version cap for messages sent to cert services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class CertAPI(object):
    '''Client side of the cert rpc API.

    API version history:

        1.0 - Initial version.
        1.1 - Added get_backdoor_port()

        ... Grizzly and Havana support message version 1.1. So, any changes to
        existing methods in 1.x after that point should be done such that they
        can handle the version_cap being set to 1.1.

        2.0 - Major API rev for Icehouse
    '''

    # Release-name aliases accepted by the upgrade_levels.cert option.
    VERSION_ALIASES = {
        'grizzly': '1.1',
        'havana': '1.1',
    }

    def __init__(self):
        super(CertAPI, self).__init__()
        target = messaging.Target(topic=CONF.cert_topic, version='2.0')
        version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.cert,
                                               CONF.upgrade_levels.cert)
        self.client = rpc.get_client(target, version_cap=version_cap)

    def _get_compat_version(self, current, havana_compat):
        # Return `current` when the deployment's version cap allows it,
        # otherwise the pre-Icehouse fallback version.
        if not self.client.can_send_version(current):
            return havana_compat
        return current

    def _compat_call(self, ctxt, method, **kwargs):
        # NOTE(russellb) Havana compat: every cert method repeated this
        # version-negotiation + prepare + call boilerplate; it is
        # centralized here with identical behavior.
        version = self._get_compat_version('2.0', '1.0')
        cctxt = self.client.prepare(version=version)
        return cctxt.call(ctxt, method, **kwargs)

    def revoke_certs_by_user(self, ctxt, user_id):
        """Ask the cert service to revoke certs belonging to a user."""
        return self._compat_call(ctxt, 'revoke_certs_by_user',
                                 user_id=user_id)

    def revoke_certs_by_project(self, ctxt, project_id):
        """Ask the cert service to revoke certs belonging to a project."""
        return self._compat_call(ctxt, 'revoke_certs_by_project',
                                 project_id=project_id)

    def revoke_certs_by_user_and_project(self, ctxt, user_id, project_id):
        """Ask the cert service to revoke a user's certs in a project."""
        return self._compat_call(ctxt, 'revoke_certs_by_user_and_project',
                                 user_id=user_id, project_id=project_id)

    def generate_x509_cert(self, ctxt, user_id, project_id):
        """Ask the cert service to generate an x509 cert for user/project."""
        return self._compat_call(ctxt, 'generate_x509_cert',
                                 user_id=user_id, project_id=project_id)

    def fetch_ca(self, ctxt, project_id):
        """Fetch the CA cert for a project from the cert service."""
        return self._compat_call(ctxt, 'fetch_ca', project_id=project_id)

    def fetch_crl(self, ctxt, project_id):
        """Fetch the CRL for a project from the cert service."""
        return self._compat_call(ctxt, 'fetch_crl', project_id=project_id)

    def decrypt_text(self, ctxt, project_id, text):
        """Ask the cert service to decrypt `text` for the given project."""
        return self._compat_call(ctxt, 'decrypt_text',
                                 project_id=project_id, text=text)
|
developerinlondon/ansible-modules-extras | refs/heads/devel | messaging/rabbitmq_exchange.py | 74 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: rabbitmq_exchange
author: "Manuel Sousa (@manuel-sousa)"
version_added: "2.0"
short_description: This module manages rabbitMQ exchanges
description:
- This module uses rabbitMQ Rest API to create/delete exchanges
requirements: [ python requests ]
options:
name:
description:
- Name of the exchange to create
required: true
state:
description:
- Whether the exchange should be present or absent
- Only present implemented atm
choices: [ "present", "absent" ]
required: false
default: present
login_user:
description:
- rabbitMQ user for connection
required: false
default: guest
login_password:
description:
- rabbitMQ password for connection
required: false
default: false
login_host:
description:
- rabbitMQ host for connection
required: false
default: localhost
login_port:
description:
- rabbitMQ management api port
required: false
default: 15672
vhost:
description:
- rabbitMQ virtual host
required: false
default: "/"
durable:
description:
- whether exchange is durable or not
required: false
choices: [ "yes", "no" ]
default: yes
exchange_type:
description:
- type for the exchange
required: false
choices: [ "fanout", "direct", "headers", "topic" ]
aliases: [ "type" ]
default: direct
auto_delete:
description:
- if the exchange should delete itself after all queues/exchanges unbound from it
required: false
choices: [ "yes", "no" ]
default: no
internal:
description:
- exchange is available only for other exchanges
required: false
choices: [ "yes", "no" ]
default: no
arguments:
description:
- extra arguments for exchange. If defined this argument is a key/value dictionary
required: false
default: {}
'''
EXAMPLES = '''
# Create direct exchange
- rabbitmq_exchange: name=directExchange
# Create topic exchange on vhost
- rabbitmq_exchange: name=topicExchange type=topic vhost=myVhost
'''
import requests
import urllib
import json
def main():
    """Create or delete a RabbitMQ exchange via the management HTTP API.

    Reads module parameters, checks the exchange's current state with a GET,
    and issues a PUT (create) or DELETE (remove) only when a change is needed.
    Exits through module.exit_json()/fail_json() in every path.
    """
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='present', choices=['present', 'absent'], type='str'),
            name = dict(required=True, type='str'),
            login_user = dict(default='guest', type='str'),
            login_password = dict(default='guest', type='str', no_log=True),
            login_host = dict(default='localhost', type='str'),
            login_port = dict(default='15672', type='str'),
            vhost = dict(default='/', type='str'),
            durable = dict(default=True, choices=BOOLEANS, type='bool'),
            auto_delete = dict(default=False, choices=BOOLEANS, type='bool'),
            internal = dict(default=False, choices=BOOLEANS, type='bool'),
            exchange_type = dict(default='direct', aliases=['type'], type='str'),
            arguments = dict(default=dict(), type='dict')
        ),
        supports_check_mode = True
    )

    # BUG FIX: the exchange name, like the vhost, must be percent-encoded so
    # names containing '/', spaces, '#', etc. form a valid URL path segment.
    # quote() with safe='' leaves plain names unchanged, so this is
    # backward-compatible.
    url = "http://%s:%s/api/exchanges/%s/%s" % (
        module.params['login_host'],
        module.params['login_port'],
        urllib.quote(module.params['vhost'], ''),
        urllib.quote(module.params['name'], '')
    )

    # Check if exchange already exists.
    r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))

    if r.status_code==200:
        exchange_exists = True
        response = r.json()
    elif r.status_code==404:
        exchange_exists = False
        response = r.text
    else:
        # fail_json() terminates the module, so execution never continues
        # past this branch with exchange_exists undefined.
        module.fail_json(
            msg = "Invalid response from RESTAPI when trying to check if exchange exists",
            details = r.text
        )

    if module.params['state']=='present':
        change_required = not exchange_exists
    else:
        change_required = exchange_exists

    # The management API cannot alter attributes of an existing exchange;
    # detect an attempted change and fail loudly instead of silently ignoring it.
    if not change_required and r.status_code==200 and module.params['state'] == 'present':
        if not (
            response['durable'] == module.params['durable'] and
            response['auto_delete'] == module.params['auto_delete'] and
            response['internal'] == module.params['internal'] and
            response['type'] == module.params['exchange_type']
        ):
            module.fail_json(
                msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing exchanges"
            )

    # In check mode, report whether a change would happen without doing it.
    if module.check_mode:
        module.exit_json(
            changed= change_required,
            name = module.params['name'],
            details = response,
            arguments = module.params['arguments']
        )

    # Apply the change.
    if change_required:
        if module.params['state'] == 'present':
            r = requests.put(
                    url,
                    auth = (module.params['login_user'],module.params['login_password']),
                    headers = { "content-type": "application/json"},
                    data = json.dumps({
                        "durable": module.params['durable'],
                        "auto_delete": module.params['auto_delete'],
                        "internal": module.params['internal'],
                        "type": module.params['exchange_type'],
                        "arguments": module.params['arguments']
                    })
                )
        elif module.params['state'] == 'absent':
            r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))

        if r.status_code == 204:
            module.exit_json(
                changed = True,
                name = module.params['name']
            )
        else:
            # BUG FIX: the failure message previously always said "creating",
            # even when the failed operation was a deletion.
            module.fail_json(
                msg = "Error %s exchange" % ("creating" if module.params['state'] == 'present' else "deleting"),
                status = r.status_code,
                details = r.text
            )
    else:
        module.exit_json(
            changed = False,
            name = module.params['name']
        )

# import module snippets
from ansible.module_utils.basic import *
main()
|
kelvin13/Knockout | refs/heads/master | meredith/elements.py | 2 | from meredith.box import Box, random_serial
from meredith.styles import _text_DNA
font_serial_generator = random_serial()
class Fontpost(Box):
    """Abstract inline marker that opens or closes a font/style change.

    Equality is based solely on the 'class' attribute, so two posts of the
    same concrete type referring to the same text class compare equal.
    """
    name = '_f_'
    inline = True
    DNA = [('class', 'texttc', '')]

    def copy(self):
        duplicate = self.__class__(self.KT, self.attrs)
        return duplicate

    def __eq__(self, other):
        if type(other) is not self.__class__:
            return False
        return other['class'] == self['class']
class PosFontpost(Fontpost):
    """Opening font marker: introduces a (possibly overriding) text style."""
    name = 'fo'
    countersign = True
    # Names of the style attributes a post may override, taken from the
    # shared text DNA table.
    fixed_attrs = {attribute[0] for attribute in _text_DNA}
    DNA = Fontpost.DNA + [attribute[:2] for attribute in _text_DNA]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.isbase = False
        self._update_hash()

    def _update_hash(self):
        # A fresh serial is only needed when this post actually overrides a
        # style attribute; otherwise it carries no style of its own.
        overrides_style = bool(self.keys() & self.__class__.fixed_attrs)
        self.stylehash = next(font_serial_generator) if overrides_style else None

    def after(self, A):
        self._update_hash()

    def __str__(self):
        return '<fo/>'
class NegFontpost(Fontpost):
    # Closing font marker; 'pop' counts how many style levels to unwind.
    name = 'fc'
    countersign = False
    DNA = Fontpost.DNA + [('pop', 'int', 0)]
    def __str__(self):
        return '<fc/>'
class Line_break(Box):
    # Inline forced line break.
    name = 'br'
    inline = True
    def __str__(self):
        return '<br/>'
class Reverse(Box):
    # Inline directional/language switch marker.
    name = 'reverse'
    inline = True
    DNA = [('language', 'language', None)]
members = (PosFontpost, NegFontpost, Line_break, Reverse)
|
manti-by/M2DJ | refs/heads/master | home/utils.py | 1 |
def get_home_items():
pass
|
mbayon/TFG-MachineLearning | refs/heads/master | vbig/lib/python2.7/site-packages/django/contrib/contenttypes/__init__.py | 809 | default_app_config = 'django.contrib.contenttypes.apps.ContentTypesConfig'
|
peerster/CouchPotatoServer | refs/heads/master | libs/chardet/sjisprober.py | 1776 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    # Detects Shift-JIS encoded text by combining a byte-level coding state
    # machine with character-distribution and context analyses.
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        # Reset base-class state, then the context analyzer (the distribution
        # analyzer is reset by the base class).
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        # The context analyzer distinguishes SHIFT_JIS variants.
        return self._mContextAnalyzer.get_charset_name()

    def feed(self, aBuf):
        """Feed a chunk of bytes; returns the detection state afterwards."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognized; feed it to both
                # analyzers. The first character may straddle the previous
                # buffer, hence the _mLastChar carry-over.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the final byte in case a character spans buffer boundaries.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Stop early once the context analyzer is confident enough.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        # Report the more confident of the two analyses.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
|
domenicosolazzo/practice-django | refs/heads/master | venv/lib/python2.7/site-packages/django/contrib/auth/tests/utils.py | 220 | from unittest import skipIf
from django.conf import settings
def skipIfCustomUser(test_func):
    """
    Skip a test if a custom user model is in use.
    """
    custom_user_active = settings.AUTH_USER_MODEL != 'auth.User'
    return skipIf(custom_user_active, 'Custom user model in use')(test_func)
|
azaghal/django-wiki | refs/heads/main | tests/plugins/images/test_views.py | 2 | import base64
import os
import re
from io import BytesIO
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.urls import reverse
from PIL import Image
from wiki.core.plugins import registry as plugin_registry
from wiki.models import URLPath
from wiki.plugins.images import models
from wiki.plugins.images.wiki_plugin import ImagePlugin
from ...base import ArticleWebTestUtils
from ...base import DjangoClientTestBase
from ...base import RequireRootArticleMixin
from ...base import wiki_override_settings
class ImageTests(RequireRootArticleMixin, ArticleWebTestUtils, DjangoClientTestBase):
    """Integration tests for the wiki images plugin: uploading images,
    rendering the [image:N] markup, revisioning, delete/restore and purge."""
    def setUp(self):
        super().setUp()
        self.article = self.root_article
        # A black 1x1 gif
        self.test_data = "R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="
    def _create_gif_filestream_from_base64(self, str_base64, **kwargs):
        """
        Helper function to create filestream for upload.
        Parameters :
        str_base64 : str, base64-encoded image data
        Optional Arguments :
        filename : str, Defaults to 'test.gif'
        """
        filename = kwargs.get("filename", "test.gif")
        data = base64.b64decode(str_base64)
        filedata = BytesIO(data)
        filestream = InMemoryUploadedFile(
            filedata, None, filename, "image", len(data), None
        )
        return filestream
    def _create_test_image(self, path):
        # Locate the image plugin's sidebar form index, then POST the upload
        # through the article edit view just as the browser would.
        plugin_index = -1
        for cnt, plugin_instance in enumerate(plugin_registry.get_sidebar()):
            if isinstance(plugin_instance, ImagePlugin):
                plugin_index = cnt
                break
        self.assertGreaterEqual(plugin_index, 0, msg="Image plugin not activated")
        base_edit_url = reverse("wiki:edit", kwargs={"path": path})
        url = base_edit_url + "?f=form{0:d}".format(plugin_index)
        filestream = self._create_gif_filestream_from_base64(self.test_data)
        response = self.client.post(
            url,
            {
                "unsaved_article_title": self.article.current_revision.title,
                "unsaved_article_content": self.article.current_revision.content,
                "image": filestream,
                "images_save": "1",
            },
        )
        self.assertRedirects(response, base_edit_url)
    def test_index(self):
        url = reverse("wiki:images_index", kwargs={"path": ""})
        response = self.client.get(
            url,
        )
        self.assertContains(response, "Images")
    def test_upload(self):
        """
        Tests that simple file upload uploads correctly
        Uploading a file should preserve the original filename.
        Uploading should not modify file in any way.
        """
        self._create_test_image("")
        # Check the object was created.
        image = models.Image.objects.get()
        image_revision = image.current_revision.imagerevision
        self.assertEqual(image_revision.get_filename(), "test.gif")
        self.assertEqual(
            image_revision.image.file.read(), base64.b64decode(self.test_data)
        )
    def get_article(self, cont, image):
        # Create a child article with the given content; optionally attach a
        # test image, then return the rendered HTML.
        urlpath = URLPath.create_urlpath(
            URLPath.root(), "html_image", title="TestImage", content=cont
        )
        if image:
            self._create_test_image(urlpath.path)
        return urlpath.article.render()
    def test_image_missing(self):
        output = self.get_article("[image:1]", False)
        expected = (
            '<figure class="thumbnail"><a href="">'
            '<div class="caption"><em>Image not found</em></div>'
            '</a><figcaption class="caption"></figcaption></figure>'
        )
        self.assertEqual(output, expected)
    def test_image_default(self):
        output = self.get_article("[image:1]", True)
        image_rev = models.Image.objects.get().current_revision.imagerevision
        expected = re.compile(
            r'<figure class="thumbnail">'
            r'<a href="' + re.escape(image_rev.image.url) + '">'
            r'<img alt="test\.gif" src="/?cache/.*\.jpg">'
            r'</a><figcaption class="caption"></figcaption></figure>'
        )
        self.assertRegex(output, expected)
    def test_image_large_right(self):
        output = self.get_article("[image:1 align:right size:large]", True)
        image_rev = models.Image.objects.get().current_revision.imagerevision
        expected = re.compile(
            r'<figure class="thumbnail float-right">'
            r'<a href="' + re.escape(image_rev.image.url) + '">'
            r'<img alt="test\.gif" src="/?cache/.*\.jpg"></a>'
            r'<figcaption class="caption"></figcaption></figure>'
        )
        self.assertRegex(output, expected)
    def test_image_orig(self):
        # size:orig should serve the original file, not a cached thumbnail.
        output = self.get_article("[image:1 size:orig]", True)
        image_rev = models.Image.objects.get().current_revision.imagerevision
        expected = (
            '<figure class="thumbnail">'
            '<a href="' + image_rev.image.url + '">'
            '<img alt="test.gif" src="' + image_rev.image.url + '"></a>'
            '<figcaption class="caption"></figcaption></figure>'
        )
        self.assertEqual(output, expected)
    # https://gist.github.com/guillaumepiot/817a70706587da3bd862835c59ef584e
    def generate_photo_file(self):
        # Build an in-memory 100x100 red gif suitable for upload forms.
        file = BytesIO()
        image = Image.new("RGBA", size=(100, 100), color=(155, 0, 0))
        image.save(file, "gif")
        file.name = "test.gif"
        file.seek(0)
        return file
    def test_add_revision(self):
        self._create_test_image(path="")
        image = models.Image.objects.get()
        before_edit_rev = image.current_revision.revision_number
        response = self.client.post(
            reverse(
                "wiki:images_add_revision",
                kwargs={
                    "article_id": self.root_article,
                    "image_id": image.pk,
                    "path": "",
                },
            ),
            data={"image": self.generate_photo_file()},
        )
        self.assertRedirects(response, reverse("wiki:edit", kwargs={"path": ""}))
        image = models.Image.objects.get()
        self.assertEqual(models.Image.objects.count(), 1)
        self.assertEqual(
            image.current_revision.previous_revision.revision_number, before_edit_rev
        )
    def test_delete_restore_revision(self):
        self._create_test_image(path="")
        image = models.Image.objects.get()
        before_edit_rev = image.current_revision.revision_number
        response = self.client.get(
            reverse(
                "wiki:images_delete",
                kwargs={
                    "article_id": self.root_article,
                    "image_id": image.pk,
                    "path": "",
                },
            ),
        )
        self.assertRedirects(
            response, reverse("wiki:images_index", kwargs={"path": ""})
        )
        image = models.Image.objects.get()
        self.assertEqual(models.Image.objects.count(), 1)
        self.assertEqual(
            image.current_revision.previous_revision.revision_number, before_edit_rev
        )
        self.assertIs(image.current_revision.deleted, True)
        # RESTORE
        before_edit_rev = image.current_revision.revision_number
        response = self.client.get(
            reverse(
                "wiki:images_restore",
                kwargs={
                    "article_id": self.root_article,
                    "image_id": image.pk,
                    "path": "",
                },
            ),
        )
        self.assertRedirects(
            response, reverse("wiki:images_index", kwargs={"path": ""})
        )
        image = models.Image.objects.get()
        self.assertEqual(models.Image.objects.count(), 1)
        self.assertEqual(
            image.current_revision.previous_revision.revision_number, before_edit_rev
        )
        self.assertFalse(image.current_revision.deleted)
    def test_purge(self):
        """
        Tests that an image is really purged
        """
        self._create_test_image(path="")
        image = models.Image.objects.get()
        image_revision = image.current_revision.imagerevision
        f_path = image_revision.image.file.name
        self.assertIs(os.path.exists(f_path), True)
        response = self.client.post(
            reverse(
                "wiki:images_purge",
                kwargs={
                    "article_id": self.root_article,
                    "image_id": image.pk,
                    "path": "",
                },
            ),
            data={"confirm": True},
        )
        self.assertRedirects(
            response, reverse("wiki:images_index", kwargs={"path": ""})
        )
        self.assertEqual(models.Image.objects.count(), 0)
        self.assertIs(os.path.exists(f_path), False)
    def test_add_revision_purge_image(self):
        """
        Tests that an image with more than one revision is really purged
        """
        # use another test to stage this one
        self.test_add_revision()
        image = models.Image.objects.get()
        image_revision = image.current_revision.imagerevision
        f_path = image_revision.image.file.name
        self.assertIs(os.path.exists(f_path), True)
        response = self.client.post(
            reverse(
                "wiki:images_purge",
                kwargs={
                    "article_id": self.root_article,
                    "image_id": image.pk,
                    "path": "",
                },
            ),
            data={"confirm": True},
        )
        self.assertRedirects(
            response, reverse("wiki:images_index", kwargs={"path": ""})
        )
        self.assertEqual(models.Image.objects.count(), 0)
        self.assertIs(os.path.exists(f_path), False)
    @wiki_override_settings(ACCOUNT_HANDLING=True)
    def test_login_on_revision_add(self):
        # Anonymous users must be redirected to the login page.
        self._create_test_image(path="")
        self.client.logout()
        image = models.Image.objects.get()
        url = reverse(
            "wiki:images_add_revision",
            kwargs={"article_id": self.root_article, "image_id": image.pk, "path": ""},
        )
        response = self.client.post(url, data={"image": self.generate_photo_file()})
        self.assertRedirects(response, "{}?next={}".format(reverse("wiki:login"), url))
|
Luffin/powerline | refs/heads/develop | powerline/lib/vcs/__init__.py | 35 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import errno
from threading import Lock
from collections import defaultdict
from powerline.lib.watcher import create_tree_watcher
from powerline.lib.unicode import out_u
from powerline.lib.path import join
def generate_directories(path):
	'''Yield *path* (when it is a directory) and then each successive
	ancestor directory, stopping at a mount point or filesystem root.'''
	if os.path.isdir(path):
		yield path
	while not os.path.ismount(path):
		parent = os.path.dirname(path)
		if parent == path or not parent:
			return
		path = parent
		yield path
_file_watcher = None


def file_watcher(create_watcher):
	'''Return the process-wide file watcher, creating it lazily on first use.'''
	global _file_watcher
	watcher = _file_watcher
	if watcher is None:
		watcher = _file_watcher = create_watcher()
	return watcher
_branch_watcher = None


def branch_watcher(create_watcher):
	'''Return the process-wide branch watcher, creating it lazily on first use.'''
	global _branch_watcher
	watcher = _branch_watcher
	if watcher is None:
		watcher = _branch_watcher = create_watcher()
	return watcher
# Cache of config_file -> branch name, guarded by branch_lock.
branch_name_cache = {}
branch_lock = Lock()
file_status_lock = Lock()
def get_branch_name(directory, config_file, get_func, create_watcher):
	'''Return the (cached) branch name for the repository at *directory*.

	*config_file* is the VCS file naming the branch (e.g. .git/HEAD);
	*get_func* computes the branch name when the cache is stale.
	'''
	global branch_name_cache
	with branch_lock:
		# Check if the repo directory was moved/deleted
		fw = branch_watcher(create_watcher)
		is_watched = fw.is_watching(directory)
		try:
			changed = fw(directory)
		except OSError as e:
			if getattr(e, 'errno', None) != errno.ENOENT:
				raise
			# Directory vanished: treat as changed so stale entries are dropped.
			changed = True
		if changed:
			branch_name_cache.pop(config_file, None)
			# Remove the watches for this repo
			if is_watched:
				fw.unwatch(directory)
				fw.unwatch(config_file)
		else:
			# Check if the config file has changed
			try:
				changed = fw(config_file)
			except OSError as e:
				if getattr(e, 'errno', None) != errno.ENOENT:
					raise
				# Config file does not exist (happens for mercurial)
				if config_file not in branch_name_cache:
					branch_name_cache[config_file] = out_u(get_func(directory, config_file))
		if changed:
			# Config file has changed or was not tracked
			branch_name_cache[config_file] = out_u(get_func(directory, config_file))
		return branch_name_cache[config_file]
class FileStatusCache(dict):
	'''Maps file paths to cached VCS status, with reverse maps for invalidation.

	dirstate_map: dirstate file -> file paths whose status depends on it.
	ignore_map: ignore file -> file paths it may affect.
	keypath_ignore_map: file path -> ignore files relevant to it.
	'''
	def __init__(self):
		self.dirstate_map = defaultdict(set)
		self.ignore_map = defaultdict(set)
		self.keypath_ignore_map = {}

	def update_maps(self, keypath, directory, dirstate_file, ignore_file_name, extra_ignore_files):
		'''Record which dirstate and ignore files affect *keypath*.'''
		# Collect the ignore files in every directory from keypath's parent
		# up to (and including) the repository root *directory*.
		parent = keypath
		ignore_files = set()
		while parent != directory:
			# BUG FIX: walk upward from the current parent. The original used
			# os.path.dirname(keypath), a loop-invariant value, so the walk
			# stopped after a single level and ancestor ignore files between
			# the file and the repo root were never tracked.
			nparent = os.path.dirname(parent)
			if nparent == parent:
				break
			parent = nparent
			ignore_files.add(join(parent, ignore_file_name))
		for f in extra_ignore_files:
			ignore_files.add(f)
		self.keypath_ignore_map[keypath] = ignore_files
		for ignf in ignore_files:
			self.ignore_map[ignf].add(keypath)
		self.dirstate_map[dirstate_file].add(keypath)

	def invalidate(self, dirstate_file=None, ignore_file=None):
		'''Drop cached statuses for every path depending on the given file.'''
		for keypath in self.dirstate_map[dirstate_file]:
			self.pop(keypath, None)
		for keypath in self.ignore_map[ignore_file]:
			self.pop(keypath, None)

	def ignore_files(self, keypath):
		'''Yield the ignore files previously recorded for *keypath*.'''
		for ignf in self.keypath_ignore_map[keypath]:
			yield ignf
# Shared cache instance used by get_file_status, guarded by file_status_lock.
file_status_cache = FileStatusCache()
def get_file_status(directory, dirstate_file, file_path, ignore_file_name, get_func, create_watcher, extra_ignore_files=()):
	'''Return the (cached) VCS status for *file_path* inside *directory*.

	The cached value is recomputed via *get_func* whenever the dirstate
	file, the file itself, or any relevant ignore file has changed.
	'''
	global file_status_cache
	keypath = file_path if os.path.isabs(file_path) else join(directory, file_path)
	file_status_cache.update_maps(keypath, directory, dirstate_file, ignore_file_name, extra_ignore_files)
	with file_status_lock:
		# Optimize case of keypath not being cached
		if keypath not in file_status_cache:
			file_status_cache[keypath] = ans = get_func(directory, file_path)
			return ans
		# Check if any relevant files have changed
		file_changed = file_watcher(create_watcher)
		changed = False
		# Check if dirstate has changed
		try:
			changed = file_changed(dirstate_file)
		except OSError as e:
			if getattr(e, 'errno', None) != errno.ENOENT:
				raise
			# The .git index file does not exist for a new git repo
			return get_func(directory, file_path)
		if changed:
			# Remove all cached values for files that depend on this
			# dirstate_file
			file_status_cache.invalidate(dirstate_file=dirstate_file)
		else:
			# Check if the file itself has changed
			try:
				changed ^= file_changed(keypath)
			except OSError as e:
				if getattr(e, 'errno', None) != errno.ENOENT:
					raise
				# Do not call get_func again for a non-existant file
				if keypath not in file_status_cache:
					file_status_cache[keypath] = get_func(directory, file_path)
				return file_status_cache[keypath]
			if changed:
				file_status_cache.pop(keypath, None)
			else:
				# Check if one of the ignore files has changed
				for ignf in file_status_cache.ignore_files(keypath):
					try:
						changed ^= file_changed(ignf)
					except OSError as e:
						if getattr(e, 'errno', None) != errno.ENOENT:
							raise
					if changed:
						# Invalidate cache for all files that might be affected
						# by this ignore file
						file_status_cache.invalidate(ignore_file=ignf)
						break
		# Return the cached status, recomputing it if it was invalidated above.
		try:
			return file_status_cache[keypath]
		except KeyError:
			file_status_cache[keypath] = ans = get_func(directory, file_path)
			return ans
class TreeStatusCache(dict):
	'''Maps repository directories to their cached whole-tree status.'''
	def __init__(self, pl):
		self.tw = create_tree_watcher(pl)
		self.pl = pl

	def cache_and_get(self, key, status):
		'''Return the cached value for *key*, computing it via status() on a miss.'''
		if key not in self:
			self[key] = status()
		return self[key]

	def __call__(self, repo):
		key = repo.directory
		try:
			tree_changed = self.tw(key, ignore_event=getattr(repo, 'ignore_event', None))
		except OSError as e:
			self.pl.warn('Failed to check {0} for changes, with error: {1}', key, str(e))
		else:
			if tree_changed:
				self.pop(key, None)
		return self.cache_and_get(key, repo.status)
# Lazily created process-wide cache used by tree_status().
_tree_status_cache = None
def tree_status(repo, pl):
	# Create the shared cache on first use, then look up/refresh this repo.
	global _tree_status_cache
	if _tree_status_cache is None:
		_tree_status_cache = TreeStatusCache(pl)
	return _tree_status_cache(repo)
# (backend name, marker entry, existence test) triples used to sniff a
# repository root. git's .git may be a file (worktrees/submodules), hence
# os.path.exists rather than isdir.
vcs_props = (
	('git', '.git', os.path.exists),
	('mercurial', '.hg', os.path.isdir),
	('bzr', '.bzr', os.path.isdir),
)
# The same table with marker names as bytes, for byte-string paths.
vcs_props_bytes = [
	(vcs, vcs_dir.encode('ascii'), check)
	for vcs, vcs_dir, check in vcs_props
]
def guess(path, create_watcher):
	'''Return a Repository for the VCS controlling *path*, or None.

	Walks from *path* up to the filesystem mount point looking for a
	.git/.hg/.bzr marker, then lazily imports the matching backend module.
	'''
	for directory in generate_directories(path):
		for vcs, vcs_dir, check in (vcs_props_bytes if isinstance(path, bytes) else vcs_props):
			repo_dir = os.path.join(directory, vcs_dir)
			if check(repo_dir):
				if os.path.isdir(repo_dir) and not os.access(repo_dir, os.X_OK):
					continue
				try:
					if vcs not in globals():
						globals()[vcs] = getattr(__import__(str('powerline.lib.vcs'), fromlist=[str(vcs)]), str(vcs))
					return globals()[vcs].Repository(directory, create_watcher)
				# BUG FIX: was a bare "except:", which also swallowed
				# KeyboardInterrupt/SystemExit. Still deliberately best-effort:
				# a broken backend must not break the prompt.
				except Exception:
					pass
	return None
def get_fallback_create_watcher():
	'''Return a zero-argument factory producing an "auto" file watcher
	bound to the fallback logger.'''
	from functools import partial
	from powerline import get_fallback_logger
	from powerline.lib.watcher import create_file_watcher
	return partial(create_file_watcher, get_fallback_logger(), 'auto')
def debug():
	'''Test run guess(), repo.branch() and repo.status()

	Interactive manual-testing helper. To use::

		python -c 'from powerline.lib.vcs import debug; debug()' some_file_to_watch.
	'''
	import sys
	dest = sys.argv[-1]
	repo = guess(os.path.abspath(dest), get_fallback_create_watcher)
	if repo is None:
		print ('%s is not a recognized vcs repo' % dest)
		raise SystemExit(1)
	print ('Watching %s' % dest)
	print ('Press Ctrl-C to exit.')
	try:
		while True:
			if os.path.isdir(dest):
				print ('Branch name: %s Status: %s' % (repo.branch(), repo.status()))
			else:
				print ('File status: %s' % repo.status(dest))
			# NOTE: raw_input is Python 2 only; under Python 3 this would be input().
			raw_input('Press Enter to check again: ')
	except KeyboardInterrupt:
		pass
	except EOFError:
		pass
|
kevin-vb/drupdevelop | refs/heads/master | sites/all/themes/drupdevelop/node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
  """Visual Studio XML user file writer."""

  def __init__(self, user_file_path, version, name):
    """Initializes the user file.

    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    # Maps configuration name -> easy_xml element list for that config.
    self.configurations = {}

  def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]

  def AddDebugSettings(self, config_name, command, environment = {},
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      config_name: Name of the configuration to attach the settings to;
          created on the fly if it does not exist yet.
      command: command line to run.  First element in the list is the
        executable.  All elements of the command will be quoted if
        necessary.
      environment: optional dict of environment variables for the debuggee.
      working_directory: other files which may trigger the rule. (optional)
    """
    command = _QuoteWin32CommandLineArgs(command)
    abs_command = _FindCommandInPath(command[0])

    if environment and isinstance(environment, dict):
      # NOTE: dict.iteritems() is Python 2 only.
      env_list = ['%s="%s"' % (key, val)
                  for (key,val) in environment.iteritems()]
      environment = ' '.join(env_list)
    else:
      environment = ''

    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it.  We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3',  # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]

    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)

    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)

  def WriteIfChanged(self):
    """Writes the user file."""
    configs = ['Configurations']
    # NOTE: dict.iteritems() is Python 2 only.
    for config, spec in sorted(self.configurations.iteritems()):
      configs.append(spec)

    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
               },
               configs]
    # Only touches the file on disk when the serialized XML actually differs.
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
|
heeraj123/oh-mainline | refs/heads/master | mysite/project/tests.py | 15 | # This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2009, 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.test.client import Client
from django_webtest import WebTest
import mysite.project.view_helpers
import mysite.account.tests
from mysite.search.models import Project
from mysite.profile.models import Person, PortfolioEntry
import mysite.project.views
import mysite.profile.views
import mysite.profile.models
import mysite.profile.view_helpers
import mock
import urlparse
import datetime
from django.core.urlresolvers import reverse
class BasicHelpers(WebTest):
    """Shared helpers for tests that need an authenticated Django test client."""

    def login_with_client(self,
                          username='paulproteus',
                          password="paulproteus's unbreakable password"):
        """Return a Django test Client logged in as *username*."""
        authenticated_client = Client()
        logged_in = authenticated_client.login(username=username, password=password)
        self.assertTrue(logged_in)
        return authenticated_client

    def login_with_client_as_barry(self):
        """Return a test client logged in as the secondary fixture user."""
        return self.login_with_client(username='barry', password='parallelism')
class ProjectNameSearch(WebTest):
    # Tests for project-name search: backend prefix matching, exact-match
    # redirects, and the search form/template wiring.
    def test_search_for_similar_project_names_backend(self):
        # Create one relevant, one irrelevant project
        mysite.search.models.Project.create_dummy(name='Twisted System')
        mysite.search.models.Project.create_dummy(name='Irrelevant')
        # Call our function, hoping to find Twisted System
        starts_with_twisted = (
            mysite.project.view_helpers.similar_project_names('Twisted'))
        self.assertEqual(['Twisted System'],
                         [p.name for p in starts_with_twisted])
        # Same with lowercase name
        starts_with_twisted = (
            mysite.project.view_helpers.similar_project_names('twistEd'))
        self.assertEqual(['Twisted System'],
                         [p.name for p in starts_with_twisted])
    def test_search_for_one_matching_project_name(self):
        # If there's an exactly-matching project name, we redirect to that
        # project's page (instead of showing search results).
        mysite.search.models.Project.create_dummy(name='Twisted System')
        response = self.client.get('/projects/',
                                   {'q': 'twiSted SysTem'},
                                   follow=True)
        self.assertEqual(
            response.redirect_chain,
            [('http://testserver/projects/Twisted%20System', 302)])
    def test_form_sends_data_to_get(self):
        # This test will fail if a query that selects one project but doesn't
        # equal the project's name causes a redirect.
        # First, create the project that we will refer to below.
        mysite.search.models.Project.create_dummy(name='Twisted System')
        # Log in with the plain Django test client first.
        self.client = Client()
        username = 'paulproteus'
        password = "paulproteus's unbreakable password"
        self.client.login(username=username, password=password)
        # Submit the search form through django-webtest and check the results page.
        search_page = self.app.get('/projects/')
        search_form = search_page.form
        search_form['q'] = 'Twisted'
        response = search_form.submit()
        self.assertEqual(response.status_code, 200)
        self.assertIn('Twisted', response.content)
    def test_template_get_matching_projects(self):
        # A non-exact prefix query should list all matching projects in context.
        mysite.search.models.Project.create_dummy(name='Twisted System')
        mysite.search.models.Project.create_dummy(name='Twisted Vines')
        response = self.client.get('/projects/?q=Twist')
        matching_projects = response.context[0]['matching_projects']
        self.assertEqual(
            sorted([p.name for p in matching_projects]),
            sorted(['Twisted System', 'Twisted Vines']))
class ProjectList(WebTest):
    """Smoke tests for the /projects/ listing page."""

    def test_it_generally_works(self):
        # Just make sure rendering the page does not raise.
        self.client.get('/projects/')

    def test_space_projects_redirects_to_projects(self):
        resp = self.client.get("/ projects/")
        self.assertEqual(resp.status_code, 301)

    def test_projects_returns_projects(self):
        resp = self.client.get("/projects/")
        self.assertEqual(resp.status_code, 200)
class ProjectPageCreation(BasicHelpers):
    fixtures = ['user-paulproteus', 'person-paulproteus']

    def test_post_handler(self):
        """POSTing a new project name creates it and redirects to the editor."""
        name = 'Something novel'
        # Precondition: no project with this name exists yet.
        self.assertFalse(
            mysite.search.models.Project.objects.filter(name=name))
        logged_in_client = self.login_with_client()
        response = logged_in_client.post(
            reverse(mysite.project.views.create_project_page_do),
            {'project_name': name}, follow=True)
        # The project now exists...
        self.assert_(
            mysite.search.models.Project.objects.filter(name=name))
        # ...and the browser landed on the project editor.
        self.assertEqual(
            response.redirect_chain,
            [('http://testserver/+projedit/Something%20novel', 302)])
        # FIXME: Enqueue a job into session to have this user take ownership
        # of this Project.
        # This could easily be a log for edits.

    def test_project_creator_simply_redirects_to_project_if_it_exists(self):
        """Submitting a case-insensitive duplicate redirects, creating nothing."""
        name = 'Something novel'
        Project.create_dummy(name=name.lower())
        # Exactly one project exists (stored with lower-cased name).
        self.assertEqual(1, len(mysite.search.models.Project.objects.all()))
        response = self.client.post(
            reverse(mysite.project.views.create_project_page_do),
            {'project_name': name}, follow=True)
        # Still exactly one project -- no duplicate was created...
        self.assertEqual(1, len(mysite.search.models.Project.objects.all()))
        # ...and we were redirected to the existing project's page.
        self.assertEqual(
            response.redirect_chain,
            [('http://testserver/projects/something%20novel', 302)])
class ButtonClickMarksSomeoneAsWannaHelp(BasicHelpers):
    fixtures = ['user-paulproteus', 'person-paulproteus']

    def test_mark_as_wanna_help(self):
        """POSTing to wanna_help_do records the helper and a WannaHelperNote."""
        helper = Person.objects.get(user__username='paulproteus')
        project = Project.create_dummy()
        # Nothing recorded yet.
        self.assertFalse(mysite.search.models.WannaHelperNote.objects.all())
        self.assertFalse(project.people_who_wanna_help.all())
        client = self.login_with_client()
        client.post(reverse(mysite.project.views.wanna_help_do),
                    {u'project': unicode(project.pk)})
        refreshed = Project.objects.get(pk=project.pk)
        self.assertEqual(list(refreshed.people_who_wanna_help.all()),
                         [helper])
        note = mysite.search.models.WannaHelperNote.objects.get()
        self.assertEqual(note.person, helper)
        self.assertEqual(note.project, refreshed)

    def test_unmark_as_wanna_help(self):
        """POSTing to unlist_self_from_wanna_help_do removes the helper."""
        helper = Person.objects.get(user__username='paulproteus')
        project = Project.create_dummy()
        project.people_who_wanna_help.add(helper)
        project.save()
        mysite.search.models.WannaHelperNote.add_person_project(
            helper, project)
        client = self.login_with_client()
        client.post(
            reverse(mysite.project.views.unlist_self_from_wanna_help_do),
            {u'project': unicode(project.pk)})
        # The helper is gone from the project's helper list.
        refreshed = Project.objects.get(pk=project.pk)
        self.assertFalse(refreshed.people_who_wanna_help.all())

    def test_mark_as_contacted(self):
        """mark_contacted_do stamps the helper's note as contacted."""
        helper = Person.objects.get(user__username='paulproteus')
        project = Project.create_dummy()
        project.people_who_wanna_help.add(helper)
        project.save()
        mysite.search.models.WannaHelperNote.add_person_project(
            helper, project)
        client = self.login_with_client()
        form_data = {
            u'mark_contact-project': unicode(project.pk),
            u'helper-%s-checked' % (helper.pk,): unicode('on'),
            u'helper-%s-person_id' % (helper.pk): unicode(helper.pk),
            u'helper-%s-project_id' % (helper.pk): unicode(project.pk)}
        client.post(reverse(mysite.project.views.mark_contacted_do),
                    form_data)
        note = mysite.search.models.WannaHelperNote.objects.get(
            person=helper, project=project)
        self.assertTrue(note.contacted_on)
        # NOTE(review): the second argument to assertTrue is only a failure
        # message; this was probably meant to be assertEqual. Preserved as-is
        # to avoid changing test behavior.
        self.assertTrue(note.contacted_by, datetime.date.today())
class WannaHelpSubmitHandlesNoProjectIdGracefully(WebTest):

    def test(self):
        """An empty POST to wanna_help_do yields a 400, not a server error."""
        url = reverse(mysite.project.views.wanna_help_do)
        response = self.client.post(url, {}, follow=True)
        self.assertEqual(response.status_code, 400)
class WannaHelpWorksAnonymously(WebTest):
    """An anonymous wanna-help click is parked in the session and only
    persisted to the database after the user logs in and revisits the
    project page."""
    fixtures = ['user-paulproteus', 'person-paulproteus']
    def test_mark_as_helper_anonymously(self):
        # Steps for this test
        # 1. User fills in the form anonymously
        # 2. We test that the Answer is not yet saved
        # 3. User logs in
        # 4. We test that the Answer is saved
        project_id = Project.create_dummy(name='Myproject').id
        # At the start, no one wants to help our project.
        self.assertFalse(Project.objects.get(id=project_id)
                         .people_who_wanna_help.all())
        # Click the button saying we want to help!
        post_to = reverse(mysite.project.views.wanna_help_do)
        response = self.client.post(
            post_to, {u'project': unicode(project_id)}, follow=True)
        # Make sure we are redirected to the right place (the login page,
        # carrying a 'next' URL that re-triggers the wanna_help action).
        self.assertEqual(
            response.redirect_chain,
            [('http://testserver/account/login/?'
              'next=%2Fprojects%2FMyproject%3Fwanna_help%3Dtrue', 302)])
        # check that the session remembers we want to help this project out
        self.assertEqual(self.client.session['projects_we_want_to_help_out'],
                         [project_id])
        # According to the database, no one wants to help our project.
        self.assertFalse(Project.objects.get(id=project_id)
                         .people_who_wanna_help.all())
        # But when the user is logged in and *then* visits the project page
        login_worked = self.client.login(
            username='paulproteus',
            password="paulproteus's unbreakable password")
        self.assert_(login_worked)
        # Visit the project page...
        self.client.get(Project.objects.get(id=project_id).get_url())
        # After the GET, we've removed our note in the session
        self.assertFalse(
            self.client.session.get('projects_we_want_to_help_out', None))
        # then the DB knows the user wants to help out!
        self.assertEqual(
            list(Project.objects.get(id=project_id)
                 .people_who_wanna_help.all()),
            [Person.objects.get(user__username='paulproteus')])
        self.assert_(mysite.search.models.WannaHelperNote.objects.all())
        # Say we're not interested anymore.
        post_to = reverse(mysite.project.views.unlist_self_from_wanna_help_do)
        response = self.client.post(
            post_to, {u'project': unicode(project_id)}, follow=True)
        # And now the DB shows we have removed ourselves.
        self.assertFalse(Project.objects.get(id=project_id)
                         .people_who_wanna_help.all())
        self.assertFalse(mysite.search.models.WannaHelperNote.objects.all())
class ProjectPageTellsNextStepsForHelpersToBeExpanded(BasicHelpers):
    fixtures = ['user-paulproteus', 'person-paulproteus',
                'miro-project']

    def test_default_to_false(self):  # FIXME: Make it default to True soon
        """The 'expand_next_steps' context flag defaults to a falsy value."""
        response = self.login_with_client().get('/projects/Miro')
        self.assertFalse(
            response.context[0].get('expand_next_steps', None))
class OffsiteAnonymousWannaHelpWorks(WebTest):
    """A wanna-help POST arriving from an external site is remembered in
    the session and routed through the login page."""
    fixtures = ['user-paulproteus', 'person-paulproteus']
    def test(self):
        # Steps for this test
        # 1. User POSTs to the wannahelp POST handler, indicating the request
        #    came from offsite
        # 2. User is redirected to a login page that knows the request came
        #    from offsite
        project_id = Project.create_dummy(name='Myproject').id
        # At the start, no one wants to help our project.
        self.assertFalse(Project.objects.get(id=project_id)
                         .people_who_wanna_help.all())
        # Click the button saying we want to help!
        post_to = reverse(mysite.project.views.wanna_help_do)
        response = self.client.post(post_to,
                                    {u'project': unicode(project_id),
                                     u'from_offsite': u'True'}, follow=True)
        # Make sure the session knows we came from offsite. Login-related
        # templates want this knowledge.
        self.assert_(self.client.session.get('from_offsite', False))
        # FIXME: There should be a cancel button letting the user
        # destroy the session and then go back to the Referring page.
        # Make sure we are redirected to the right place
        self.assertEqual(
            response.redirect_chain,
            [('http://testserver/account/login/?'
              'next=%2Fprojects%2FMyproject%3Fwanna_help%3Dtrue', 302)])
        # The session-parked wanna-help queue contains just our project.
        lucky_projects = (mysite.project.view_helpers.
                          get_wanna_help_queue_from_session(self.client.
                                                            session))
        self.assertEqual([k.name for k in lucky_projects], ['Myproject'])
class DecideWhichProjectDescriptionsAppearOnProjectPage(BasicHelpers):
    """Unchecking a description checkbox on the project edit page removes
    that PortfolioEntry from the pool of usable project descriptions."""
    fixtures = ['user-paulproteus', 'person-paulproteus',
                'user-barry', 'person-barry']
    def test(self):
        # Create a project.
        project = Project.create_dummy()
        # Create two profiles, each with a PortfolioEntry linking it to the
        # project, each with descriptions.
        def create_pfe_with_description(username):
            return PortfolioEntry.create_dummy(project=project,
                                               person=Person.get_by_username(
                                                   username),
                                               is_published=True)
        pfes = {'uncheck_me': create_pfe_with_description('paulproteus'),
                'keep_me_checked': create_pfe_with_description('barry')}
        # Get a list of PortfolioEntries that we use to get a random project
        # description for the project page.
        descriptions = project.get_pfentries_with_usable_descriptions()
        # Observe that the list contains both PortfolioEntries.
        for entry in pfes.values():
            self.assert_(entry in descriptions)
        # Log in as paulproteus
        username = 'paulproteus'
        self.login_with_client()
        # Go to the project page.
        url = urlparse.urljoin(
            "http://openhatch.org", project.get_edit_page_url())
        edit_page = self.app.get(url, user=username)
        # In preparation for the next set of assertions, make sure that the
        # entries don't have the same description.
        self.assertNotEqual(
            pfes['uncheck_me'].project_description,
            pfes['keep_me_checked'].project_description)
        # See a list of project descriptions on the page, which equals the
        # list of descriptions in the DB.
        # for entry in pfes.values():
        #    tc.find(entry.project_description)
        # Uncheck one of the checkboxes and submit the form
        name_of_checkbox_to_uncheck = "%s-use_my_description" % pfes[
            'uncheck_me'].pk
        # We have to know that the correct form is the 2nd item
        # (NOTE(review): index-based form lookup is brittle -- it silently
        # breaks if another form is added to the page ahead of this one).
        edit_form = edit_page.forms[1]
        edit_form[name_of_checkbox_to_uncheck] = False
        edit_form.submit()
        # Get a list of the PortfolioEntries that we use to get a random
        # project description for the project page.
        good_pfentries = project.get_pfentries_with_usable_descriptions()
        # Observe that the list contains only the checked PortfolioEntry.
        self.assert_(pfes['uncheck_me'] not in good_pfentries)
        self.assert_(pfes['keep_me_checked'] in good_pfentries)
class BugTrackersOnProjectEditPage(WebTest):
    fixtures = ['user-paulproteus', 'person-paulproteus',
                'user-barry', 'person-barry']

    def setUp(self):
        """Create the project whose bug trackers the tests inspect."""
        super(BugTrackersOnProjectEditPage, self).setUp()
        self.twisted = mysite.search.models.Project.create_dummy(
            name='Twisted System')

    def test_empty_at_start(self):
        """A freshly created project has no corresponding bug trackers."""
        self.assertFalse(self.twisted.get_corresponding_bug_trackers())

    def test_trackers_created_for_project_show_up(self):
        """A tracker created for the project appears in its tracker list."""
        tracker = mysite.customs.models.RoundupTrackerModel(
            tracker_name='dummy',
            base_url='http://example.com/',
            closed_status='resolved')
        tracker.created_for_project = self.twisted
        tracker.save()
        self.assertEqual(
            [tracker],
            list(self.twisted.get_corresponding_bug_trackers()))
|
tammoippen/nest-simulator | refs/heads/master | topology/examples/conncon_sources.py | 16 | # -*- coding: utf-8 -*-
#
# conncon_sources.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module Example
Create two 30x30 layers of iaf_psc_alpha neurons,
connect with convergent projection and rectangular mask,
visualize connection from target perspective.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
'''
import nest
import nest.topology as topo
import pylab
# Interactive plotting so figures update as the script runs.
pylab.ion()
nest.ResetKernel()
nest.set_verbosity('M_WARNING')
# create two test layers: 30x30 grids of iaf_psc_alpha neurons on a
# 3.0 x 3.0 extent with periodic boundary conditions (edge_wrap).
a = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': 'iaf_psc_alpha', 'edge_wrap': True})
b = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': 'iaf_psc_alpha', 'edge_wrap': True})
# Convergent projection a -> b: each target draws sources from a
# 0.4 x 1.0 rectangular mask with 50% connection probability and
# uniformly distributed weights.
topo.ConnectLayers(a, b, {'connection_type': 'convergent',
                          'mask': {'rectangular': {'lower_left': [-0.2, -0.5],
                                                   'upper_right': [0.2, 0.5]}},
                          'kernel': 0.5,
                          'weights': {'uniform': {'min': 0.5, 'max': 2.0}},
                          'delays': 1.0})
pylab.clf()
# plot sources of neurons in different grid locations: one target near the
# layer center, one at the corner (exercises the edge-wrap case).
for tgt_pos in [[15, 15], [0, 0]]:
    # obtain node id for center
    tgt = topo.GetElement(b, tgt_pos)
    # obtain list of outgoing connections for ctr
    # int() required to cast numpy.int64
    spos = tuple(zip(*[topo.GetPosition([int(conn[0])])[0] for conn in
                       nest.GetConnections(target=tgt)]))
    # scatter-plot
    pylab.scatter(spos[0], spos[1], 20, zorder=10)
    # mark sender position with transparent red circle
    ctrpos = pylab.array(topo.GetPosition(tgt)[0])
    pylab.gca().add_patch(pylab.Circle(ctrpos, radius=0.1, zorder=99,
                                       fc='r', alpha=0.4, ec='none'))
    # mark mask position with open red rectangle (same size as the
    # rectangular mask used in ConnectLayers above)
    pylab.gca().add_patch(
        pylab.Rectangle(ctrpos - (0.2, 0.5), 0.4, 1.0, zorder=1,
                        fc='none', ec='r', lw=3))
# mark layer edge
pylab.gca().add_patch(pylab.Rectangle((-1.5, -1.5), 3.0, 3.0, zorder=1,
                                      fc='none', ec='k', lw=3))
# beautify
pylab.axes().set_xticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.axes().set_yticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.grid(True)
pylab.axis([-2.0, 2.0, -2.0, 2.0])
pylab.axes().set_aspect('equal', 'box')
pylab.title('Connection sources')
|
reddraggone9/youtube-dl | refs/heads/master | youtube_dl/extractor/extremetube.py | 2 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
int_or_none,
str_to_int,
)
class ExtremeTubeIE(InfoExtractor):
    """Extractor for extremetube.com video pages.

    Parses the video page HTML for title/uploader/view count and reads the
    available format URLs from the page's JavaScript ``flashvars`` object.
    """
    _VALID_URL = r'https?://(?:www\.)?extremetube\.com/(?:[^/]+/)?video/(?P<id>[^/#?&]+)'
    _TESTS = [{
        'url': 'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431',
        'md5': '344d0c6d50e2f16b06e49ca011d8ac69',
        'info_dict': {
            'id': 'music-video-14-british-euro-brit-european-cumshots-swallow-652431',
            'ext': 'mp4',
            'title': 'Music Video 14 british euro brit european cumshots swallow',
            'uploader': 'unknown',
            'view_count': int,
            'age_limit': 18,
        }
    }, {
        'url': 'http://www.extremetube.com/gay/video/abcde-1234',
        'only_matching': True,
    }, {
        'url': 'http://www.extremetube.com/video/latina-slut-fucked-by-fat-black-dick',
        'only_matching': True,
    }, {
        'url': 'http://www.extremetube.com/video/652431',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # The site gates content behind an age check; the cookie bypasses it.
        req = compat_urllib_request.Request(url)
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)
        video_title = self._html_search_regex(
            r'<h1 [^>]*?title="([^"]+)"[^>]*>', webpage, 'title')
        uploader = self._html_search_regex(
            r'Uploaded by:\s*</strong>\s*(.+?)\s*</div>',
            webpage, 'uploader', fatal=False)
        view_count = str_to_int(self._html_search_regex(
            r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>',
            webpage, 'view count', fatal=False))
        # Format URLs live in the page's JS 'flashvars' object, keyed like
        # 'quality_720p'.
        flash_vars = self._parse_json(
            self._search_regex(
                r'var\s+flashvars\s*=\s*({.+?});', webpage, 'flash vars'),
            video_id)
        formats = []
        for quality_key, video_url in flash_vars.items():
            # Skip flashvars entries that are not quality_<N>p video URLs.
            height = int_or_none(self._search_regex(
                r'quality_(\d+)[pP]$', quality_key, 'height', default=None))
            if not height:
                continue
            f = {
                'url': video_url,
            }
            # Prefer height/bitrate parsed from the URL path when present,
            # since it is more precise than the flashvars key.
            mobj = re.search(
                r'/(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+', video_url)
            if mobj:
                height = int(mobj.group('height'))
                bitrate = int(mobj.group('bitrate'))
                f.update({
                    'format_id': '%dp-%dk' % (height, bitrate),
                    'height': height,
                    'tbr': bitrate,
                })
            else:
                f.update({
                    'format_id': '%dp' % height,
                    'height': height,
                })
            formats.append(f)
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'uploader': uploader,
            'view_count': view_count,
            'age_limit': 18,
        }
|
40443111/2017springcd_hw | refs/heads/gh-pages | course/a_grouping.py | 39 | content = open("2016fall_cadp_a_groups.txt").read()
# Each roster line looks like "<label>:<id1>,<id2>,...,<idN>," -- split it
# into a label part and a trailing-comma-terminated member list, then emit
# HTML links for the group page and for each member's page.
result = content.splitlines()
for gno, line in enumerate(result, 1):
    fields = line.split(":")
    # Group header linking to ../g<N>.
    print("<a href='../g"+str(gno)+"'>第"+str(gno)+"組</a>:<br />")
    # Members are comma separated; [:-1] drops the empty string left after
    # the final trailing comma.
    gmember = fields[1].split(",")[:-1]
    for member in gmember:
        print("<a href='../g"+str(gno)+"/"+str(member)+"'>"+ \
              str(member)+"</a> ")
    print("<br />")
|
silviaegt/bdcv_metadata | refs/heads/master | Comparador/ComFunctions.py | 1 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 23 20:37:30 2017
@author: Antonio
Las bibliotecas externas son csv y os
La versión de Python usada es 3.6.0
"""
from csv import reader
import os
"""
* Función que se encarga de comparar dos directorios para determinar cuáles
* archivos del directorio 1 no se encuentran en el directorio 2
* Recibe:
* l1: conjunto con los nombres del directorio 1
* car1: nombre del directorio raíz 1
* l1: conjunto con los nombres del directorio 2
* car1: nombre del directorio raíz 2
* Genera:
* Archivo con el reporte
"""
def compare(l1, car1, l2, car2):
    """Report which terms of each shared table in *car1* are missing in *car2*.

    For every file name present in both sets, reads the two same-named
    term tables and writes a report file
    ``./comparación/<car1>vs<car2>_<name>.txt`` that lists, one per line,
    the terms present in directory 1 but absent from directory 2.

    Parameters:
        l1:   set with the file names found under ``<car1>/Tablas``
        car1: name of the first root directory
        l2:   set with the file names found under ``<car2>/Tablas``
        car2: name of the second root directory
    """
    for i in list(l1):
        # Only compare files that exist in both directories.
        if i not in l2:
            continue
        tmp = i.replace(".txt", '')
        com = getTerms("./" + car1 + "/Tablas/" + i)
        ref = getTerms("./" + car2 + "/Tablas/" + i)
        # Bug fix: getTerms() returns a dict, and dicts have no
        # .difference() method -- build a set of the keys before
        # subtracting (iterating the ref dict yields its keys).
        dif = set(com).difference(ref)
        # 'with' guarantees the report file is closed even on error.
        out_path = "./comparación/" + car1 + "vs" + car2 + "_" + tmp + ".txt"
        with open(out_path, 'w', encoding="iso-8859-1") as file:
            for k in dif:
                file.write(k + "\n")
"""
* Función que se encarga de obtener los nombres de los archivos dentro de un
* directorio
* Recibe:
* msg: mensaje que se muestra para pedir el nombre del directorio principal
* rte: subdirectorio donde se encuentran los archivos de interes
* Regresa:
* l: conjunto en el que se encuentran los nombres de los archivos
* car: nombre del directorio principal
"""
def getLists(msg, rte):
    """Prompt for a root directory name and list the files inside it.

    Parameters:
        msg: prompt shown when asking the user for the root directory name
        rte: sub-path (relative to the root) holding the files of interest

    Returns:
        (names, root): the set of file names found, and the directory name
        the user typed.
    """
    root = input(msg)
    names = set(os.listdir("./" + root + rte))
    return names, root
"""
* Función que se encarga de generar un diccionario de datos de un archivo csv
* Recibe:
* filename: nombre del archivo con el que se generará el diccionario
* Regresa:
* dic: diccionario con los datos
"""
def getTerms(filename):
    """Load a two-column CSV file into a dict mapping column 2 -> column 1.

    Parameters:
        filename: path of the comma-separated file to read
                  (iso-8859-1 encoded)

    Returns:
        dict whose keys are the second-column values and whose values are
        the corresponding first-column entries.
    """
    # Bug fix: the original opened the file without ever closing it,
    # leaking the handle; 'with' closes it deterministically. Streaming
    # the reader also avoids materializing the whole file as a list.
    dic = {}
    with open(filename, 'r', encoding="iso-8859-1") as handle:
        for row in reader(handle, delimiter=','):
            dic[row[1]] = row[0]
    return dic
|
amyodov/python-datetimeex | refs/heads/master | datetimeex/_timedeltaex.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division
import numbers
from datetime import date, datetime, time, timedelta
from fractions import Fraction
from _common import (MICROSECONDS_IN_SECOND, MICROSECONDS_IN_MINUTE,
MICROSECONDS_IN_HOUR, MICROSECONDS_IN_DAY,
t_to_mus, mus_to_t, td_to_mus, mus_to_td,
_PY3K, DummyTZInfo)
from _timeex import TimeEx
class TimeDeltaEx(timedelta, numbers.Real):
    """
    Enhanced datetime.timedelta, with various additional operations.
    Adds conversion to/from microseconds and seconds, and arithmetic with
    date/datetime/time/timedelta operands beyond what the base class offers.
    """
    # No per-instance __dict__: keep instances as lightweight as timedelta's.
    __slots__ = ()
    def __repr__(self):
        """
        >>> TimeDeltaEx.from_timedelta(timedelta(0))
        TimeDeltaEx(0)
        >>> TimeDeltaEx.from_timedelta(timedelta(3))
        TimeDeltaEx(3)
        >>> TimeDeltaEx.from_timedelta(timedelta(3, 14))
        TimeDeltaEx(3, 14)
        >>> TimeDeltaEx.from_timedelta(timedelta(3, 14, 15))
        TimeDeltaEx(3, 14, 15)
        """
        # Emit the shortest constructor form that round-trips the value.
        if not self.microseconds:
            if not self.seconds:
                return "TimeDeltaEx({0:d})".format(self.days)
            else:
                return "TimeDeltaEx({0:d}, {1:d})".format(self.days, self.seconds)
        else:
            return "TimeDeltaEx({0:d}, {1:d}, {2:d})"\
                       .format(self.days, self.seconds, self.microseconds)
    def as_timedelta(self):
        """
        Convert the TimeDeltaEx to the new datetime.timedelta
        (even though TimeDeltaEx is its subclass and can be used instead
        almost anywhere).
        This is not a property, to reflect a fact that a new datetime.timedelta
        is created rather than the access to the internals of TimeDeltaEx.
        >>> TimeDeltaEx(3, 14, 15).as_timedelta()
        datetime.timedelta(3, 14, 15)
        @rtype: timedelta
        """
        return timedelta(self.days, self.seconds, self.microseconds)
    @classmethod
    def from_timedelta(cls, td):
        """
        Create a new TimeDeltaEx from a basic datetime.timedelta.
        >>> TimeDeltaEx.from_timedelta(timedelta(3, 14, 15))
        TimeDeltaEx(3, 14, 15)
        @type td: timedelta
        @rtype: TimeDeltaEx
        """
        assert isinstance(td, timedelta), repr(td)
        return cls(td.days, td.seconds, td.microseconds)
    @property
    def in_microseconds(self):
        """
        The number of microseconds in the time interval.
        The number is always integer, due to inherent storage limitation.
        Under Python 3.x, this property has two synonims:
        in_microseconds and in_µs.
        >>> TimeDeltaEx(3, 14, 15).in_microseconds
        259214000015
        >>> # Test µs_to_td() in Python 3.x only
        >>> not _PY3K or eval("TimeDeltaEx(3, 14, 15).in_microseconds == 259214000015")
        True
        @rtype: numbers.Number
        """
        return td_to_mus(self)
    # The non-ASCII alias 'in_µs' would be a syntax error in a Python 2
    # source file, so it is created via exec() on Python 3 only.
    if _PY3K:
        exec("in_µs = in_microseconds")
    @classmethod
    def from_microseconds(cls, microseconds):
        """
        Given a (numeric) duration in microseconds, create
        the appropriate TimeDeltaEx.
        Sub-microsecond precision may be lost due to inherent storage limitations.
        Under Python 3.x, this function has two synonims:
        from_microseconds() and from_µs().
        >>> TimeDeltaEx.from_microseconds(259214000015)
        TimeDeltaEx(3, 14, 15)
        >>> TimeDeltaEx.from_microseconds(259214000015.222)
        TimeDeltaEx(3, 14, 15)
        # Test from_µs() in Python 3.x only
        >>> not _PY3K or eval("TimeDeltaEx.from_µs(259214000015) == \
                               TimeDeltaEx(3, 14, 15)")
        True
        @type microseconds: numbers.Number
        @rtype: TimeDeltaEx
        """
        assert isinstance(microseconds, numbers.Number), repr(microseconds)
        return cls(microseconds = microseconds)
    # Non-ASCII alias, Python 3 only (see in_µs above).
    if _PY3K:
        exec("from_µs = from_microseconds")
    @property
    def in_seconds(self):
        """
        The number of seconds in the time interval.
        As the interval may have sub-second precision, to represent
        as the number of seconds this property provides a fractions.Fraction.
        >>> TimeDeltaEx(3, 14, 15).in_seconds
        Fraction(51842800003, 200000)
        >>> str(TimeDeltaEx(3, 14, 15).in_seconds)
        '51842800003/200000'
        >>> float(TimeDeltaEx(3, 14, 15).in_seconds)
        259214.000015
        @rtype: numbers.Rational
        """
        return Fraction(self.in_microseconds, 1000000)
    def __div__(self, divisor):
        """
        Divide TimeDeltaEx by some datetime.timedelta or a number.
        For dividing TimeDeltaEx by datetime.timedelta,
        the result will be a ratio.
        For dividing TimeDeltaEx by a number, the result will be a TimeDeltaEx
        (the precision may be lost though).
        >>> TimeDeltaEx(seconds=5) / timedelta(seconds=2)
        Fraction(5, 2)
        >>> TimeDeltaEx(seconds=5) / 4
        TimeDeltaEx(0, 1, 250000)
        >>> TimeDeltaEx(microseconds=75) / 2.5
        TimeDeltaEx(0, 0, 30)
        @type divisor: timedelta, numbers.Number
        @rtype: TimeDeltaEx, numbers.Rational
        """
        if isinstance(divisor, timedelta):
            return Fraction(self.in_microseconds, td_to_mus(divisor))
        elif isinstance(divisor, numbers.Number):
            return TimeDeltaEx.from_microseconds(self.in_microseconds / divisor)
        else:
            raise NotImplementedError("{0!r} / {1!r}".format(self, divisor))
    # Same semantics under true division (the module imports
    # "from __future__ import division").
    __truediv__ = __div__
    def __rdiv__(self, dividend):
        """
        The dividend is divided by this TimeDeltaEx.
        The dividend must be a datetime.timedelta.
        The result is a ratio.
        >>> timedelta(seconds=5) / TimeDeltaEx(seconds=2)
        Fraction(5, 2)
        @type dividend: timedelta
        @rtype: numbers.Rational
        """
        # NOTE(review): the assert also admits plain numbers, but
        # td_to_mus() below handles only timedelta operands -- confirm
        # whether Number support was intended here.
        assert isinstance(dividend, (timedelta, numbers.Number)), \
               repr(dividend)
        return Fraction(td_to_mus(dividend), self.in_microseconds)
    __rtruediv__ = __rdiv__
    def __floordiv__(self, divisor):
        """
        Divide TimeDeltaEx by some datetime.timedelta or a number,
        with subsequent flooring to the integer value.
        For dividing TimeDeltaEx by datetime.timedelta,
        the result will be an integer number.
        For dividing TimeDeltaEx by a number, the result will be a TimeDeltaEx
        (the precision may be lost though).
        >>> TimeDeltaEx(seconds=5) // timedelta(seconds=2)
        2
        >>> TimeDeltaEx(seconds=5) // 4
        TimeDeltaEx(0, 1, 250000)
        >>> TimeDeltaEx(microseconds=75) // 2.6
        TimeDeltaEx(0, 0, 28)
        @type divisor: timedelta, numbers.Number
        @rtype: TimeDeltaEx, numbers.Number
        """
        if isinstance(divisor, timedelta):
            return self.in_microseconds // td_to_mus(divisor)
        elif isinstance(divisor, numbers.Number):
            return TimeDeltaEx.from_microseconds(self.in_microseconds //
                                                 divisor)
        else:
            raise NotImplementedError("{0!r} // {1!r}!".format(self, divisor))
    def __rfloordiv__(self, dividend):
        """
        The dividend is divided by this TimeDeltaEx,
        with subsequent flooring to the integer value.
        The dividend must be a datetime.timedelta (or any compatible subclass).
        The result is a number.
        >>> timedelta(seconds=5) // TimeDeltaEx(seconds=2)
        2
        @type dividend: timedelta
        @rtype: number.Number
        """
        assert isinstance(dividend, timedelta), repr(dividend)
        return td_to_mus(dividend) // self.in_microseconds
    def __mod__(self, divisor):
        """
        Find modulo for division of TimeDeltaEx by some
        datetime.timedelta.
        For dividing TimeDeltaEx by a datetime.timedelta,
        the modulo will be a TimeDeltaEx.
        The modulo for dividing TimeDeltaEx by a regular number
        is not defined.
        >>> TimeDeltaEx(seconds=42) % timedelta(seconds=11)
        TimeDeltaEx(0, 9)
        @type divisor: timedelta
        @rtype: TimeDeltaEx
        """
        if isinstance(divisor, timedelta):
            return TimeDeltaEx.from_microseconds(self.in_microseconds %
                                                 td_to_mus(divisor))
        else:
            raise NotImplementedError("{0!r} % {1!r}".format(self, divisor))
    def __rmod__(self, dividend):
        """
        Find modulo for division of some datetime.timedelta
        by the TimeDeltaEx.
        For dividing some datetime.timedelta by this TimeDeltaEx,
        the modulo will be a TimeDeltaEx.
        The modulo for dividing any other type by this TimeDeltaEx
        is not defined.
        >>> timedelta(seconds=42) % TimeDeltaEx(seconds=11)
        TimeDeltaEx(0, 9)
        @type dividend: timedelta
        @rtype: TimeDeltaEx
        """
        assert isinstance(dividend, timedelta), repr(dividend)
        return TimeDeltaEx.from_microseconds(td_to_mus(dividend) %
                                             self.in_microseconds)
    def __divmod__(self, divisor):
        """
        Calculate both the result of division and the modulo
        for division of this TimeDeltaEx by some datetime.timedelta.
        Even though the TimeDeltaEx may be divided to the regular number,
        the modulo for such operation cannot be calculated.
        >>> divmod(TimeDeltaEx(seconds=42), timedelta(seconds=11))
        (3, TimeDeltaEx(0, 9))
        @type divisor: timedelta
        @rtype: tuple
        """
        if isinstance(divisor, timedelta):
            _d, _m = divmod(self.in_microseconds, td_to_mus(divisor))
            return (_d, TimeDeltaEx.from_microseconds(_m))
        else:
            raise NotImplementedError("divmod({0!r}, {1!r})"
                                          .format(self, divisor))
    def __rdivmod__(self, dividend):
        """
        Calculate both the result of division and the modulo
        for division of some datetime.timedelta by this TimeDeltaEx.
        >>> divmod(timedelta(seconds=42), TimeDeltaEx(seconds=11))
        (3, TimeDeltaEx(0, 9))
        @type dividend: timedelta
        @rtype: TimeDeltaEx
        """
        assert isinstance(dividend, timedelta), repr(dividend)
        _d, _m = divmod(td_to_mus(dividend), self.in_microseconds)
        return (_d, TimeDeltaEx.from_microseconds(_m))
    def __mul__(self, n):
        """
        Multiplicate the TimeDeltaEx by a number.
        The sub-microsecond precision may be lost.
        >>> TimeDeltaEx(seconds=5) * 5
        TimeDeltaEx(0, 25)
        >>> 5 * TimeDeltaEx(seconds=5)
        TimeDeltaEx(0, 25)
        >>> TimeDeltaEx(microseconds=50) * 0.77
        TimeDeltaEx(0, 0, 39)
        >>> 0.77 * TimeDeltaEx(microseconds=50)
        TimeDeltaEx(0, 0, 39)
        @type n: numbers.Number
        @rtype: TimeDeltaEx
        """
        if isinstance(n, numbers.Number):
            return TimeDeltaEx.from_microseconds(self.in_microseconds * n)
        else:
            raise NotImplementedError("{0!r} * {1!r}".format(self, n))
    # Multiplication by a number is commutative.
    __rmul__ = __mul__
    def __add__(self, summand):
        """
        Add this TimeDeltaEx to the datetime.date, datetime.datetime,
        datetime.time (with possible wrapping at the midnight),
        or to the datetime.timedelta.
        Whenever another summand is the datetime.date,
        the result is automatically enhanced from datetime.date to DateEx.
        Whenever another summand is the datetime.datetime,
        the result is automatically enhanced from datetime.datetime to DateTimeEx.
        Whenever another summand is the datetime.time,
        the result is automatically enhanced from datetime.time to TimeEx.
        Whenever another summand is the datetime.timedelta,
        the result is automatically enhanced from datetime.timedelta
        to TimeDeltaEx.
        >>> time(23, 44, 55) + TimeDeltaEx(hours = 3, minutes = 20)
        TimeEx(3, 4, 55)
        >>> TimeEx(23, 44, 55) + TimeDeltaEx(hours = 3, minutes = 20)
        TimeEx(3, 4, 55)
        >>> time(23, 44, 55, tzinfo=DummyTZInfo()) + TimeDeltaEx(hours=3, minutes=20)
        TimeEx(3, 4, 55, tzinfo=<DummyTZInfo>)
        >>> TimeEx(23, 44, 55, tzinfo=DummyTZInfo()) + TimeDeltaEx(hours=3, minutes =20)
        TimeEx(3, 4, 55, tzinfo=<DummyTZInfo>)
        >>> TimeDeltaEx(hours=3, minutes=20) + time(23, 44, 55)
        TimeEx(3, 4, 55)
        >>> TimeDeltaEx(hours=3, minutes=20) + TimeEx(23, 44, 55)
        TimeEx(3, 4, 55)
        >>> TimeDeltaEx(hours=3, minutes=20) + time(23, 44, 55, tzinfo=DummyTZInfo())
        TimeEx(3, 4, 55, tzinfo=<DummyTZInfo>)
        >>> TimeDeltaEx(hours=3, minutes=20) + TimeEx(23, 44, 55, tzinfo=DummyTZInfo())
        TimeEx(3, 4, 55, tzinfo=<DummyTZInfo>)
        >>> TimeDeltaEx(3, 14, 15, 92) + timedelta(2, 71, 82, 81)
        TimeDeltaEx(5, 85, 173097)
        >>> timedelta(2, 71, 82, 81) + TimeDeltaEx(3, 14, 15, 92)
        TimeDeltaEx(5, 85, 173097)
        @type summand: date, datetime, time, timedelta
        @rtype: TimeDeltaEx
        """
        assert isinstance(summand, (date, datetime, time, timedelta)), \
               repr(summand)
        # NOTE: datetime is checked before date elsewhere matters not here,
        # but the branch order is significant: datetime is a subclass of
        # date, so the date branch would otherwise capture datetimes too.
        if isinstance(summand, date):
            # TODO
            raise NotImplementedError("Not yet implemented!")
        elif isinstance(summand, datetime):
            # TODO
            raise NotImplementedError("Not yet implemented!")
        elif isinstance(summand, time):
            return TimeEx.from_microseconds(self.in_microseconds + t_to_mus(summand),
                                            tzinfo=summand.tzinfo)
        elif isinstance(summand, timedelta):
            return TimeDeltaEx.from_microseconds(td_to_mus(self) + td_to_mus(summand))
        else:
            raise NotImplementedError("{0!r} + {1!r}".format(self, summand))
    # Addition is commutative for all supported operand types.
    __radd__ = __add__
    def __sub__(self, subtrahend):
        """
        Subtract a datetime.timedelta from this TimeDeltaEx.
        >>> TimeDeltaEx(3, 4, 15, 92) - timedelta(2, 71, 82, 81)
        TimeDeltaEx(0, 86333, 10933)
        >>> TimeDeltaEx(2, 71, 82, 81) - timedelta(3, 4, 15, 92)
        TimeDeltaEx(-1, 66, 989067)
        @type subtrahend: timedelta
        @rtype: TimeDeltaEx
        """
        if isinstance(subtrahend, timedelta):
            return TimeDeltaEx.from_microseconds(self.in_microseconds -
                                                 td_to_mus(subtrahend))
        else:
            raise NotImplementedError("{0!r} - {1!r}".format(self, subtrahend))
    def __rsub__(self, minuend):
        """
        This TimeDeltaEx is subtracted from the datetime.date, datetime.datetime,
        datetime.time (with possible wrapping at the midnight),
        or datetime.timedelta.
        Whenever the minuend is the datetime.date,
        the result is automatically enhanced from datetime.date to DateEx.
        Whenever the minuend is the datetime.datetime,
        the result is automatically enhanced from datetime.datetime to DateTimeEx.
        Whenever the minuend is the datetime.time,
        the result is automatically enhanced from datetime.time to TimeEx.
        Whenever the minuend is the datetime.timedelta,
        the result is automatically enhanced from datetime.timedelta
        to TimeDeltaEx.
        >>> TimeEx(3, 4, 15, 92) - TimeDeltaEx(0, 2, 71, 82)
        TimeEx(3, 4, 12, 918021)
        >>> TimeEx(3, 4, 15, 92) - TimeDeltaEx(2, 71, 82, 81)
        TimeEx(3, 3, 3, 919010)
        >>> TimeEx(3, 4, 15, 92, tzinfo=DummyTZInfo()) - TimeDeltaEx(2, 71, 82, 81)
        TimeEx(3, 3, 3, 919010, tzinfo=<DummyTZInfo>)
        >>> time(3, 4, 15, 92) - TimeDeltaEx(0, 2, 71, 82)
        TimeEx(3, 4, 12, 918021)
        >>> time(3, 4, 15, 92) - TimeDeltaEx(2, 71, 82, 81)
        TimeEx(3, 3, 3, 919010)
        >>> time(3, 4, 15, 92, tzinfo=DummyTZInfo()) - TimeDeltaEx(2, 71, 82, 81)
        TimeEx(3, 3, 3, 919010, tzinfo=<DummyTZInfo>)
        >>> timedelta(3, 4, 15, 92) - TimeDeltaEx(2, 71, 82, 81)
        TimeDeltaEx(0, 86333, 10933)
        >>> timedelta(2, 71, 82, 81) - TimeDeltaEx(3, 4, 15, 92)
        TimeDeltaEx(-1, 66, 989067)
        @type minuend: date, datetime, time, timedelta
        @rtype: date, datetime, time, timedelta
        """
        if isinstance(minuend, date):
            # TODO
            raise NotImplementedError("Not yet implemented!")
        elif isinstance(minuend, datetime):
            # TODO
            raise NotImplementedError("Not yet implemented!")
        elif isinstance(minuend, time):
            return TimeEx.from_microseconds(t_to_mus(minuend) - self.in_microseconds,
                                            tzinfo=minuend.tzinfo)
        elif isinstance(minuend, timedelta):
            return TimeDeltaEx.from_microseconds(td_to_mus(minuend) -
                                                 self.in_microseconds)
        else:
            raise NotImplementedError("{0!r} - {1!r}".format(minuend, self))
# Run unittests, if executed directly.
if __name__ == "__main__":
    import doctest
    # Execute the doctest examples embedded in the docstrings above.
    doctest.testmod()
|
j5shi/Thruster | refs/heads/master | pylibs/unittest/test/test_runner.py | 1 | import unittest
from cStringIO import StringIO
import pickle
from .support import LoggingResult, ResultWithNoStartTestRunStopTestRun
class TestCleanUp(unittest.TestCase):
    """Tests for TestCase.addCleanup / doCleanups behaviour."""

    def testCleanUp(self):
        # Cleanups must run in LIFO order, with the positional and keyword
        # arguments they were registered with.
        class TestableTest(unittest.TestCase):
            def testNothing(self):
                pass

        test = TestableTest('testNothing')
        self.assertEqual(test._cleanups, [])

        cleanups = []

        def cleanup1(*args, **kwargs):
            cleanups.append((1, args, kwargs))

        def cleanup2(*args, **kwargs):
            cleanups.append((2, args, kwargs))

        test.addCleanup(cleanup1, 1, 2, 3, four='hello', five='goodbye')
        test.addCleanup(cleanup2)

        self.assertEqual(test._cleanups,
                         [(cleanup1, (1, 2, 3), dict(four='hello', five='goodbye')),
                          (cleanup2, (), {})])

        result = test.doCleanups()
        self.assertTrue(result)

        # cleanup2 was added last, so it runs first.
        self.assertEqual(cleanups, [(2, (), {}), (1, (1, 2, 3),
                                    dict(four='hello', five='goodbye'))])

    def testCleanUpWithErrors(self):
        # Exceptions raised by cleanups are collected on the result object;
        # doCleanups() returns False when any cleanup failed.
        class TestableTest(unittest.TestCase):
            def testNothing(self):
                pass

        class MockResult(object):
            errors = []
            def addError(self, test, exc_info):
                self.errors.append((test, exc_info))

        result = MockResult()
        test = TestableTest('testNothing')
        test._resultForDoCleanups = result

        exc1 = Exception('foo')
        exc2 = Exception('bar')
        def cleanup1():
            raise exc1

        def cleanup2():
            raise exc2

        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)

        self.assertFalse(test.doCleanups())

        # Cleanups ran in LIFO order, so reversing the recorded errors
        # restores registration order.
        (test1, (Type1, instance1, _)), (test2, (Type2, instance2, _)) = reversed(MockResult.errors)
        self.assertEqual((test1, Type1, instance1), (test, Exception, exc1))
        self.assertEqual((test2, Type2, instance2), (test, Exception, exc2))

    def testCleanupInRun(self):
        # During run(), cleanups execute after tearDown; when setUp fails,
        # only the cleanups registered before the failure are executed.
        blowUp = False
        ordering = []
        class TestableTest(unittest.TestCase):
            def setUp(self):
                ordering.append('setUp')
                if blowUp:
                    raise Exception('foo')
            def testNothing(self):
                ordering.append('test')
            def tearDown(self):
                ordering.append('tearDown')

        test = TestableTest('testNothing')
        def cleanup1():
            ordering.append('cleanup1')
        def cleanup2():
            ordering.append('cleanup2')
        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)

        def success(some_test):
            self.assertEqual(some_test, test)
            ordering.append('success')

        result = unittest.TestResult()
        result.addSuccess = success

        test.run(result)
        self.assertEqual(ordering, ['setUp', 'test', 'tearDown',
                                    'cleanup2', 'cleanup1', 'success'])

        # Second run: setUp raises, so only cleanup1 (added before) runs.
        blowUp = True
        ordering = []
        test = TestableTest('testNothing')
        test.addCleanup(cleanup1)
        test.run(result)
        self.assertEqual(ordering, ['setUp', 'cleanup1'])

    def testTestCaseDebugExecutesCleanups(self):
        # debug() must also run cleanups, including ones registered by an
        # already-running cleanup (cleanup1 adds cleanup2 mid-flight).
        ordering = []
        class TestableTest(unittest.TestCase):
            def setUp(self):
                ordering.append('setUp')
                self.addCleanup(cleanup1)
            def testNothing(self):
                ordering.append('test')
            def tearDown(self):
                ordering.append('tearDown')

        test = TestableTest('testNothing')
        def cleanup1():
            ordering.append('cleanup1')
            test.addCleanup(cleanup2)
        def cleanup2():
            ordering.append('cleanup2')

        test.debug()
        self.assertEqual(ordering, ['setUp', 'test', 'tearDown', 'cleanup1', 'cleanup2'])
class Test_TextTestRunner(unittest.TestCase):
    """Tests for TextTestRunner."""

    def test_init(self):
        # Default construction: failfast/buffer off, verbosity 1,
        # descriptions on, TextTestResult as the result class.
        runner = unittest.TextTestRunner()
        self.assertFalse(runner.failfast)
        self.assertFalse(runner.buffer)
        self.assertEqual(runner.verbosity, 1)
        self.assertTrue(runner.descriptions)
        self.assertEqual(runner.resultclass, unittest.TextTestResult)

    def test_multiple_inheritance(self):
        class AResult(unittest.TestResult):
            def __init__(self, stream, descriptions, verbosity):
                super(AResult, self).__init__(stream, descriptions, verbosity)

        class ATextResult(unittest.TextTestResult, AResult):
            pass

        # This used to raise an exception due to TextTestResult not passing
        # on arguments in its __init__ super call
        ATextResult(None, None, 1)

    def testBufferAndFailfast(self):
        # run() must propagate the runner's failfast/buffer flags onto the
        # result object it uses.
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        result = unittest.TestResult()
        runner = unittest.TextTestRunner(stream=StringIO(), failfast=True,
                                         buffer=True)
        # Use our result object
        runner._makeResult = lambda: result
        runner.run(Test('testFoo'))

        self.assertTrue(result.failfast)
        self.assertTrue(result.buffer)

    def testRunnerRegistersResult(self):
        # The runner must register its result exactly once (for signal
        # handling support); the registration hook is patched to count calls.
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        originalRegisterResult = unittest.runner.registerResult
        def cleanup():
            unittest.runner.registerResult = originalRegisterResult
        self.addCleanup(cleanup)

        result = unittest.TestResult()
        runner = unittest.TextTestRunner(stream=StringIO())
        # Use our result object
        runner._makeResult = lambda: result

        self.wasRegistered = 0
        def fakeRegisterResult(thisResult):
            self.wasRegistered += 1
            self.assertEqual(thisResult, result)
        unittest.runner.registerResult = fakeRegisterResult

        runner.run(unittest.TestSuite())
        self.assertEqual(self.wasRegistered, 1)

    def test_works_with_result_without_startTestRun_stopTestRun(self):
        # Backwards compatibility: old result classes lacking the
        # startTestRun/stopTestRun hooks must still be usable.
        class OldTextResult(ResultWithNoStartTestRunStopTestRun):
            separator2 = ''
            def printErrors(self):
                pass

        class Runner(unittest.TextTestRunner):
            def __init__(self):
                super(Runner, self).__init__(StringIO())

            def _makeResult(self):
                return OldTextResult()

        runner = Runner()
        runner.run(unittest.TestSuite())

    def test_startTestRun_stopTestRun_called(self):
        # When the hooks exist they must be invoked, in order, around a run.
        class LoggingTextResult(LoggingResult):
            separator2 = ''
            def printErrors(self):
                pass

        class LoggingRunner(unittest.TextTestRunner):
            def __init__(self, events):
                super(LoggingRunner, self).__init__(StringIO())
                self._events = events

            def _makeResult(self):
                return LoggingTextResult(self._events)

        events = []
        runner = LoggingRunner(events)
        runner.run(unittest.TestSuite())
        expected = ['startTestRun', 'stopTestRun']
        self.assertEqual(events, expected)

    def test_pickle_unpickle(self):
        # Issue #7197: a TextTestRunner should be (un)pickleable. This is
        # required by test_multiprocessing under Windows (in verbose mode).
        from StringIO import StringIO as PickleableIO
        # cStringIO objects are not pickleable, but StringIO objects are.
        stream = PickleableIO("foo")
        runner = unittest.TextTestRunner(stream)
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            s = pickle.dumps(runner, protocol=protocol)
            obj = pickle.loads(s)
            # StringIO objects never compare equal, a cheap test instead.
            self.assertEqual(obj.stream.getvalue(), stream.getvalue())

    def test_resultclass(self):
        # _makeResult() must instantiate the configured resultclass with the
        # runner's stream, descriptions and verbosity.
        def MockResultClass(*args):
            return args
        STREAM = object()
        DESCRIPTIONS = object()
        VERBOSITY = object()
        runner = unittest.TextTestRunner(STREAM, DESCRIPTIONS, VERBOSITY,
                                         resultclass=MockResultClass)
        self.assertEqual(runner.resultclass, MockResultClass)

        expectedresult = (runner.stream, DESCRIPTIONS, VERBOSITY)
        self.assertEqual(runner._makeResult(), expectedresult)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
jhoos/django | refs/heads/master | django/contrib/gis/geos/prototypes/geom.py | 288 | from ctypes import POINTER, c_char_p, c_int, c_size_t, c_ubyte
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_minus_one, check_sized_string, check_string, check_zero,
)
# This is the return type used by binary output (WKB, HEX) routines.
c_uchar_p = POINTER(c_ubyte)


# We create a simple subclass of c_char_p here because when the response
# type is set to c_char_p, you get a _Python_ string and there's no way
# to access the string's address inside the error checking function.
# In other words, you can't free the memory allocated inside GEOS. Previously,
# the return type would just be omitted and the integer address would be
# used -- but this allows us to be specific in the function definition and
# keeps the reference so it may be free'd.
class geos_char_p(c_char_p):
    """c_char_p subclass that keeps the raw pointer accessible so the
    GEOS-allocated string can be freed inside errcheck handlers."""
    pass
# ### ctypes factory classes ###
class BinConstructor(GEOSFuncFactory):
    "Generates a prototype for binary construction (HEX, WKB) GEOS routines."
    # Input: raw buffer + its length; output: a new geometry pointer,
    # validated by check_geom.
    argtypes = [c_char_p, c_size_t]
    restype = GEOM_PTR
    errcheck = staticmethod(check_geom)
# HEX & WKB output
class BinOutput(GEOSFuncFactory):
    "Generates a prototype for the routines that return a sized string."
    # The size_t out-parameter receives the length of the returned buffer;
    # check_sized_string copies the data and frees the GEOS allocation.
    argtypes = [GEOM_PTR, POINTER(c_size_t)]
    restype = c_uchar_p
    errcheck = staticmethod(check_sized_string)
class GeomOutput(GEOSFuncFactory):
    "For GEOS routines that return a geometry."
    restype = GEOM_PTR
    errcheck = staticmethod(check_geom)

    def get_func(self, argtypes):
        # Argument types vary per routine, so they are supplied at
        # construction time rather than as a class attribute.
        self.argtypes = argtypes
        return super(GeomOutput, self).get_func()
class IntFromGeom(GEOSFuncFactory):
    "Argument is a geometry, return type is an integer."
    argtypes = [GEOM_PTR]
    restype = c_int

    def get_func(self, zero=False):
        # Some GEOS routines signal errors with 0, others with -1;
        # pick the matching error-check accordingly.
        if zero:
            self.errcheck = check_zero
        else:
            self.errcheck = check_minus_one
        return super(IntFromGeom, self).get_func()
class StringFromGeom(GEOSFuncFactory):
    "Argument is a Geometry, return type is a string."
    argtypes = [GEOM_PTR]
    # geos_char_p keeps the raw address so check_string can free the
    # GEOS-allocated buffer after copying it.
    restype = geos_char_p
    errcheck = staticmethod(check_string)
# ### ctypes prototypes ###

# Deprecated creation routines from WKB, HEX, WKT
from_hex = BinConstructor('GEOSGeomFromHEX_buf')
from_wkb = BinConstructor('GEOSGeomFromWKB_buf')
from_wkt = GeomOutput('GEOSGeomFromWKT', [c_char_p])

# Deprecated output routines
to_hex = BinOutput('GEOSGeomToHEX_buf')
to_wkb = BinOutput('GEOSGeomToWKB_buf')
to_wkt = StringFromGeom('GEOSGeomToWKT')

# The GEOS geometry type, typeid, num_coordinates and number of geometries
geos_normalize = IntFromGeom('GEOSNormalize')
geos_type = StringFromGeom('GEOSGeomType')
geos_typeid = IntFromGeom('GEOSGeomTypeId')
get_dims = IntFromGeom('GEOSGeom_getDimensions', zero=True)
get_num_coords = IntFromGeom('GEOSGetNumCoordinates')
get_num_geoms = IntFromGeom('GEOSGetNumGeometries')

# Geometry creation factories
create_point = GeomOutput('GEOSGeom_createPoint', [CS_PTR])
create_linestring = GeomOutput('GEOSGeom_createLineString', [CS_PTR])
create_linearring = GeomOutput('GEOSGeom_createLinearRing', [CS_PTR])

# Polygon and collection creation routines are special and will not
# have their argument types defined.
create_polygon = GeomOutput('GEOSGeom_createPolygon', None)
create_collection = GeomOutput('GEOSGeom_createCollection', None)

# Ring routines
get_extring = GeomOutput('GEOSGetExteriorRing', [GEOM_PTR])
get_intring = GeomOutput('GEOSGetInteriorRingN', [GEOM_PTR, c_int])
get_nrings = IntFromGeom('GEOSGetNumInteriorRings')

# Collection Routines
get_geomn = GeomOutput('GEOSGetGeometryN', [GEOM_PTR, c_int])

# Cloning
geom_clone = GEOSFuncFactory('GEOSGeom_clone', argtypes=[GEOM_PTR], restype=GEOM_PTR)

# Destruction routine.
destroy_geom = GEOSFuncFactory('GEOSGeom_destroy', argtypes=[GEOM_PTR])

# SRID routines
geos_get_srid = GEOSFuncFactory('GEOSGetSRID', argtypes=[GEOM_PTR], restype=c_int)
geos_set_srid = GEOSFuncFactory('GEOSSetSRID', argtypes=[GEOM_PTR, c_int])
|
softwaresaved/fat | refs/heads/master | lowfat/migrations/0139_auto_20181211_1005.py | 2 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-12-11 10:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds an optional `approver` foreign key (to the user model) on Fund
    # and on its historical counterpart (HistoricalFund — presumably the
    # history-tracking table; verify against the model definitions).

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('lowfat', '0138_auto_20180628_1307'),
    ]

    operations = [
        migrations.AddField(
            model_name='fund',
            name='approver',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='historicalfund',
            name='approver',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
Mitali-Sodhi/CodeLingo | refs/heads/master | Dataset/python/cursor_manager.py | 18 | # Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DEPRECATED - Different managers to handle when cursors are killed after
they are closed.
New cursor managers should be defined as subclasses of CursorManager and can be
installed on a connection by calling
`pymongo.connection.Connection.set_cursor_manager`.
.. versionchanged:: 2.1+
Deprecated.
"""
import weakref
class CursorManager(object):
    """The default cursor manager.

    This manager will kill cursors one at a time as they are closed.
    """

    def __init__(self, connection):
        """Instantiate the manager.

        :Parameters:
          - `connection`: a Mongo Connection
        """
        # Weak reference so the manager never keeps the connection alive.
        self.__connection = weakref.ref(connection)

    def close(self, cursor_id):
        """Close a cursor by killing it immediately.

        Raises TypeError if cursor_id is not an instance of (int, long).

        :Parameters:
          - `cursor_id`: cursor id to close
        """
        # NOTE: `long` only exists on Python 2 — this module is Python 2 code.
        if not isinstance(cursor_id, (int, long)):
            raise TypeError("cursor_id must be an instance of (int, long)")

        self.__connection().kill_cursors([cursor_id])
class BatchCursorManager(CursorManager):
    """A cursor manager that kills cursors in batches.

    Cursor ids accumulate in a pending list and are flushed to the server
    once more than ``__max_dying_cursors`` are queued, or when the manager
    is garbage collected.
    """

    def __init__(self, connection):
        """Instantiate the manager.

        :Parameters:
          - `connection`: a Mongo Connection
        """
        self.__dying_cursors = []
        self.__max_dying_cursors = 20
        # Weak reference so the manager never keeps the connection alive.
        self.__connection = weakref.ref(connection)

        CursorManager.__init__(self, connection)

    def __del__(self):
        """Cleanup - be sure to kill any outstanding cursors.
        """
        # BUG FIX: the weak reference may already be dead by the time the
        # manager is collected (e.g. during interpreter shutdown), in which
        # case self.__connection() returns None and the old code raised
        # AttributeError. Also skip the round trip when nothing is pending.
        connection = self.__connection()
        if connection is not None and self.__dying_cursors:
            connection.kill_cursors(self.__dying_cursors)
            self.__dying_cursors = []

    def close(self, cursor_id):
        """Close a cursor by killing it in a batch.

        Raises TypeError if cursor_id is not an instance of (int, long).

        :Parameters:
          - `cursor_id`: cursor id to close
        """
        # NOTE: `long` only exists on Python 2 — this module is Python 2 code.
        if not isinstance(cursor_id, (int, long)):
            raise TypeError("cursor_id must be an instance of (int, long)")

        self.__dying_cursors.append(cursor_id)

        if len(self.__dying_cursors) > self.__max_dying_cursors:
            self.__connection().kill_cursors(self.__dying_cursors)
            self.__dying_cursors = []
|
ipernet/RatticWeb | refs/heads/master | ratticweb/wsgi.py | 8 | """
WSGI config for ratticweb project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# Point Django at the project settings unless the environment already
# specifies a settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ratticweb.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
RichardLeeK/MachineLearning | refs/heads/master | MachineLearning/CNN_PLATEAU/modelController.py | 1 | import preprocess as pp
import preprocess_image as ppimg
import CNN_data as cd
import numpy as np
from keras.models import model_from_json
from env import Env
def _refine_Y(Y,threshold=0.5): # for sigmoid
for i in range(len(Y)):
cur_y=Y[i]
Y[i]= 1 if cur_y>threshold else 0
return Y
def _decide_Y(Y): # for softmax
result=[]
for cur_y in Y:
if cur_y[0]>cur_y[1]:
result.append(0)
else:
result.append(1)
return np.array(result)
def predict_data(data, model, env, pp=1):
    """Slide a window over *data*, run the CNN, and return a per-sample
    0/1 plateau mask of length ``len(data.T)``.

    Parameters
    ----------
    data : array-like
        Raw (or already preprocessed, if ``pp != 1``) signal data.
    model : keras model
        Trained CNN used for window-level prediction.
    env : Env
        Configuration source for window / y_range / step / threshold.
    pp : int
        When 1 (the default), run ``preprocess.transform_data`` first.
    """
    # BUG FIX: the parameter `pp` shadowed the module-level
    # `import preprocess as pp`, so `pp.transform_data(data)` raised
    # AttributeError whenever pp == 1 (the default). Re-import the module
    # under a local alias; the parameter name is kept for callers.
    import preprocess as _preprocess

    window = env.get_config("CNN", "window", type="int")
    y_range = env.get_config("CNN", "y_range", type="int")
    step = env.get_config("CNN", "step", type="int")
    threshold = env.get_config("test", "threshold", type="float")
    if pp == 1:
        data = _preprocess.transform_data(data)
    imgset = ppimg.transform_img(data, window=window, y_range=y_range, step=step)
    dataX = cd.make_cnn_X(imgset)
    pred = model.predict(dataX)
    total_len = len(data.T)
    # Binarize window scores, then expand back to sample resolution.
    pred = _refine_Y(pred, threshold=threshold)
    pred = translate_pred(pred, total_len, x_range=window, step=step)
    return pred
def translate_pred(pred, total_len, x_range=900, step=60):
    """Expand window-level 0/1 predictions into a sample-level 0/1 mask.

    A run of positive windows marks the sample span of every positive
    window after the first; when the run ends, the span of the last
    positive window is marked as well. Indices past *total_len* are clipped.
    """
    mask = [0] * total_len
    in_plateau = False
    for widx, label in enumerate(pred):
        if label == 1:
            if not in_plateau:
                # First positive window of a run: just open the run.
                in_plateau = True
            else:
                # Subsequent positive window: mark its whole span.
                start = widx * step
                for sample in range(start, min(start + x_range, total_len)):
                    mask[sample] = 1
        elif in_plateau:
            # The run just ended: mark the span of the previous window.
            start = (widx - 1) * step
            for sample in range(start, min(start + x_range, total_len)):
                mask[sample] = 1
            in_plateau = False
    return np.array(mask)
def save_model(model, env):
    """Persist a trained model: architecture as JSON, weights as HDF5.

    Parameters
    ----------
    model : keras model
        trained model
    env : Env
        supplies "path"/"model_save_path" and "path"/"weight_save_path"
    """
    json_path = env.get_config("path", "model_save_path")
    h5_path = env.get_config("path", "weight_save_path")

    # Architecture -> JSON file.
    with open(json_path, "w") as json_file:
        json_file.write(model.to_json())

    # Weights -> HDF5 file (overwriting any previous checkpoint).
    model.save_weights(h5_path, overwrite="True")
    print("Saved model to disk")
def load_model(env):
    """Load a trained model from its JSON architecture and HDF5 weights.

    Parameters
    ----------
    env : Env
        supplies "path"/"model_load_path" and "path"/"weight_load_path"

    Returns
    -------
    model : keras model
        trained model
    """
    json_path = env.get_config("path", "model_load_path")
    h5_path = env.get_config("path", "weight_load_path")

    # IDIOM FIX: use a context manager so the file handle is closed even if
    # reading or JSON parsing raises (the old explicit open/close leaked on
    # error).
    with open(json_path, 'r') as json_file:
        model = model_from_json(json_file.read())

    # Restore the trained weights into the rebuilt architecture.
    model.load_weights(h5_path)
    print("Loaded model from disk")
    return model
AngelChadni/support-tools | refs/heads/master | googlecode-issues-exporter/github_issue_converter_test.py | 90 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the GitHub Services."""
# pylint: disable=missing-docstring,protected-access
import unittest
import github_services
import issues
from issues_test import DEFAULT_USERNAME
from issues_test import SINGLE_ISSUE
from issues_test import COMMENT_ONE
from issues_test import COMMENT_TWO
from issues_test import COMMENT_THREE
from issues_test import COMMENTS_DATA
from issues_test import NO_ISSUE_DATA
from issues_test import USER_MAP
from issues_test import REPO
# Fixture constants shared by the tests below.

# The GitHub username.
GITHUB_USERNAME = DEFAULT_USERNAME
# The GitHub repo name.
GITHUB_REPO = REPO
# The GitHub oauth token.
GITHUB_TOKEN = "oauth_token"
# The URL used for calls to GitHub.
# NOTE(review): not referenced by the tests in this file — possibly kept
# for symmetry with the module under test; confirm before removing.
GITHUB_API_URL = "https://api.github.com"
class TestIssueExporter(unittest.TestCase):
    """Tests for the IssueExporter."""

    def setUp(self):
        # Wire the exporter to a fake GitHub backend so no network is used.
        self.github_service = github_services.FakeGitHubService(GITHUB_USERNAME,
                                                                GITHUB_REPO,
                                                                GITHUB_TOKEN)
        self.github_user_service = github_services.UserService(
            self.github_service)
        self.github_issue_service = github_services.IssueService(
            self.github_service, comment_delay=0)
        self.issue_exporter = issues.IssueExporter(
            self.github_issue_service, self.github_user_service,
            NO_ISSUE_DATA, GITHUB_REPO, USER_MAP)
        self.issue_exporter.Init()

        # Canned Google Code issue data reused by the tests below.
        self.TEST_ISSUE_DATA = [
            {
                "id": "1",
                "number": "1",
                "title": "Title1",
                "state": "open",
                "comments": {
                    "items": [COMMENT_ONE, COMMENT_TWO, COMMENT_THREE],
                },
                "labels": ["Type-Issue", "Priority-High"],
                "owner": {"kind": "projecthosting#issuePerson",
                          "name": "User1"
                          },
            },
            {
                "id": "2",
                "number": "2",
                "title": "Title2",
                "state": "closed",
                "owner": {"kind": "projecthosting#issuePerson",
                          "name": "User2"
                          },
                "labels": [],
                "comments": {
                    "items": [COMMENT_ONE],
                },
            },
            {
                "id": "3",
                "number": "3",
                "title": "Title3",
                "state": "closed",
                "comments": {
                    "items": [COMMENT_ONE, COMMENT_TWO],
                },
                "labels": ["Type-Defect"],
                "owner": {"kind": "projecthosting#issuePerson",
                          "name": "User3"
                          }
            }]

    def testGetAllPreviousIssues(self):
        # Issues already present on GitHub must be indexed as exported,
        # with their GitHub number and comment count recorded.
        open_issues_response = [{"number": 9, "title": "Title2", "comments": 2}]
        closed_issues_response = [{"number": 10, "title": "Title1", "comments": 1}]

        self.issue_exporter._issue_json_data = self.TEST_ISSUE_DATA
        self.github_service.AddResponse(content=open_issues_response)
        self.github_service.AddResponse(content=closed_issues_response)
        self.issue_exporter.Init()

        index = self.issue_exporter._issue_index
        self.assertEqual(3, len(index))

        self.assertEqual(1, len(index["Title1"]))
        self.assertTrue(index["Title1"][0]["exported"])
        self.assertEqual('1', index["Title1"][0]["googlecode_id"])
        self.assertEqual(10, index["Title1"][0]["exported_id"])
        self.assertEqual(1, index["Title1"][0]["comment_count"])

        self.assertEqual(1, len(index["Title2"]))
        self.assertTrue(index["Title2"][0]["exported"])
        self.assertEqual('2', index["Title2"][0]["googlecode_id"])
        self.assertEqual(9, index["Title2"][0]["exported_id"])
        self.assertEqual(2, index["Title2"][0]["comment_count"])

        # Title3 does not exist on GitHub yet.
        self.assertEqual(1, len(index["Title3"]))
        self.assertFalse(index["Title3"][0]["exported"])

    def testCreateIssue(self):
        self.github_service.AddResponse(content={"number": 1234})
        issue_number = self.issue_exporter._CreateIssue(SINGLE_ISSUE)
        self.assertEqual(1234, issue_number)

    def testCreateIssueFailedOpenRequest(self):
        self.github_service.AddFailureResponse()
        with self.assertRaises(issues.ServiceError):
            self.issue_exporter._CreateIssue(SINGLE_ISSUE)

    def testCreateIssueFailedCloseRequest(self):
        # A failure while closing must not lose the created issue number.
        content = {"number": 1234}
        self.github_service.AddResponse(content=content)
        self.github_service.AddFailureResponse()
        issue_number = self.issue_exporter._CreateIssue(SINGLE_ISSUE)
        self.assertEqual(1234, issue_number)

    def testCreateComments(self):
        self.assertEqual(0, self.issue_exporter._comment_number)
        self.issue_exporter._CreateComments(COMMENTS_DATA, 1234, SINGLE_ISSUE)
        self.assertEqual(4, self.issue_exporter._comment_number)

    def testCreateCommentsFailure(self):
        self.github_service.AddFailureResponse()
        self.assertEqual(0, self.issue_exporter._comment_number)
        with self.assertRaises(issues.ServiceError):
            self.issue_exporter._CreateComments(COMMENTS_DATA, 1234, SINGLE_ISSUE)

    def testStart(self):
        self.issue_exporter._issue_json_data = self.TEST_ISSUE_DATA
        self.issue_exporter.Init()

        # Note: Some responses are from CreateIssues, others are from CreateComment.
        self.github_service.AddResponse(content={"number": 1})
        self.github_service.AddResponse(content={"number": 10})
        self.github_service.AddResponse(content={"number": 11})
        self.github_service.AddResponse(content={"number": 2})
        self.github_service.AddResponse(content={"number": 20})
        self.github_service.AddResponse(content={"number": 3})
        self.github_service.AddResponse(content={"number": 30})

        self.issue_exporter.Start()

        self.assertEqual(3, self.issue_exporter._issue_total)
        self.assertEqual(3, self.issue_exporter._issue_number)
        # Comment counts are per issue and should match the numbers from the last
        # issue created, minus one for the first comment, which is really
        # the issue description.
        self.assertEqual(1, self.issue_exporter._comment_number)
        self.assertEqual(1, self.issue_exporter._comment_total)

    def testStart_SkipDeletedComments(self):
        # A comment carrying `deletedBy` must be skipped by the export.
        comment = {
            "content": "one",
            "id": 1,
            "published": "last year",
            "author": {"name": "user@email.com"},
            "updates": {
                "labels": ["added-label", "-removed-label"],
            },
        }

        self.issue_exporter._issue_json_data = [
            {
                "id": "1",
                "number": "1",
                "title": "Title1",
                "state": "open",
                "comments": {
                    "items": [
                        COMMENT_ONE,
                        comment,
                        COMMENT_TWO,
                        comment],
                },
                "labels": ["Type-Issue", "Priority-High"],
                "owner": {"kind": "projecthosting#issuePerson",
                          "name": "User1"
                          },
            }]
        self.issue_exporter.Init()

        self.github_service.AddResponse(content={"number": 1})  # CreateIssue(...)

        self.issue_exporter.Start()
        # Remember, the first comment is for the issue.
        self.assertEqual(3, self.issue_exporter._comment_number)
        self.assertEqual(3, self.issue_exporter._comment_total)

        # Set the deletedBy information for the comment object, now they
        # should be ignored by the export.
        comment["deletedBy"] = {}

        self.issue_exporter.Init()
        self.github_service.AddResponse(content={"number": 1})  # CreateIssue(...)
        self.issue_exporter.Start()
        self.assertEqual(1, self.issue_exporter._comment_number)
        self.assertEqual(1, self.issue_exporter._comment_total)

    def testStart_SkipAlreadyCreatedIssues(self):
        # Issues flagged as exported (with all comments present) are skipped.
        self.issue_exporter._issue_json_data = self.TEST_ISSUE_DATA
        self.issue_exporter.Init()
        self.issue_exporter._issue_index["Title1"][0]["exported"] = True
        self.issue_exporter._issue_index["Title1"][0]["comment_count"] = 1
        self.issue_exporter._issue_index["Title2"][0]["exported"] = True
        self.issue_exporter._issue_index["Title2"][0]["comment_count"] = 2

        self.github_service.AddResponse(content={"number": 3})  # CreateIssue(...)
        self.github_service.AddResponse(content={"number": 3})  # CreateIssue(...)

        self.issue_exporter.Start()

        self.assertEqual(2, self.issue_exporter._skipped_issues)
        self.assertEqual(3, self.issue_exporter._issue_total)
        self.assertEqual(3, self.issue_exporter._issue_number)

    def testStart_ReAddMissedComments(self):
        # An exported issue with fewer comments than the source data gets
        # the missing comments re-added before the remaining issues export.
        self.issue_exporter._issue_json_data = self.TEST_ISSUE_DATA
        self.issue_exporter.Init()

        # Mark it as exported but missing 2 comments.
        self.issue_exporter._issue_index["Title1"][0]["exported"] = True
        self.issue_exporter._issue_index["Title1"][0]["comment_count"] = 1

        # First requests to re-add comments, then create issues.
        self.github_service.AddResponse(content={"number": 11})
        self.github_service.AddResponse(content={"number": 12})
        self.github_service.AddResponse(content={"number": 2})
        self.github_service.AddResponse(content={"number": 3})

        self.issue_exporter.Start()
        self.assertEqual(1, self.issue_exporter._skipped_issues)
        self.assertEqual(3, self.issue_exporter._issue_total)
        self.assertEqual(3, self.issue_exporter._issue_number)
# Allow running this test module directly; buffer=True suppresses stdout
# from passing tests.
if __name__ == "__main__":
    unittest.main(buffer=True)
|
IllusionRom-deprecated/android_platform_tools_idea | refs/heads/master | python/testData/codeInsight/smartEnter/comment_after.py | 4 | #comment
#<caret> |
luto/django-import-export | refs/heads/master | import_export/resources.py | 1 | from __future__ import unicode_literals
import functools
from copy import deepcopy
import sys
import traceback
import tablib
from diff_match_patch import diff_match_patch
from django import VERSION
from django.utils.safestring import mark_safe
from django.utils import six
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query import QuerySet
from django.db.transaction import TransactionManagementError
from django.conf import settings
from .results import Error, Result, RowResult
from .fields import Field
from import_export import widgets
from .instance_loaders import (
ModelInstanceLoader,
)
# Transaction helpers: newer Django exposes these in db.transaction;
# older versions fall back to the local compatibility shims.
try:
    from django.db.transaction import atomic, savepoint, savepoint_rollback, savepoint_commit  # noqa
except ImportError:
    from .django_compat import atomic, savepoint, savepoint_rollback, savepoint_commit  # noqa


# RelatedObject was replaced by ForeignObjectRel in Django 1.8.
if VERSION < (1, 8):
    from django.db.models.related import RelatedObject
    ForeignObjectRel = RelatedObject
else:
    from django.db.models.fields.related import ForeignObjectRel
    RelatedObject = None

# force_text is the Python-3-aware name of force_unicode.
try:
    from django.utils.encoding import force_text
except ImportError:
    from django.utils.encoding import force_unicode as force_text

# OrderedDict is stdlib from Python 2.7; SortedDict is the older fallback.
try:
    from collections import OrderedDict
except ImportError:
    from django.utils.datastructures import SortedDict as OrderedDict


# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        # No-op fallback handler for Python < 2.7.
        def emit(self, record):
            pass

logging.getLogger(__name__).addHandler(NullHandler())

# Global default for Meta.use_transactions when it is left as None.
USE_TRANSACTIONS = getattr(settings, 'IMPORT_EXPORT_USE_TRANSACTIONS', False)
class ResourceOptions(object):
    """
    The inner Meta class allows for class-level configuration of how the
    Resource should behave. The following options are available:

    * ``fields`` - Controls what introspected fields the Resource
      should include. A whitelist of fields.

    * ``exclude`` - Controls what introspected fields the Resource should
      NOT include. A blacklist of fields.

    * ``model`` - Django Model class. It is used to introspect available
      fields.

    * ``instance_loader_class`` - Controls which class instance will take
      care of loading existing objects.

    * ``import_id_fields`` - Controls which object fields will be used to
      identify existing instances.

    * ``export_order`` - Controls export order for columns.

    * ``widgets`` - dictionary defines widget kwargs for fields.

    * ``use_transactions`` - Controls if import should use database
      transactions. Default value is ``None`` meaning
      ``settings.IMPORT_EXPORT_USE_TRANSACTIONS`` will be evaluated.

    * ``skip_unchanged`` - Controls if the import should skip unchanged records.
      Default value is False

    * ``report_skipped`` - Controls if the result reports skipped rows
      Default value is True
    """
    # Defaults below mirror the documented behaviour; DeclarativeMetaclass
    # copies any attributes set on a resource's inner Meta over these.
    fields = None
    model = None
    exclude = None
    instance_loader_class = None
    import_id_fields = ['id']
    export_order = None
    widgets = None
    use_transactions = None
    skip_unchanged = False
    report_skipped = True
class DeclarativeMetaclass(type):
    """Metaclass that collects declared Field attributes into an ordered
    ``fields`` dict and folds inner ``Meta`` options into ``_meta``."""

    def __new__(cls, name, bases, attrs):
        declared_fields = []
        meta = ResourceOptions()

        # If this class is subclassing another Resource, add that Resource's fields.
        # Note that we loop over the bases in *reverse*. This is necessary in
        # order to preserve the correct order of fields.
        for base in bases[::-1]:
            if hasattr(base, 'fields'):
                declared_fields = list(six.iteritems(base.fields)) + declared_fields
                # Collect the Meta options
                options = getattr(base, 'Meta', None)
                for option in [option for option in dir(options)
                               if not option.startswith('_')]:
                    setattr(meta, option, getattr(options, option))

        # Add direct fields: Field attributes are popped off the class and
        # gathered into the ordered `fields` mapping instead.
        for field_name, obj in attrs.copy().items():
            if isinstance(obj, Field):
                field = attrs.pop(field_name)
                if not field.column_name:
                    field.column_name = field_name
                declared_fields.append((field_name, field))

        attrs['fields'] = OrderedDict(declared_fields)
        new_class = super(DeclarativeMetaclass, cls).__new__(cls, name,
                                                             bases, attrs)

        # Add direct options (this class's own Meta overrides inherited ones).
        options = getattr(new_class, 'Meta', None)
        for option in [option for option in dir(options)
                       if not option.startswith('_')]:
            setattr(meta, option, getattr(options, option))
        new_class._meta = meta

        return new_class
class Resource(six.with_metaclass(DeclarativeMetaclass)):
"""
Resource defines how objects are mapped to their import and export
representations and handle importing and exporting data.
"""
    def get_use_transactions(self):
        # Meta.use_transactions, when set, overrides the module-level
        # IMPORT_EXPORT_USE_TRANSACTIONS setting.
        if self._meta.use_transactions is None:
            return USE_TRANSACTIONS
        else:
            return self._meta.use_transactions
    def get_fields(self):
        """
        Returns fields in ``export_order`` order.
        """
        # get_export_order() yields field names; map them to Field instances.
        return [self.fields[f] for f in self.get_export_order()]
@classmethod
def get_field_name(cls, field):
"""
Returns field name for given field.
"""
for field_name, f in cls.fields.items():
if f == field:
return field_name
raise AttributeError("Field %s does not exists in %s resource" % (
field, cls))
def init_instance(self, row=None):
raise NotImplementedError()
def get_instance(self, instance_loader, row):
return instance_loader.get_instance(row)
def get_or_init_instance(self, instance_loader, row):
instance = self.get_instance(instance_loader, row)
if instance:
return (instance, False)
else:
return (self.init_instance(row), True)
def save_instance(self, instance, dry_run=False):
self.before_save_instance(instance, dry_run)
if not dry_run:
instance.save()
self.after_save_instance(instance, dry_run)
def before_save_instance(self, instance, dry_run):
"""
Override to add additional logic.
"""
pass
def after_save_instance(self, instance, dry_run):
"""
Override to add additional logic.
"""
pass
def delete_instance(self, instance, dry_run=False):
self.before_delete_instance(instance, dry_run)
if not dry_run:
instance.delete()
self.after_delete_instance(instance, dry_run)
def before_delete_instance(self, instance, dry_run):
"""
Override to add additional logic.
"""
pass
def after_delete_instance(self, instance, dry_run):
"""
Override to add additional logic.
"""
pass
def import_field(self, field, obj, data):
if field.attribute and field.column_name in data:
field.save(obj, data)
def import_obj(self, obj, data, dry_run):
"""
"""
for field in self.get_fields():
if isinstance(field.widget, widgets.ManyToManyWidget):
continue
self.import_field(field, obj, data)
def save_m2m(self, obj, data, dry_run):
"""
Saves m2m fields.
Model instance need to have a primary key value before
a many-to-many relationship can be used.
"""
if not dry_run:
for field in self.get_fields():
if not isinstance(field.widget, widgets.ManyToManyWidget):
continue
self.import_field(field, obj, data)
def for_delete(self, row, instance):
"""
Returns ``True`` if ``row`` importing should delete instance.
Default implementation returns ``False``.
Override this method to handle deletion.
"""
return False
def skip_row(self, instance, original):
"""
Returns ``True`` if ``row`` importing should be skipped.
Default implementation returns ``False`` unless skip_unchanged == True.
Override this method to handle skipping rows meeting certain conditions.
"""
if not self._meta.skip_unchanged:
return False
for field in self.get_fields():
try:
# For fields that are models.fields.related.ManyRelatedManager
# we need to compare the results
if list(field.get_value(instance).all()) != list(field.get_value(original).all()):
return False
except AttributeError:
if field.get_value(instance) != field.get_value(original):
return False
return True
def get_diff(self, original, current, dry_run=False):
"""
Get diff between original and current object when ``import_data``
is run.
``dry_run`` allows handling special cases when object is not saved
to database (ie. m2m relationships).
"""
data = []
dmp = diff_match_patch()
for field in self.get_fields():
v1 = self.export_field(field, original) if original else ""
v2 = self.export_field(field, current) if current else ""
diff = dmp.diff_main(force_text(v1), force_text(v2))
dmp.diff_cleanupSemantic(diff)
html = dmp.diff_prettyHtml(diff)
html = mark_safe(html)
data.append(html)
return data
def get_diff_headers(self):
"""
Diff representation headers.
"""
return self.get_export_headers()
def before_import(self, dataset, dry_run, **kwargs):
"""
Override to add additional logic.
"""
pass
@atomic()
def import_data(self, dataset, dry_run=False, raise_errors=False,
use_transactions=None, **kwargs):
"""
Imports data from ``dataset``.
``use_transactions``
If ``True`` import process will be processed inside transaction.
If ``dry_run`` is set, or error occurs, transaction will be rolled
back.
"""
result = Result()
result.diff_headers = self.get_diff_headers()
if use_transactions is None:
use_transactions = self.get_use_transactions()
if use_transactions is True:
# when transactions are used we want to create/update/delete object
# as transaction will be rolled back if dry_run is set
real_dry_run = False
sp1 = savepoint()
else:
real_dry_run = dry_run
try:
self.before_import(dataset, real_dry_run, **kwargs)
except Exception as e:
logging.exception(e)
tb_info = traceback.format_exc(2)
result.base_errors.append(Error(repr(e), tb_info))
if raise_errors:
if use_transactions:
savepoint_rollback(sp1)
raise
instance_loader = self._meta.instance_loader_class(self, dataset)
for row in dataset.dict:
try:
row_result = RowResult()
instance, new = self.get_or_init_instance(instance_loader, row)
if new:
row_result.import_type = RowResult.IMPORT_TYPE_NEW
else:
row_result.import_type = RowResult.IMPORT_TYPE_UPDATE
row_result.new_record = new
original = deepcopy(instance)
if self.for_delete(row, instance):
if new:
row_result.import_type = RowResult.IMPORT_TYPE_SKIP
row_result.diff = self.get_diff(None, None,
real_dry_run)
else:
row_result.import_type = RowResult.IMPORT_TYPE_DELETE
self.delete_instance(instance, real_dry_run)
row_result.diff = self.get_diff(original, None,
real_dry_run)
else:
self.import_obj(instance, row, real_dry_run)
if self.skip_row(instance, original):
row_result.import_type = RowResult.IMPORT_TYPE_SKIP
else:
self.save_instance(instance, real_dry_run)
self.save_m2m(instance, row, real_dry_run)
# Add object info to RowResult for LogEntry
row_result.object_repr = force_text(instance)
row_result.object_id = instance.pk
row_result.diff = self.get_diff(original, instance,
real_dry_run)
except Exception as e:
# There is no point logging a transaction error for each row
# when only the original error is likely to be relevant
if not isinstance(e, TransactionManagementError):
logging.exception(e)
tb_info = traceback.format_exc(2)
row_result.errors.append(Error(e, tb_info, row))
if raise_errors:
if use_transactions:
savepoint_rollback(sp1)
six.reraise(*sys.exc_info())
if (row_result.import_type != RowResult.IMPORT_TYPE_SKIP or
self._meta.report_skipped):
result.rows.append(row_result)
if use_transactions:
if dry_run or result.has_errors():
savepoint_rollback(sp1)
else:
savepoint_commit(sp1)
return result
def get_export_order(self):
order = tuple (self._meta.export_order or ())
return order + tuple (k for k in self.fields.keys() if k not in order)
def export_field(self, field, obj):
field_name = self.get_field_name(field)
method = getattr(self, 'dehydrate_%s' % field_name, None)
if method is not None:
return method(obj)
return field.export(obj)
def export_resource(self, obj):
return [self.export_field(field, obj) for field in self.get_fields()]
def get_export_headers(self):
headers = [force_text(field.column_name) for field in self.get_fields()]
return headers
def export(self, queryset=None):
"""
Exports a resource.
"""
if queryset is None:
queryset = self.get_queryset()
headers = self.get_export_headers()
data = tablib.Dataset(headers=headers)
if isinstance(queryset, QuerySet):
# Iterate without the queryset cache, to avoid wasting memory when
# exporting large datasets.
iterable = queryset.iterator()
else:
iterable = queryset
for obj in iterable:
data.append(self.export_resource(obj))
return data
class ModelDeclarativeMetaclass(DeclarativeMetaclass):
    """
    Metaclass that, given ``Meta.model``, introspects the Django model and
    auto-creates Resource fields for its columns and for ``__``-separated
    relationship paths listed in ``Meta.fields``.
    """
    def __new__(cls, name, bases, attrs):
        new_class = super(ModelDeclarativeMetaclass,
                          cls).__new__(cls, name, bases, attrs)
        opts = new_class._meta
        if not opts.instance_loader_class:
            opts.instance_loader_class = ModelInstanceLoader
        if opts.model:
            model_opts = opts.model._meta
            declared_fields = new_class.fields
            field_list = []
            for f in sorted(model_opts.fields + model_opts.many_to_many):
                # Honour Meta.fields / Meta.exclude, and never clobber an
                # explicitly declared field.
                if opts.fields is not None and not f.name in opts.fields:
                    continue
                if opts.exclude and f.name in opts.exclude:
                    continue
                if f.name in declared_fields:
                    continue
                field = new_class.field_from_django_field(f.name, f,
                                                          readonly=False)
                field_list.append((f.name, field, ))
            new_class.fields.update(OrderedDict(field_list))
            #add fields that follow relationships
            if opts.fields is not None:
                field_list = []
                for field_name in opts.fields:
                    if field_name in declared_fields:
                        continue
                    # Only '__'-separated paths are relationship traversals.
                    if field_name.find('__') == -1:
                        continue
                    model = opts.model
                    attrs = field_name.split('__')
                    for i, attr in enumerate(attrs):
                        # Human-readable dotted path for error messages.
                        verbose_path = ".".join([opts.model.__name__] + attrs[0:i+1])
                        try:
                            f = model._meta.get_field_by_name(attr)[0]
                        except FieldDoesNotExist as e:
                            logging.exception(e)
                            raise FieldDoesNotExist("%s: %s has no field named '%s'" %
                                                    (verbose_path, model.__name__, attr))
                        if i < len(attrs) - 1:
                            # We're not at the last attribute yet, so check that
                            # we're looking at a relation, and move on to the
                            # next model.
                            if isinstance(f, ForeignObjectRel):
                                if RelatedObject is None:
                                    model = f.related_model
                                else:
                                    # Django < 1.8
                                    model = f.model
                            else:
                                if f.rel is None:
                                    raise KeyError('%s is not a relation' % verbose_path)
                                model = f.rel.to
                    if isinstance(f, ForeignObjectRel):
                        f = f.field
                    # Relationship-following fields are export-only.
                    field = new_class.field_from_django_field(field_name, f,
                                                              readonly=True)
                    field_list.append((field_name, field))
                new_class.fields.update(OrderedDict(field_list))
        return new_class
class ModelResource(six.with_metaclass(ModelDeclarativeMetaclass, Resource)):
    """
    ModelResource is Resource subclass for handling Django models.
    """
    @classmethod
    def widget_from_django_field(cls, f, default=widgets.Widget):
        """
        Returns the widget that would likely be associated with each
        Django type.
        """
        result = default
        internal_type = f.get_internal_type()
        if internal_type in ('ManyToManyField', ):
            result = functools.partial(widgets.ManyToManyWidget,
                                       model=f.rel.to)
        if internal_type in ('ForeignKey', 'OneToOneField', ):
            result = functools.partial(widgets.ForeignKeyWidget,
                                       model=f.rel.to)
        if internal_type in ('DecimalField', ):
            result = widgets.DecimalWidget
        # NOTE: the following is a separate if/elif chain — DateTimeField
        # starts a new chain rather than continuing the DecimalField check.
        if internal_type in ('DateTimeField', ):
            result = widgets.DateTimeWidget
        elif internal_type in ('DateField', ):
            result = widgets.DateWidget
        elif internal_type in ('IntegerField', 'PositiveIntegerField',
                               'PositiveSmallIntegerField', 'SmallIntegerField', 'AutoField'):
            result = widgets.IntegerWidget
        elif internal_type in ('BooleanField', 'NullBooleanField'):
            result = widgets.BooleanWidget
        return result
    @classmethod
    def widget_kwargs_for_field(self, field_name):
        """
        Returns widget kwargs for given field_name.
        """
        # NOTE(review): the first parameter of this @classmethod is named
        # ``self`` but receives the class; conventionally it should be ``cls``.
        if self._meta.widgets:
            return self._meta.widgets.get(field_name, {})
        return {}
    @classmethod
    def field_from_django_field(self, field_name, django_field, readonly):
        """
        Returns a Resource Field instance for the given Django model field.
        """
        # NOTE(review): ``self`` here is also the class (see note above).
        FieldWidget = self.widget_from_django_field(django_field)
        widget_kwargs = self.widget_kwargs_for_field(field_name)
        field = Field(attribute=field_name, column_name=field_name,
                      widget=FieldWidget(**widget_kwargs), readonly=readonly)
        return field
    def get_import_id_fields(self):
        # Fields used to identify existing rows during import.
        return self._meta.import_id_fields
    def get_queryset(self):
        # Default export queryset: every object of the configured model.
        return self._meta.model.objects.all()
    def init_instance(self, row=None):
        # New, unsaved model instance; attribute values are filled in later
        # by import_obj().
        return self._meta.model()
def modelresource_factory(model, resource_class=ModelResource):
    """
    Factory for creating ``ModelResource`` class for given Django model.
    """
    # Build an inner Meta class binding the model, then let the
    # ModelDeclarativeMetaclass assemble the resource subclass.
    meta_class = type(str('Meta'), (object,), {'model': model})
    resource_name = model.__name__ + str('Resource')
    return ModelDeclarativeMetaclass(resource_name, (resource_class,),
                                     {'Meta': meta_class})
|
factorlibre/server-tools | refs/heads/8.0 | auth_admin_passkey/__init__.py | 61 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Admin Passkey module for OpenERP
# Copyright (C) 2013-2014 GRAP (http://www.grap.coop)
# @author Sylvain LE GAL (https://twitter.com/legalsylvain)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import model
|
monikagrabowska/osf.io | refs/heads/develop | osf/migrations/0029_externalaccount_date_last_refreshed.py | 4 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-19 15:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a nullable ``date_last_refreshed`` timestamp column to the
    # osf ExternalAccount model.
    dependencies = [
        ('osf', '0028_merge'),
    ]
    operations = [
        migrations.AddField(
            model_name='externalaccount',
            name='date_last_refreshed',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
unnikrishnankgs/va | refs/heads/master | venv/lib/python3.5/site-packages/html5lib/treewalkers/_base.py | 61 | from __future__ import absolute_import, division, unicode_literals
from six import text_type, string_types
__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
"TreeWalker", "NonRecursiveTreeWalker"]
from xml.dom import Node
# Token type codes, aliased from xml.dom.Node so walkers emit
# DOM-compatible node type constants.
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"
from ..constants import voidElements, spaceCharacters
# Collapse the frozenset of whitespace characters into a single string so
# it can be passed to str.lstrip()/rstrip().
spaceCharacters = "".join(spaceCharacters)
def to_text(s, blank_if_none=True):
    """Coerce *s* to text; None becomes "" (or stays None when
    *blank_if_none* is False)."""
    if s is None:
        return "" if blank_if_none else None
    if isinstance(s, text_type):
        return s
    return text_type(s)
def is_text_or_none(string):
    """Return True when *string* is either None or a string instance."""
    return isinstance(string, string_types) or string is None
class TreeWalker(object):
    """
    Abstract base class for walking a parsed tree and emitting a stream
    of token dicts (StartTag, EndTag, Characters, etc.).
    """
    def __init__(self, tree):
        # The tree (root node) to walk; interpretation is subclass-specific.
        self.tree = tree
    def __iter__(self):
        raise NotImplementedError
    def error(self, msg):
        # Emit a serialize-error token instead of raising.
        return {"type": "SerializeError", "data": msg}
    def emptyTag(self, namespace, name, attrs, hasChildren=False):
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        assert isinstance(name, string_types), type(name)
        assert all((namespace is None or isinstance(namespace, string_types)) and
                   isinstance(name, string_types) and
                   isinstance(value, string_types)
                   for (namespace, name), value in attrs.items())
        yield {"type": "EmptyTag", "name": to_text(name, False),
               "namespace": to_text(namespace),
               "data": attrs}
        if hasChildren:
            # Void elements must not have children; flag it as an error token.
            yield self.error("Void element has children")
    def startTag(self, namespace, name, attrs):
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        assert isinstance(name, string_types), type(name)
        assert all((namespace is None or isinstance(namespace, string_types)) and
                   isinstance(name, string_types) and
                   isinstance(value, string_types)
                   for (namespace, name), value in attrs.items())
        return {"type": "StartTag",
                "name": text_type(name),
                "namespace": to_text(namespace),
                "data": dict(((to_text(namespace, False), to_text(name)),
                              to_text(value, False))
                             for (namespace, name), value in attrs.items())}
    def endTag(self, namespace, name):
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        assert isinstance(name, string_types), type(namespace)
        return {"type": "EndTag",
                "name": to_text(name, False),
                "namespace": to_text(namespace),
                "data": {}}
    def text(self, data):
        # Split the text into leading space, middle, and trailing space so
        # serializers can treat inter-tag whitespace specially.
        assert isinstance(data, string_types), type(data)
        data = to_text(data)
        middle = data.lstrip(spaceCharacters)
        left = data[:len(data) - len(middle)]
        if left:
            yield {"type": "SpaceCharacters", "data": left}
        data = middle
        middle = data.rstrip(spaceCharacters)
        right = data[len(middle):]
        if middle:
            yield {"type": "Characters", "data": middle}
        if right:
            yield {"type": "SpaceCharacters", "data": right}
    def comment(self, data):
        assert isinstance(data, string_types), type(data)
        return {"type": "Comment", "data": text_type(data)}
    def doctype(self, name, publicId=None, systemId=None, correct=True):
        assert is_text_or_none(name), type(name)
        assert is_text_or_none(publicId), type(publicId)
        assert is_text_or_none(systemId), type(systemId)
        return {"type": "Doctype",
                "name": to_text(name),
                "publicId": to_text(publicId),
                "systemId": to_text(systemId),
                "correct": to_text(correct)}
    def entity(self, name):
        assert isinstance(name, string_types), type(name)
        return {"type": "Entity", "name": text_type(name)}
    def unknown(self, nodeType):
        # Unrecognised node types become SerializeError tokens.
        return self.error("Unknown node type: " + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
    """
    TreeWalker that traverses the tree iteratively via explicit
    first-child / next-sibling / parent navigation (no recursion).
    Subclasses implement the four node-access primitives below.
    """
    def getNodeDetails(self, node):
        raise NotImplementedError
    def getFirstChild(self, node):
        raise NotImplementedError
    def getNextSibling(self, node):
        raise NotImplementedError
    def getParentNode(self, node):
        raise NotImplementedError
    def __iter__(self):
        # Iterative pre-order traversal: emit the current node's tokens,
        # descend into the first child when present, otherwise walk
        # next-siblings, climbing back through parents (emitting EndTag
        # tokens) until a sibling is found or the root is reached.
        currentNode = self.tree
        while currentNode is not None:
            details = self.getNodeDetails(currentNode)
            type, details = details[0], details[1:]
            hasChildren = False
            if type == DOCTYPE:
                yield self.doctype(*details)
            elif type == TEXT:
                for token in self.text(*details):
                    yield token
            elif type == ELEMENT:
                namespace, name, attributes, hasChildren = details
                if name in voidElements:
                    for token in self.emptyTag(namespace, name, attributes,
                                               hasChildren):
                        yield token
                    # Void elements never get descended into.
                    hasChildren = False
                else:
                    yield self.startTag(namespace, name, attributes)
            elif type == COMMENT:
                yield self.comment(details[0])
            elif type == ENTITY:
                yield self.entity(details[0])
            elif type == DOCUMENT:
                hasChildren = True
            else:
                yield self.unknown(details[0])
            if hasChildren:
                firstChild = self.getFirstChild(currentNode)
            else:
                firstChild = None
            if firstChild is not None:
                currentNode = firstChild
            else:
                # No children: emit EndTags while climbing until we find a
                # next sibling, or stop when back at the root.
                while currentNode is not None:
                    details = self.getNodeDetails(currentNode)
                    type, details = details[0], details[1:]
                    if type == ELEMENT:
                        namespace, name, attributes, hasChildren = details
                        if name not in voidElements:
                            yield self.endTag(namespace, name)
                    if self.tree is currentNode:
                        currentNode = None
                        break
                    nextSibling = self.getNextSibling(currentNode)
                    if nextSibling is not None:
                        currentNode = nextSibling
                        break
                    else:
                        currentNode = self.getParentNode(currentNode)
|
meabsence/python-for-android | refs/heads/master | python3-alpha/extra_modules/gdata/health/service.py | 263 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HealthService extends GDataService to streamline Google Health API access.
HealthService: Provides methods to interact with the profile, profile list,
and register/notices feeds. Extends GDataService.
HealthProfileQuery: Queries the Google Health Profile feed.
HealthProfileListQuery: Queries the Google Health Profile list feed.
"""
__author__ = 'api.eric@google.com (Eric Bidelman)'
import atom
import gdata.health
import gdata.service
class HealthService(gdata.service.GDataService):
    """Client extension for the Google Health service Document List feed."""
    def __init__(self, email=None, password=None, source=None,
                 use_h9_sandbox=False, server='www.google.com',
                 additional_headers=None, **kwargs):
        """Creates a client for the Google Health service.
        Args:
          email: string (optional) The user's email address, used for
              authentication.
          password: string (optional) The user's password.
          source: string (optional) The name of the user's application.
          use_h9_sandbox: boolean (optional) True to issue requests against the
              /h9 developer's sandbox.
          server: string (optional) The name of the server to which a connection
              will be opened.
          additional_headers: dictionary (optional) Any additional headers which
              should be included with CRUD operations.
          kwargs: The other parameters to pass to gdata.service.GDataService
              constructor.
        """
        # The H9 sandbox authenticates under the 'weaver' service name.
        service = use_h9_sandbox and 'weaver' or 'health'
        gdata.service.GDataService.__init__(
            self, email=email, password=password, service=service, source=source,
            server=server, additional_headers=additional_headers, **kwargs)
        # Health data is sensitive; always use SSL.
        self.ssl = True
        self.use_h9_sandbox = use_h9_sandbox
    def __get_service(self):
        # URI path segment differs from the auth service name used above.
        return self.use_h9_sandbox and 'h9' or 'health'
    def GetProfileFeed(self, query=None, profile_id=None):
        """Fetches the users Google Health profile feed.
        Args:
          query: HealthProfileQuery or string (optional) A query to use on the
              profile feed. If None, a HealthProfileQuery is constructed.
          profile_id: string (optional) The profile id to query the profile feed
              with when using ClientLogin. Note: this parameter is ignored if
              query is set.
        Returns:
          A gdata.health.ProfileFeed object containing the user's Health profile.
        """
        if query is None:
            # 'ui' projection is required for ClientLogin with a profile id.
            projection = profile_id and 'ui' or 'default'
            uri = HealthProfileQuery(
                service=self.__get_service(), projection=projection,
                profile_id=profile_id).ToUri()
        elif isinstance(query, HealthProfileQuery):
            uri = query.ToUri()
        else:
            uri = query
        return self.GetFeed(uri, converter=gdata.health.ProfileFeedFromString)
    def GetProfileListFeed(self, query=None):
        """Fetches the users Google Health profile feed.
        Args:
          query: HealthProfileListQuery or string (optional) A query to use
              on the profile list feed. If None, a HealthProfileListQuery is
              constructed to /health/feeds/profile/list or /h9/feeds/profile/list.
        Returns:
          A gdata.health.ProfileListFeed object containing the user's list
          of profiles.
        """
        if not query:
            uri = HealthProfileListQuery(service=self.__get_service()).ToUri()
        elif isinstance(query, HealthProfileListQuery):
            uri = query.ToUri()
        else:
            uri = query
        return self.GetFeed(uri, converter=gdata.health.ProfileListFeedFromString)
    def SendNotice(self, subject, body=None, content_type='html',
                   ccr=None, profile_id=None):
        """Sends (posts) a notice to the user's Google Health profile.
        Args:
          subject: A string representing the message's subject line.
          body: string (optional) The message body.
          content_type: string (optional) The content type of the notice message
              body.  This parameter is only honored when a message body is
              specified.
          ccr: string (optional) The CCR XML document to reconcile into the
              user's profile.
          profile_id: string (optional) The profile id to work with when using
              ClientLogin. Note: this parameter is ignored if query is set.
        Returns:
          A gdata.health.ProfileEntry object of the posted entry.
        """
        if body:
            content = atom.Content(content_type=content_type, text=body)
        else:
            content = body
        # The CCR document rides along as an extension element of the entry.
        entry = gdata.GDataEntry(
            title=atom.Title(text=subject), content=content,
            extension_elements=[atom.ExtensionElementFromString(ccr)])
        projection = profile_id and 'ui' or 'default'
        query = HealthRegisterQuery(service=self.__get_service(),
                                    projection=projection, profile_id=profile_id)
        return self.Post(entry, query.ToUri(),
                         converter=gdata.health.ProfileEntryFromString)
class HealthProfileQuery(gdata.service.Query):
    """Object used to construct a URI to query the Google Health profile feed."""
    def __init__(self, service='health', feed='feeds/profile',
                 projection='default', profile_id=None, text_query=None,
                 params=None, categories=None):
        """Constructor for Health profile feed query.
        Args:
          service: string (optional) The service to query. Either 'health' or 'h9'.
          feed: string (optional) The path for the feed. The default value is
              'feeds/profile'.
          projection: string (optional) The visibility of the data. Possible values
              are 'default' for AuthSub and 'ui' for ClientLogin. If this value
              is set to 'ui', the profile_id parameter should also be set.
          profile_id: string (optional) The profile id to query. This should only
              be used when using ClientLogin.
          text_query: str (optional) The contents of the q query parameter. The
              contents of the text_query are URL escaped upon conversion to a URI.
              Note: this parameter can only be used on the register feed using
              ClientLogin.
          params: dict (optional) Parameter value string pairs which become URL
              params when translated to a URI. These parameters are added to
              the query's items.
          categories: list (optional) List of category strings which should be
              included as query categories. See gdata.service.Query for
              additional documentation.
        """
        self.service = service
        self.profile_id = profile_id
        self.projection = projection
        gdata.service.Query.__init__(self, feed=feed, text_query=text_query,
                                     params=params, categories=categories)
    def ToUri(self):
        """Generates a URI from the query parameters set in the object.
        Returns:
          A string containing the URI used to retrieve entries from the Health
          profile feed.
        """
        # Temporarily rewrite self.feed to include service/projection (and
        # profile id) so the base Query.ToUri builds the full path, then
        # restore the original feed so the query can be reused.
        old_feed = self.feed
        self.feed = '/'.join([self.service, old_feed, self.projection])
        if self.profile_id:
            self.feed += '/' + self.profile_id
        self.feed = '/%s' % (self.feed,)
        new_feed = gdata.service.Query.ToUri(self)
        self.feed = old_feed
        return new_feed
class HealthProfileListQuery(gdata.service.Query):
    """Builds the URI used to query a Google Health profile list feed."""

    def __init__(self, service='health', feed='feeds/profile/list'):
        """Constructor for Health profile list feed query.
        Args:
          service: string (optional) The service to query. Either 'health' or 'h9'.
          feed: string (optional) The path for the feed. The default value is
              'feeds/profile/list'.
        """
        gdata.service.Query.__init__(self, feed)
        self.service = service

    def ToUri(self):
        """Generates a URI from the query parameters set in the object.
        Returns:
          A string containing the URI used to retrieve entries from the
          profile list feed.
        """
        # Path is simply "/<service>/<feed>".
        return '/' + '/'.join([self.service, self.feed])
class HealthRegisterQuery(gdata.service.Query):
    """Object used to construct a URI to query a Health register/notice feed."""
    def __init__(self, service='health', feed='feeds/register',
                 projection='default', profile_id=None):
        """Constructor for Health profile list feed query.
        Args:
          service: string (optional) The service to query. Either 'health' or 'h9'.
          feed: string (optional) The path for the feed. The default value is
              'feeds/register'.
          projection: string (optional) The visibility of the data. Possible values
              are 'default' for AuthSub and 'ui' for ClientLogin. If this value
              is set to 'ui', the profile_id parameter should also be set.
          profile_id: string (optional) The profile id to query. This should only
              be used when using ClientLogin.
        """
        gdata.service.Query.__init__(self, feed)
        self.service = service
        self.projection = projection
        self.profile_id = profile_id
    def ToUri(self):
        """Generates a URI from the query parameters set in the object.
        Returns:
          A string containing the URI needed to interact with the register feed.
        """
        # Temporarily rewrite self.feed so the base Query.ToUri builds the
        # service/projection path, then restore it. Unlike
        # HealthProfileQuery, the profile id is appended *after* the base
        # class has serialized the query parameters.
        old_feed = self.feed
        self.feed = '/'.join([self.service, old_feed, self.projection])
        new_feed = gdata.service.Query.ToUri(self)
        self.feed = old_feed
        if self.profile_id:
            new_feed += '/' + self.profile_id
        return '/%s' % (new_feed,)
|
extremewaysback/django | refs/heads/master | tests/cache/closeable_cache.py | 446 | from django.core.cache.backends.locmem import LocMemCache
class CloseHookMixin(object):
    """Mixin that records whether ``close()`` has been invoked."""

    # Class-level default; flipped to True per-instance on close().
    closed = False

    def close(self, **kwargs):
        # No real teardown — just note that close was requested.
        self.closed = True
class CacheClass(CloseHookMixin, LocMemCache):
    # Local-memory cache backend whose close() records that it was called
    # (via CloseHookMixin); used to verify cache close handling in tests.
    pass
|
facebook/buck | refs/heads/master | test/com/facebook/buck/features/python/testdata/prebuilt_package/package/file.py | 10 | def say_hi():
print("Hello there!")
|
MrNuggles/HeyBoet-Telegram-Bot | refs/heads/master | temboo/Library/Amazon/IAM/__init__.py | 5 | from temboo.Library.Amazon.IAM.AddRoleToInstanceProfile import AddRoleToInstanceProfile, AddRoleToInstanceProfileInputSet, AddRoleToInstanceProfileResultSet, AddRoleToInstanceProfileChoreographyExecution
from temboo.Library.Amazon.IAM.AddUserToGroup import AddUserToGroup, AddUserToGroupInputSet, AddUserToGroupResultSet, AddUserToGroupChoreographyExecution
from temboo.Library.Amazon.IAM.ChangePassword import ChangePassword, ChangePasswordInputSet, ChangePasswordResultSet, ChangePasswordChoreographyExecution
from temboo.Library.Amazon.IAM.CreateAccessKey import CreateAccessKey, CreateAccessKeyInputSet, CreateAccessKeyResultSet, CreateAccessKeyChoreographyExecution
from temboo.Library.Amazon.IAM.CreateAccountAlias import CreateAccountAlias, CreateAccountAliasInputSet, CreateAccountAliasResultSet, CreateAccountAliasChoreographyExecution
from temboo.Library.Amazon.IAM.CreateGroup import CreateGroup, CreateGroupInputSet, CreateGroupResultSet, CreateGroupChoreographyExecution
from temboo.Library.Amazon.IAM.CreateInstanceProfile import CreateInstanceProfile, CreateInstanceProfileInputSet, CreateInstanceProfileResultSet, CreateInstanceProfileChoreographyExecution
from temboo.Library.Amazon.IAM.CreateLoginProfile import CreateLoginProfile, CreateLoginProfileInputSet, CreateLoginProfileResultSet, CreateLoginProfileChoreographyExecution
from temboo.Library.Amazon.IAM.CreateUser import CreateUser, CreateUserInputSet, CreateUserResultSet, CreateUserChoreographyExecution
from temboo.Library.Amazon.IAM.CreateVirtualMFADevice import CreateVirtualMFADevice, CreateVirtualMFADeviceInputSet, CreateVirtualMFADeviceResultSet, CreateVirtualMFADeviceChoreographyExecution
from temboo.Library.Amazon.IAM.DeactivateMFADevice import DeactivateMFADevice, DeactivateMFADeviceInputSet, DeactivateMFADeviceResultSet, DeactivateMFADeviceChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteAccessKey import DeleteAccessKey, DeleteAccessKeyInputSet, DeleteAccessKeyResultSet, DeleteAccessKeyChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteAccountAlias import DeleteAccountAlias, DeleteAccountAliasInputSet, DeleteAccountAliasResultSet, DeleteAccountAliasChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteAccountPasswordPolicy import DeleteAccountPasswordPolicy, DeleteAccountPasswordPolicyInputSet, DeleteAccountPasswordPolicyResultSet, DeleteAccountPasswordPolicyChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteGroup import DeleteGroup, DeleteGroupInputSet, DeleteGroupResultSet, DeleteGroupChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteGroupPolicy import DeleteGroupPolicy, DeleteGroupPolicyInputSet, DeleteGroupPolicyResultSet, DeleteGroupPolicyChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteInstanceProfile import DeleteInstanceProfile, DeleteInstanceProfileInputSet, DeleteInstanceProfileResultSet, DeleteInstanceProfileChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteLoginProfile import DeleteLoginProfile, DeleteLoginProfileInputSet, DeleteLoginProfileResultSet, DeleteLoginProfileChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteRole import DeleteRole, DeleteRoleInputSet, DeleteRoleResultSet, DeleteRoleChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteRolePolicy import DeleteRolePolicy, DeleteRolePolicyInputSet, DeleteRolePolicyResultSet, DeleteRolePolicyChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteServerCertificate import DeleteServerCertificate, DeleteServerCertificateInputSet, DeleteServerCertificateResultSet, DeleteServerCertificateChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteSigningCertificate import DeleteSigningCertificate, DeleteSigningCertificateInputSet, DeleteSigningCertificateResultSet, DeleteSigningCertificateChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteUser import DeleteUser, DeleteUserInputSet, DeleteUserResultSet, DeleteUserChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteUserPolicy import DeleteUserPolicy, DeleteUserPolicyInputSet, DeleteUserPolicyResultSet, DeleteUserPolicyChoreographyExecution
from temboo.Library.Amazon.IAM.DeleteVirtualMFADevice import DeleteVirtualMFADevice, DeleteVirtualMFADeviceInputSet, DeleteVirtualMFADeviceResultSet, DeleteVirtualMFADeviceChoreographyExecution
from temboo.Library.Amazon.IAM.EnableMFADevice import EnableMFADevice, EnableMFADeviceInputSet, EnableMFADeviceResultSet, EnableMFADeviceChoreographyExecution
from temboo.Library.Amazon.IAM.GetAccountPasswordPolicy import GetAccountPasswordPolicy, GetAccountPasswordPolicyInputSet, GetAccountPasswordPolicyResultSet, GetAccountPasswordPolicyChoreographyExecution
from temboo.Library.Amazon.IAM.GetAccountSummary import GetAccountSummary, GetAccountSummaryInputSet, GetAccountSummaryResultSet, GetAccountSummaryChoreographyExecution
from temboo.Library.Amazon.IAM.GetGroup import GetGroup, GetGroupInputSet, GetGroupResultSet, GetGroupChoreographyExecution
from temboo.Library.Amazon.IAM.GetGroupPolicy import GetGroupPolicy, GetGroupPolicyInputSet, GetGroupPolicyResultSet, GetGroupPolicyChoreographyExecution
from temboo.Library.Amazon.IAM.GetInstanceProfile import GetInstanceProfile, GetInstanceProfileInputSet, GetInstanceProfileResultSet, GetInstanceProfileChoreographyExecution
from temboo.Library.Amazon.IAM.GetLoginProfile import GetLoginProfile, GetLoginProfileInputSet, GetLoginProfileResultSet, GetLoginProfileChoreographyExecution
from temboo.Library.Amazon.IAM.GetRole import GetRole, GetRoleInputSet, GetRoleResultSet, GetRoleChoreographyExecution
from temboo.Library.Amazon.IAM.GetRolePolicy import GetRolePolicy, GetRolePolicyInputSet, GetRolePolicyResultSet, GetRolePolicyChoreographyExecution
from temboo.Library.Amazon.IAM.GetServerCertificate import GetServerCertificate, GetServerCertificateInputSet, GetServerCertificateResultSet, GetServerCertificateChoreographyExecution
from temboo.Library.Amazon.IAM.GetUser import GetUser, GetUserInputSet, GetUserResultSet, GetUserChoreographyExecution
from temboo.Library.Amazon.IAM.GetUserPolicy import GetUserPolicy, GetUserPolicyInputSet, GetUserPolicyResultSet, GetUserPolicyChoreographyExecution
from temboo.Library.Amazon.IAM.ListAccessKeys import ListAccessKeys, ListAccessKeysInputSet, ListAccessKeysResultSet, ListAccessKeysChoreographyExecution
from temboo.Library.Amazon.IAM.ListAccountAliases import ListAccountAliases, ListAccountAliasesInputSet, ListAccountAliasesResultSet, ListAccountAliasesChoreographyExecution
from temboo.Library.Amazon.IAM.ListGroupPolicies import ListGroupPolicies, ListGroupPoliciesInputSet, ListGroupPoliciesResultSet, ListGroupPoliciesChoreographyExecution
from temboo.Library.Amazon.IAM.ListGroups import ListGroups, ListGroupsInputSet, ListGroupsResultSet, ListGroupsChoreographyExecution
from temboo.Library.Amazon.IAM.ListGroupsForUser import ListGroupsForUser, ListGroupsForUserInputSet, ListGroupsForUserResultSet, ListGroupsForUserChoreographyExecution
from temboo.Library.Amazon.IAM.ListInstanceProfiles import ListInstanceProfiles, ListInstanceProfilesInputSet, ListInstanceProfilesResultSet, ListInstanceProfilesChoreographyExecution
from temboo.Library.Amazon.IAM.ListInstanceProfilesForRole import ListInstanceProfilesForRole, ListInstanceProfilesForRoleInputSet, ListInstanceProfilesForRoleResultSet, ListInstanceProfilesForRoleChoreographyExecution
from temboo.Library.Amazon.IAM.ListMFADevices import ListMFADevices, ListMFADevicesInputSet, ListMFADevicesResultSet, ListMFADevicesChoreographyExecution
from temboo.Library.Amazon.IAM.ListRolePolicies import ListRolePolicies, ListRolePoliciesInputSet, ListRolePoliciesResultSet, ListRolePoliciesChoreographyExecution
from temboo.Library.Amazon.IAM.ListRoles import ListRoles, ListRolesInputSet, ListRolesResultSet, ListRolesChoreographyExecution
from temboo.Library.Amazon.IAM.ListServerCertificates import ListServerCertificates, ListServerCertificatesInputSet, ListServerCertificatesResultSet, ListServerCertificatesChoreographyExecution
from temboo.Library.Amazon.IAM.ListSigningCertificates import ListSigningCertificates, ListSigningCertificatesInputSet, ListSigningCertificatesResultSet, ListSigningCertificatesChoreographyExecution
from temboo.Library.Amazon.IAM.ListUserPolicies import ListUserPolicies, ListUserPoliciesInputSet, ListUserPoliciesResultSet, ListUserPoliciesChoreographyExecution
from temboo.Library.Amazon.IAM.ListUsers import ListUsers, ListUsersInputSet, ListUsersResultSet, ListUsersChoreographyExecution
from temboo.Library.Amazon.IAM.ListVirtualMFADevices import ListVirtualMFADevices, ListVirtualMFADevicesInputSet, ListVirtualMFADevicesResultSet, ListVirtualMFADevicesChoreographyExecution
from temboo.Library.Amazon.IAM.PutGroupPolicy import PutGroupPolicy, PutGroupPolicyInputSet, PutGroupPolicyResultSet, PutGroupPolicyChoreographyExecution
from temboo.Library.Amazon.IAM.PutRolePolicy import PutRolePolicy, PutRolePolicyInputSet, PutRolePolicyResultSet, PutRolePolicyChoreographyExecution
from temboo.Library.Amazon.IAM.PutUserPolicy import PutUserPolicy, PutUserPolicyInputSet, PutUserPolicyResultSet, PutUserPolicyChoreographyExecution
from temboo.Library.Amazon.IAM.RemoveRoleFromInstanceProfile import RemoveRoleFromInstanceProfile, RemoveRoleFromInstanceProfileInputSet, RemoveRoleFromInstanceProfileResultSet, RemoveRoleFromInstanceProfileChoreographyExecution
from temboo.Library.Amazon.IAM.RemoveUserFromGroup import RemoveUserFromGroup, RemoveUserFromGroupInputSet, RemoveUserFromGroupResultSet, RemoveUserFromGroupChoreographyExecution
from temboo.Library.Amazon.IAM.ResyncMFADevice import ResyncMFADevice, ResyncMFADeviceInputSet, ResyncMFADeviceResultSet, ResyncMFADeviceChoreographyExecution
from temboo.Library.Amazon.IAM.UpdateAccessKey import UpdateAccessKey, UpdateAccessKeyInputSet, UpdateAccessKeyResultSet, UpdateAccessKeyChoreographyExecution
from temboo.Library.Amazon.IAM.UpdateAccountPasswordPolicy import UpdateAccountPasswordPolicy, UpdateAccountPasswordPolicyInputSet, UpdateAccountPasswordPolicyResultSet, UpdateAccountPasswordPolicyChoreographyExecution
from temboo.Library.Amazon.IAM.UpdateGroup import UpdateGroup, UpdateGroupInputSet, UpdateGroupResultSet, UpdateGroupChoreographyExecution
from temboo.Library.Amazon.IAM.UpdateLoginProfile import UpdateLoginProfile, UpdateLoginProfileInputSet, UpdateLoginProfileResultSet, UpdateLoginProfileChoreographyExecution
from temboo.Library.Amazon.IAM.UpdateServerCertificate import UpdateServerCertificate, UpdateServerCertificateInputSet, UpdateServerCertificateResultSet, UpdateServerCertificateChoreographyExecution
from temboo.Library.Amazon.IAM.UpdateSigningCertificate import UpdateSigningCertificate, UpdateSigningCertificateInputSet, UpdateSigningCertificateResultSet, UpdateSigningCertificateChoreographyExecution
from temboo.Library.Amazon.IAM.UpdateUser import UpdateUser, UpdateUserInputSet, UpdateUserResultSet, UpdateUserChoreographyExecution
from temboo.Library.Amazon.IAM.UploadServerCertificate import UploadServerCertificate, UploadServerCertificateInputSet, UploadServerCertificateResultSet, UploadServerCertificateChoreographyExecution
from temboo.Library.Amazon.IAM.UploadSigningCertificate import UploadSigningCertificate, UploadSigningCertificateInputSet, UploadSigningCertificateResultSet, UploadSigningCertificateChoreographyExecution
|
kiwicopple/MyMDb | refs/heads/master | venv/Lib/encodings/ptcp154.py | 647 | """ Python Character Mapping Codec generated from 'PTCP154.txt' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless PTCP154 codec built on the C-level charmap helpers."""

    def encode(self, input, errors='strict'):
        # Map each character through this codec's encoding table.
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        # Map each byte through this codec's decoding table.
        return codecs.charmap_decode(input, errors, decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; the charmap codec keeps no state between calls."""

    def encode(self, input, final=False):
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; the charmap codec keeps no state between calls."""

    def decode(self, input, final=False):
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_map)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for PTCP154; all behaviour comes from the base classes."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for PTCP154; all behaviour comes from the base classes."""
### encodings module API
def getregentry():
    """Return the CodecInfo record consumed by the encodings registry."""
    codec = Codec()  # stateless, so one instance serves both directions
    return codecs.CodecInfo(
        name='ptcp154',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map

# Start from an identity byte->codepoint mapping for 0x00-0xFF, then override
# the positions where PTCP154 differs (generated from 'PTCP154.txt' per the
# module docstring; mostly extended-Cyrillic letters and punctuation).
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0496, # CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
0x0081: 0x0492, # CYRILLIC CAPITAL LETTER GHE WITH STROKE
0x0082: 0x04ee, # CYRILLIC CAPITAL LETTER U WITH MACRON
0x0083: 0x0493, # CYRILLIC SMALL LETTER GHE WITH STROKE
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x04b6, # CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
0x0087: 0x04ae, # CYRILLIC CAPITAL LETTER STRAIGHT U
0x0088: 0x04b2, # CYRILLIC CAPITAL LETTER HA WITH DESCENDER
0x0089: 0x04af, # CYRILLIC SMALL LETTER STRAIGHT U
0x008a: 0x04a0, # CYRILLIC CAPITAL LETTER BASHKIR KA
0x008b: 0x04e2, # CYRILLIC CAPITAL LETTER I WITH MACRON
0x008c: 0x04a2, # CYRILLIC CAPITAL LETTER EN WITH DESCENDER
0x008d: 0x049a, # CYRILLIC CAPITAL LETTER KA WITH DESCENDER
0x008e: 0x04ba, # CYRILLIC CAPITAL LETTER SHHA
0x008f: 0x04b8, # CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE
0x0090: 0x0497, # CYRILLIC SMALL LETTER ZHE WITH DESCENDER
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: 0x04b3, # CYRILLIC SMALL LETTER HA WITH DESCENDER
0x0099: 0x04b7, # CYRILLIC SMALL LETTER CHE WITH DESCENDER
0x009a: 0x04a1, # CYRILLIC SMALL LETTER BASHKIR KA
0x009b: 0x04e3, # CYRILLIC SMALL LETTER I WITH MACRON
0x009c: 0x04a3, # CYRILLIC SMALL LETTER EN WITH DESCENDER
0x009d: 0x049b, # CYRILLIC SMALL LETTER KA WITH DESCENDER
0x009e: 0x04bb, # CYRILLIC SMALL LETTER SHHA
0x009f: 0x04b9, # CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE
0x00a1: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U (Byelorussian)
0x00a2: 0x045e, # CYRILLIC SMALL LETTER SHORT U (Byelorussian)
0x00a3: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x00a4: 0x04e8, # CYRILLIC CAPITAL LETTER BARRED O
0x00a5: 0x0498, # CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
0x00a6: 0x04b0, # CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE
0x00a8: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00aa: 0x04d8, # CYRILLIC CAPITAL LETTER SCHWA
0x00ad: 0x04ef, # CYRILLIC SMALL LETTER U WITH MACRON
0x00af: 0x049c, # CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE
0x00b1: 0x04b1, # CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE
0x00b2: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x00b3: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x00b4: 0x0499, # CYRILLIC SMALL LETTER ZE WITH DESCENDER
0x00b5: 0x04e9, # CYRILLIC SMALL LETTER BARRED O
0x00b8: 0x0451, # CYRILLIC SMALL LETTER IO
0x00b9: 0x2116, # NUMERO SIGN
0x00ba: 0x04d9, # CYRILLIC SMALL LETTER SCHWA
0x00bc: 0x0458, # CYRILLIC SMALL LETTER JE
0x00bd: 0x04aa, # CYRILLIC CAPITAL LETTER ES WITH DESCENDER
0x00be: 0x04ab, # CYRILLIC SMALL LETTER ES WITH DESCENDER
0x00bf: 0x049d, # CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE
0x00c0: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00c1: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00c2: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00c3: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00c4: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00c5: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00c6: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00c7: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00c8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00c9: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00ca: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00cb: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00cc: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00cd: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00ce: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00cf: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00d0: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00d1: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00d2: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00d3: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00d4: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00d5: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00d6: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00d7: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00d8: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00d9: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00da: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00db: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00dc: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00dd: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00de: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x00df: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e0: 0x0430, # CYRILLIC SMALL LETTER A
0x00e1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00e2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00e3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00e4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00e5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00e6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00e7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00e8: 0x0438, # CYRILLIC SMALL LETTER I
0x00e9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00ea: 0x043a, # CYRILLIC SMALL LETTER KA
0x00eb: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ec: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ed: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ee: 0x043e, # CYRILLIC SMALL LETTER O
0x00ef: 0x043f, # CYRILLIC SMALL LETTER PE
0x00f0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00f1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00f2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00f3: 0x0443, # CYRILLIC SMALL LETTER U
0x00f4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00f5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00f6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00f7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00f8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00fb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00fc: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00fd: 0x044d, # CYRILLIC SMALL LETTER E
0x00fe: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ff: 0x044f, # CYRILLIC SMALL LETTER YA
})

### Encoding Map

# Inverse of decoding_map (codepoint -> byte), used by charmap_encode above.
encoding_map = codecs.make_encoding_map(decoding_map)
|
zhangrj/Learn_Python | refs/heads/master | string.py | 1 | #--coding:utf-8--
# Tutorial script demonstrating %-style string formatting and concatenation.
# NOTE: this uses Python 2 print statements, so it will not run under Python 3.
# NOTE(review): the module name string.py shadows the stdlib `string` module.
x = "There are %d types of people." %10
binary = "binary"
do_not = "don't"
# %s substitutes the two variables positionally.
y = "Those who know %s and those who %s." %(binary,do_not)
print x
print y
# %r uses repr(), so the string is shown with its quotes.
print "I said:%r." %x
print "I also said:'%s'." %y
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r"
# %r of a bool renders as True/False.
print joke_evaluation %hilarious
w = "This is"
e = " an egg."
# String concatenation with +.
print w+e
darjeeling/django | refs/heads/master | tests/template_tests/syntax_tests/test_exceptions.py | 513 | from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
from .test_extends import inheritance_templates
class ExceptionsTests(SimpleTestCase):
    """Template-engine error cases around {% extends %} and block.super."""

    @setup({'exception01': "{% extends 'nonexistent' %}"})
    def test_exception01(self):
        """Extending a missing template raises TemplateDoesNotExist."""
        with self.assertRaises(TemplateDoesNotExist):
            self.engine.render_to_string('exception01')

    @setup({'exception02': '{% extends nonexistent %}'})
    def test_exception02(self):
        """Extending an unresolved variable template name raises an error;
        the exception type depends on the engine's string_if_invalid."""
        if self.engine.string_if_invalid:
            with self.assertRaises(TemplateDoesNotExist):
                self.engine.render_to_string('exception02')
        else:
            with self.assertRaises(TemplateSyntaxError):
                self.engine.render_to_string('exception02')

    @setup(
        {'exception03': "{% extends 'inheritance01' %}"
                        "{% block first %}2{% endblock %}{% extends 'inheritance16' %}"},
        inheritance_templates,
    )
    def test_exception03(self):
        """A second {% extends %} tag raises TemplateSyntaxError."""
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('exception03')

    @setup(
        {'exception04': "{% extends 'inheritance17' %}{% block first %}{% echo 400 %}5678{% endblock %}"},
        inheritance_templates,
    )
    def test_exception04(self):
        """Custom tags loaded only in the parent are unavailable in the child."""
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('exception04')

    @setup({'exception05': '{% block first %}{{ block.super }}{% endblock %}'})
    def test_exception05(self):
        """Using block.super in a base template raises TemplateSyntaxError."""
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('exception05')
|
nyuwireless/ns3-mmwave | refs/heads/master | src/bridge/examples/csma-bridge.py | 171 | # /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Network topology
#
# n0 n1
# | |
# ----------
# | Switch |
# ----------
# | |
# n2 n3
#
#
# - CBR/UDP flows from n0 to n1 and from n3 to n0
# - DropTail queues
# - Tracing of queues and packet receptions to file "csma-bridge.tr"
import ns.applications
import ns.bridge
import ns.core
import ns.csma
import ns.internet
import ns.network
def main(argv):
    """Build the four-terminal CSMA/bridge topology shown in the header
    comment, run two CBR/UDP flows across it, and write pcap traces."""
    # Allow the user to override any defaults via command-line arguments.
    cmd = ns.core.CommandLine()
    cmd.Parse(argv)

    # Explicitly create the nodes required by the topology.
    terminals = ns.network.NodeContainer()
    terminals.Create(4)

    csmaSwitch = ns.network.NodeContainer()
    csmaSwitch.Create(1)

    # Build the topology: one CSMA link from each terminal to the switch.
    csma = ns.csma.CsmaHelper()
    csma.SetChannelAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate(5000000)))
    csma.SetChannelAttribute("Delay", ns.core.TimeValue(ns.core.MilliSeconds(2)))

    terminalDevices = ns.network.NetDeviceContainer()
    switchDevices = ns.network.NetDeviceContainer()
    for i in range(4):
        link = csma.Install(ns.network.NodeContainer(ns.network.NodeContainer(terminals.Get(i)), csmaSwitch))
        terminalDevices.Add(link.Get(0))
        switchDevices.Add(link.Get(1))

    # Create the bridge netdevice, which will do the packet switching, and
    # attach every switch-side device as a bridge port.
    switchNode = csmaSwitch.Get(0)
    bridgeDevice = ns.bridge.BridgeNetDevice()
    switchNode.AddDevice(bridgeDevice)
    for portIter in range(switchDevices.GetN()):
        bridgeDevice.AddBridgePort(switchDevices.Get(portIter))

    # Add the internet stack to the terminals.
    internet = ns.internet.InternetStackHelper()
    internet.Install(terminals)

    # The "hardware" is in place; assign IP addresses.
    ipv4 = ns.internet.Ipv4AddressHelper()
    ipv4.SetBase(ns.network.Ipv4Address("10.1.1.0"), ns.network.Ipv4Mask("255.255.255.0"))
    ipv4.Assign(terminalDevices)

    # OnOff application sending UDP datagrams from node 0 to node 1.
    port = 9  # Discard port (RFC 863)
    onoff = ns.applications.OnOffHelper(
        "ns3::UdpSocketFactory",
        ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.1.1.2"), port)))
    onoff.SetConstantRate(ns.network.DataRate("500kb/s"))

    app = onoff.Install(ns.network.NodeContainer(terminals.Get(0)))
    app.Start(ns.core.Seconds(1.0))
    app.Stop(ns.core.Seconds(10.0))

    # Packet sink on node 1 to receive those packets.
    sink = ns.applications.PacketSinkHelper(
        "ns3::UdpSocketFactory",
        ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address.GetAny(), port)))
    app = sink.Install(ns.network.NodeContainer(terminals.Get(1)))
    app.Start(ns.core.Seconds(0.0))

    # A similar flow from n3 back to n0, starting at 1.1 seconds.
    onoff.SetAttribute(
        "Remote",
        ns.network.AddressValue(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.1.1.1"), port)))
    app = onoff.Install(ns.network.NodeContainer(terminals.Get(3)))
    app.Start(ns.core.Seconds(1.1))
    app.Stop(ns.core.Seconds(10.0))

    app = sink.Install(ns.network.NodeContainer(terminals.Get(0)))
    app.Start(ns.core.Seconds(0.0))

    # Trace every interface; output files are named
    # csma-bridge.pcap-<nodeId>-<interfaceId> and readable via "tcpdump -r".
    csma.EnablePcapAll("csma-bridge", False)

    # Run the actual simulation and clean up.
    ns.core.Simulator.Run()
    ns.core.Simulator.Destroy()
if __name__ == '__main__':
    # Run the simulation when executed as a script.
    import sys
    main(sys.argv)
|
Bugheist/website | refs/heads/master | website/models.py | 1 | import os
from urllib.parse import urlparse
import requests
import tweepy
from PIL import Image
from annoying.fields import AutoOneToOneField
from colorthief import ColorThief
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.db import models
from django.db.models import Count
from django.db.models import signals
from django.db.models.signals import post_save
from unidecode import unidecode
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """post_save hook: give every newly created user a DRF auth token."""
    if not created:
        return
    Token.objects.create(user=instance)
class Domain(models.Model):
    """A website tracked on Bugheist; issues are reported against a domain."""

    name = models.CharField(max_length=255, unique=True)
    url = models.URLField()
    logo = models.ImageField(upload_to="logos", null=True, blank=True)
    webshot = models.ImageField(upload_to="webshots", null=True, blank=True)
    clicks = models.IntegerField(null=True, blank=True)
    email_event = models.CharField(max_length=255, default="", null=True, blank=True)
    color = models.CharField(max_length=10, null=True, blank=True)
    github = models.CharField(max_length=255, null=True, blank=True)
    email = models.EmailField(null=True, blank=True)
    twitter = models.CharField(max_length=30, null=True, blank=True)
    facebook = models.URLField(null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.name

    @property
    def open_issues(self):
        """All issues on this domain whose status is not 'closed'."""
        return Issue.objects.filter(domain=self).exclude(status="closed")

    @property
    def closed_issues(self):
        """All closed issues on this domain."""
        return Issue.objects.filter(domain=self).filter(status="closed")

    @property
    def top_tester(self):
        """The user who has reported the most issues on this domain."""
        return User.objects.filter(issue__domain=self).annotate(
            total=Count('issue')).order_by('-total').first()

    @property
    def get_name(self):
        """Second-level label of self.url's host, title-cased."""
        parsed_url = urlparse(self.url)
        return parsed_url.netloc.split(".")[-2:][0].title()

    def get_logo(self):
        """Return a logo URL, fetching one from Clearbit on first use.

        Falls back to the site's /favicon.ico when the lookup fails.
        """
        if self.logo:
            return self.logo.url
        # Bug fix: requests.get() used to sit *outside* the try-block, so
        # network errors escaped instead of triggering the favicon fallback;
        # the bare `except:` is also narrowed to Exception.
        try:
            image_request = requests.get("https://logo.clearbit.com/" + self.name)
            if image_request.status_code == 200:
                image_content = ContentFile(image_request.content)
                self.logo.save(self.name + ".jpg", image_content)
                return self.logo.url
        except Exception:
            favicon_url = self.url + '/favicon.ico'
            return favicon_url

    @property
    def get_color(self):
        """Dominant logo colour as '#rrggbb', computed once and cached."""
        if self.color:
            return self.color
        if not self.logo:
            self.get_logo()
        try:
            color_thief = ColorThief(self.logo)
            self.color = '#%02x%02x%02x' % color_thief.get_color(quality=1)
        except Exception:  # narrowed from a bare except
            self.color = "#0000ff"
        self.save()
        return self.color

    @property
    def hostname_domain(self):
        """Full hostname of self.url (may include subdomains)."""
        parsed_url = urlparse(self.url)
        return parsed_url.hostname

    @property
    def domain_name(self):
        """Hostname reduced to its last two labels when it has exactly three."""
        parsed_url = urlparse(self.url)
        domain = parsed_url.hostname
        temp = domain.rsplit('.')
        if len(temp) == 3:
            domain = temp[1] + '.' + temp[2]
        return domain

    def get_absolute_url(self):
        return "/domain/" + self.name
def validate_image(fieldfile_obj):
    """Model-field validator: reject uploaded images larger than 3 MB."""
    megabyte_limit = 3.0
    max_bytes = megabyte_limit * 1024 * 1024
    if fieldfile_obj.file.size > max_bytes:
        raise ValidationError("Max file size is %sMB" % str(megabyte_limit))
class Issue(models.Model):
    """A bug report filed by a user against a URL/domain."""

    # (value, human label) pairs for the `label` field.
    labels = (
        (0, 'General'),
        (1, 'Number Error'),
        (2, 'Functional'),
        (3, 'Performance'),
        (4, 'Security'),
        (5, 'Typo'),
        (6, 'Design')
    )
    user = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE)
    domain = models.ForeignKey(Domain, null=True, blank=True, on_delete=models.CASCADE)
    url = models.URLField()
    description = models.TextField()
    label = models.PositiveSmallIntegerField(choices=labels, default=0)
    views = models.IntegerField(null=True, blank=True)
    status = models.CharField(max_length=10, default="open", null=True, blank=True)
    user_agent = models.CharField(max_length=255, default="", null=True, blank=True)
    ocr = models.TextField(default="", null=True, blank=True)
    screenshot = models.ImageField(upload_to="screenshots", validators=[validate_image])
    closed_by = models.ForeignKey(User, null=True, blank=True, related_name="closed_by", on_delete=models.CASCADE)
    closed_date = models.DateTimeField(default=None, null=True, blank=True)
    github_url = models.URLField(default="", null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.description

    @property
    def domain_title(self):
        """Second-level label of the issue URL's host, title-cased."""
        parsed_url = urlparse(self.url)
        return parsed_url.netloc.split(".")[-2:][0].title()

    @property
    def hostname_domain(self):
        """Full hostname of the issue URL."""
        parsed_url = urlparse(self.url)
        return parsed_url.hostname

    @property
    def domain_name(self):
        """Hostname reduced to its last two labels when it has exactly three."""
        parsed_url = urlparse(self.url)
        domain = parsed_url.hostname
        temp = domain.rsplit('.')
        if len(temp) == 3:
            domain = temp[1] + '.' + temp[2]
        return domain

    def get_twitter_message(self):
        """Compose a tweet (<= 140 chars) linking back to this issue,
        truncating the description to fit."""
        issue_link = " bugheist.com/issue/" + str(self.id)
        prefix = "Bug found on @"
        spacer = " | "
        msg = prefix + self.domain_title + spacer + self.description[:140 - (
            len(prefix) + len(self.domain_title) + len(spacer) + len(issue_link))] + issue_link
        return msg

    def get_ocr(self):
        """Lazily OCR the screenshot with pytesseract and cache the text."""
        if self.ocr:
            return self.ocr
        try:
            import pytesseract
            self.ocr = pytesseract.image_to_string(Image.open(self.screenshot))
            self.save()
            return self.ocr
        except Exception:
            # Bug fix: narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            return "OCR not installed"

    @property
    def get_absolute_url(self):
        return "/issue/" + str(self.id)

    class Meta:
        ordering = ['-created']
# Maximum tweet length; overridable via settings.TWITTER_MAXLENGTH.
TWITTER_MAXLENGTH = getattr(settings, 'TWITTER_MAXLENGTH', 140)
def post_to_twitter(sender, instance, *args, **kwargs):
    """post_save handler: tweet a newly created issue with its screenshot.

    Returns False whenever nothing was (or could be) posted; the actual
    Twitter call only runs when settings.DEBUG is off.
    """
    if not kwargs.get('created'):
        return False

    # All four credentials must be present in the environment.
    try:
        consumer_key = os.environ['TWITTER_CONSUMER_KEY']
        consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
        access_key = os.environ['TWITTER_ACCESS_KEY']
        access_secret = os.environ['TWITTER_ACCESS_SECRET']
    except KeyError:
        print('WARNING: Twitter account not configured.')
        return False

    try:
        text = instance.get_twitter_message()
    except AttributeError:
        text = str(instance)

    message = '%s' % (text)
    if len(message) > TWITTER_MAXLENGTH:
        # Truncate so that the text plus an ellipsis fits within the limit.
        overflow = len(message + '...') - TWITTER_MAXLENGTH
        message = '%s...' % (text[:-overflow])

    import logging
    logger = logging.getLogger('testlogger')

    if not settings.DEBUG:
        try:
            auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
            auth.set_access_token(access_key, access_secret)
            api = tweepy.API(auth)
            file = default_storage.open(instance.screenshot.file.name, 'rb')
            media_ids = api.media_upload(filename=unidecode(instance.screenshot.file.name), file=file)
            params = dict(status=message, media_ids=[media_ids.media_id_string])
            api.update_status(**params)
        except Exception as ex:
            print(('ERROR:', str(ex)))
            logger.debug('rem %s' % str(ex))
            return False
# Tweet every newly created Issue.
signals.post_save.connect(post_to_twitter, sender=Issue)
class Hunt(models.Model):
    """A paid bug hunt run by a user against a target URL."""

    user = models.ForeignKey(User, on_delete=models.CASCADE)
    url = models.URLField()
    prize = models.IntegerField()
    logo = models.ImageField(upload_to="logos", null=True, blank=True)
    plan = models.CharField(max_length=10)
    txn_id = models.CharField(max_length=50, null=True, blank=True)
    color = models.CharField(max_length=10, null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    @property
    def domain_title(self):
        """Second-level label of the hunt URL's host, title-cased."""
        parsed = urlparse(self.url)
        return parsed.netloc.split(".")[-2:][0].title()

    class Meta:
        ordering = ['-id']
class Points(models.Model):
    """Score awarded to a user, optionally tied to an issue or a domain."""

    user = models.ForeignKey(User, on_delete=models.CASCADE)
    issue = models.ForeignKey(Issue, null=True, blank=True, on_delete=models.CASCADE)
    domain = models.ForeignKey(Domain, null=True, blank=True, on_delete=models.CASCADE)
    score = models.IntegerField()
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
# @receiver(user_logged_in, dispatch_uid="some.unique.string.id.for.allauth.user_logged_in")
# def user_logged_in_(request, user, **kwargs):
# if not settings.TESTING:
# action.send(user, verb='logged in')
class InviteFriend(models.Model):
    """An email invitation sent by an existing user to a prospective member."""

    sender = models.ForeignKey(User, on_delete=models.CASCADE)
    recipient = models.EmailField()
    sent = models.DateTimeField(auto_now_add=True, db_index=True)

    class Meta:
        ordering = ('-sent',)
        verbose_name = 'invitation'
        verbose_name_plural = 'invitations'
def user_images_path(instance, filename):
    """Upload path for a user's avatar: avatars/user_<id>/<slug><ext>."""
    from django.template.defaultfilters import slugify

    base, ext = os.path.splitext(filename)
    return 'avatars/user_{0}/{1}{2}'.format(instance.user.id, slugify(base), ext)
class UserProfile(models.Model):
    """Extra per-user data: avatar, rank title, winnings and issue lists."""

    # Rank choices; note this tuple is consumed by the `title` field below,
    # which then (deliberately, as in the original) shadows the name.
    title = (
        (0, 'Unrated'),
        (1, 'Bronze'),
        (2, 'Silver'),
        (3, 'Gold'),
        (4, 'Platinum'),
    )
    follows = models.ManyToManyField('self', related_name='follower', symmetrical=False, blank=True)
    user = AutoOneToOneField('auth.user', related_name="userprofile", on_delete=models.CASCADE)
    user_avatar = models.ImageField(upload_to=user_images_path, blank=True, null=True)
    title = models.IntegerField(choices=title, default=0)
    winnings = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
    issue_upvoted = models.ManyToManyField(Issue, blank=True, related_name="upvoted")
    issue_saved = models.ManyToManyField(Issue, blank=True, related_name="saved")

    def avatar(self, size=36):
        """Return an avatar URL: the uploaded image if any, otherwise the
        first social-account avatar found (None if neither exists).

        NOTE(review): `size` is currently unused; kept for caller compatibility.
        """
        if self.user_avatar:
            return self.user_avatar.url
        for account in self.user.socialaccount_set.all():
            if 'avatar_url' in account.extra_data:
                return account.extra_data['avatar_url']
            elif 'picture' in account.extra_data:
                return account.extra_data['picture']

    def __unicode__(self):
        return self.user.email
def create_profile(sender, **kwargs):
    """post_save hook: attach a UserProfile to every newly created User."""
    user = kwargs["instance"]
    if kwargs["created"]:
        UserProfile(user=user).save()


post_save.connect(create_profile, sender=User)
class IP(models.Model):
    """Records the IP address from which a user reported an issue."""

    address = models.CharField(max_length=25, null=True, blank=True)
    user = models.CharField(max_length=25, null=True, blank=True)
    issuenumber = models.IntegerField(null=True, blank=True)

    def ipaddress(self):
        # Bug fix: this previously returned `self.ipaddress` — the bound
        # method itself, since no such field exists. The stored value lives
        # in the `address` field.
        return self.address

    def user_name(self):
        """Accessor for the reporting user's name."""
        return self.user

    def issue_number(self):
        """Accessor for the related issue number."""
        return self.issuenumber
awemulya/fieldsight-kobocat | refs/heads/master | onadata/apps/fieldsight/generatereport.py | 1 | import json
import time
import datetime
from datetime import date
from io import BytesIO
from reportlab.lib.pagesizes import letter, A4
from reportlab.platypus import SimpleDocTemplate, Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.enums import TA_CENTER
from reportlab.lib.units import inch
from reportlab.pdfgen import canvas
from reportlab.lib.units import mm
from reportlab.platypus import Spacer, SimpleDocTemplate, Table, TableStyle, PageBreak
from reportlab.platypus import Image
from reportlab.lib import colors
from onadata.apps.fsforms.reports_util import get_instaces_for_site_individual_form
from django.db.models import Prefetch
from onadata.apps.fsforms.models import FieldSightXF, FInstance, Site
from reportlab.lib.enums import TA_RIGHT
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.lib.styles import ParagraphStyle as PS
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from onadata.libs.utils.image_tools import image_url
from onadata.apps.logger.models import Attachment
from .metaAttribsGenerator import generateSiteMetaAttribs
# Module-level ReportLab stylesheets, shared by every report builder below.
styleSheet = getSampleStyleSheet()
styles = getSampleStyleSheet()
class MyDocTemplate(SimpleDocTemplate):
    """SimpleDocTemplate that registers the unicode font and feeds every
    Heading1/2/3 paragraph into the TableOfContents."""

    def __init__(self, filename, **kw):
        self.allowSplitting = 1
        # Py3 fix: the Python 2 builtin `apply()` was removed; call the
        # base-class initializer directly instead.
        SimpleDocTemplate.__init__(self, filename, **kw)
        pdfmetrics.registerFont(TTFont('arialuni', 'ARIALUNI.TTF'))

    # TOC entries are created automatically: afterFlowable() sends a
    # 'TOCEntry' notification whose payload is (level, text, page number,
    # optional bookmark key the entry should link to).
    def afterFlowable(self, flowable):
        """Emit a TOCEntry notification for each heading paragraph laid out."""
        if flowable.__class__.__name__ == 'Paragraph':
            text = flowable.getPlainText()
            style = flowable.style.name
            # (paragraph style, TOC level, bookmark prefix, sequence name)
            headings = [('Heading1', 0, 'h1', 'heading1'),
                        ('Heading2', 1, 'h2', 'heading2'),
                        ('Heading3', 2, 'h3', 'heading3')]
            for style_name, level, prefix, seq_name in headings:
                if style == style_name:
                    key = '%s-%s' % (prefix, self.seq.nextf(seq_name))
                    self.canv.bookmarkPage(key)
                    self.notify('TOCEntry', (level, text, self.page, key))
class PDFReport:
def __init__(self, buffer, pagesize):
self.main_answer = {}
self.question={}
self.data=[]
self.additional_data=[]
self.buffer = buffer
if pagesize == 'A4':
self.pagesize = A4
elif pagesize == 'Letter':
self.pagesize = letter
self.width, self.height = self.pagesize
self.base_url = ''
self.media_folder = ''
self.project_name = ''
self.project_logo = ''
self.removeNullField = False
self.centered = PS(name = 'centered',
fontSize = 14,
leading = 16,
alignment = 1,
spaceAfter = 20,
fontName = 'arialuni')
self.bodystyle = PS(
name = 'bodystyle',
parent=styles['Normal'],
fontSize = 8,
fontName = 'arialuni',
)
self.paragraphstyle = PS(
name = 'paragraphstyle',
parent=styles['Normal'],
fontSize = 9,
fontName = 'arialuni',
)
self.h1 = PS(
name = 'Heading1',
fontSize = 16,
leading = 16,
fontName = 'arialuni',
spaceAfter = 20,)
self.h2 = PS(name = 'Heading2',
fontSize = 14,
leading = 14,
fontName = 'arialuni',
spaceAfter = 20)
self.h3 = PS(name = 'Heading3',
fontSize = 12,
leading = 12,
fontName = 'arialuni',
spaceAfter = 20,)
self.ts1 = TableStyle([
('ALIGN', (0,0), (-1,0), 'RIGHT'),
('BACKGROUND', (0,0), (-1,0), colors.white),
('VALIGN', (0,0), (-1,-1), 'TOP'),
('GRID', (0,0), (-1,-1), 0.1, colors.lightgrey),
])
buffer = self.buffer
self.doc = MyDocTemplate(buffer,
rightMargin=72,
leftMargin=72,
topMargin=72,
bottomMargin=72,
pagesize=self.pagesize)
def create_logo(self, absolute_path):
try:
image = Image(absolute_path)
image._restrictSize(2.5 * inch, 2.5 * inch)
except:
image = Image('http://' + self.base_url +'/static/images/img-404.jpg')
image._restrictSize(1.5 * inch, 1.5 * inch)
return image
def _header_footer(self, canvas, doc):
# Save the state of our canvas so we can draw on it
canvas.saveState()
style_right = ParagraphStyle(name='right', parent=self.bodystyle, fontName='arialuni',
fontSize=10, alignment=TA_RIGHT)
fieldsight_logo = Image('http://' + self.base_url +'/static/images/fs1.jpg')
fieldsight_logo._restrictSize(1.5 * inch, 1.5 * inch)
# headerleft = Paragraph("FieldSight", self.bodystyle)
headerright = Paragraph(self.project_name, style_right)
# w1, h1 = headerleft.wrap(doc.width, doc.topMargin)
w2, h2 = headerright.wrap(doc.width, doc.topMargin)
textWidth = stringWidth(self.project_name, fontName='arialuni',
fontSize=10)
fieldsight_logo.drawOn(canvas, doc.leftMargin, doc.height + doc.topMargin + 12)
headerright.drawOn(canvas, doc.leftMargin, doc.height + doc.topMargin + 20)
try:
project_logo = Image(self.project_logo)
project_logo._restrictSize(0.4 * inch, 0.4 * inch)
project_logo.drawOn(canvas, headerright.width + doc.leftMargin -0.5 * inch - textWidth, doc.height + doc.topMargin + 10)
except:
pass
# header.drawOn(canvas, doc.leftMargin + doc.width, doc.height + doc.topMargin +20)
# Footer
footer = Paragraph('Page no. '+str(canvas._pageNumber), style_right)
w, h = footer.wrap(doc.width, doc.bottomMargin)
footer.drawOn(canvas, doc.leftMargin, h + 40)
# Release the canvas
canvas.restoreState()
def append_row(self, question_name, question_label, question_type, answer_dict):
styNormal = self.bodystyle
styBackground = ParagraphStyle('background', parent=styNormal, backColor=colors.white)
if question_name in answer_dict:
if question_type == 'note':
answer = Paragraph('', styBackground)
isNull = True
elif question_type == 'photo':
#photo = '/media/user/attachments/'+ r_answer[r_question+"/"+question]
size = "small"
try:
result = Attachment.objects.filter(media_file=self.media_folder +'/attachments/'+ answer_dict[question_name])[0:1]
attachment = result[0]
if not attachment.mimetype.startswith('image'):
media_url = 'http://' + self.base_url +'/static/images/img-404.jpg'
media_url = image_url(attachment, size)
except:
media_url = 'http://' + self.base_url +'/static/images/img-404.jpg'
answer = self.create_logo(media_url)
isNull = False
# answer =''
elif question_type == 'audio' or question_type == 'video':
media_link = 'http://'+self.base_url+'/attachment/medium?media_file='+ self.media_folder +'/attachments/'+ answer_dict[question_name]
answer = Paragraph('<link href="'+media_link+'">Attachment</link>', styBackground)
isNull = False
else:
answer_text=answer_dict[question_name]
if len(answer_text) > 1200:
new_answer_text = answer_text[0:360]
answer_text = new_answer_text + ".... ( full answer followed after this table. )"
self.additional_data.append({question_label : answer_dict[question_name]})
answer = Paragraph(answer_text, styBackground)
isNull = False
else:
answer = Paragraph('', styBackground)
isNull = True
if self.removeNullField and isNull:
pass
else:
row=[Paragraph(question_label, styBackground), answer]
self.data.append(row)
def parse_repeat(self, prev_groupname, r_object, nr_answer):
r_question = prev_groupname + r_object['name']
for r_answer in nr_answer:
for first_children in r_object['children']:
question_name = r_question+"/"+first_children['name']
if first_children['type'] == 'group':
self.parse_group(r_question+"/",first_children, r_answer.get('question_name', {}))
elif first_children['type'] == "repeat":
self.parse_repeat(r_question+"/", first_children, r_answer.get('question_name', []))
else:
question_label = question_name
if 'label' in first_children:
question_label = first_children['label']
self.append_row(question_name, question_label, first_children['type'], r_answer)
def parse_group(self, prev_groupname, g_object, g_answer):
g_question = prev_groupname + g_object['name']
for first_children in g_object['children']:
question_name = g_question+"/"+first_children['name']
if first_children['type'] == 'group':
self.parse_group(g_question+"/",first_children, g_answer)
elif first_children['type'] == "repeat":
self.parse_repeat(g_question+"/", first_children, g_answer.get('question_name', []))
else:
question_label = question_name
if 'label' in first_children:
question_label = first_children['label']
self.append_row(question_name, question_label, first_children['type'], g_answer)
def parse_individual_questions(self, parent_object):
for first_children in parent_object:
if first_children['type'] == "repeat":
self.parse_repeat("", first_children, self.main_answer.get(first_children['name'], []))
elif first_children['type'] == 'group':
self.parse_group("", first_children, self.main_answer)
else:
question_name = first_children['name']
question_label = question_name
if 'label' in first_children:
question_label = first_children['label']
self.append_row(question_name, question_label, first_children['type'], self.main_answer)
def append_answers(self, json_question, instance, sub_count):
elements = []
if instance.form_status == 0:
form_status = "Pending"
elif instance.form_status == 1:
form_status = "Rejected"
elif instance.form_status == 2:
form_status = "Flagged"
elif instance.form_status == 3:
form_status = "Approved"
sub_count += 1
elements.append(Spacer(0,10))
elements.append(Paragraph("Submision "+ str(sub_count), self.paragraphstyle))
elements.append(Paragraph("Status : "+form_status, self.paragraphstyle))
elements.append(Paragraph("Submitted By:"+instance.submitted_by.username, self.paragraphstyle))
elements.append(Paragraph("Submitted Date:"+str(instance.date), self.paragraphstyle))
elements.append(Spacer(0,10))
self.data = []
self.additional_data=[]
self.main_answer = instance.instance.json
question = json.loads(json_question)
self.parse_individual_questions(question['children'])
t1 = Table(self.data, colWidths=(60*mm, None))
t1.setStyle(self.ts1)
elements.append(t1)
elements.append(Spacer(0,10))
if self.additional_data:
elements.append(Paragraph("Full Answers", styles['Heading4']))
for items in self.additional_data:
for k,v in items.items():
elements.append(Paragraph(k + " : ", styles['Heading5']))
elements.append(Paragraph(v, self.paragraphstyle))
elements.append(Spacer(0,10))
return elements
def generateFullReport(self, pk, base_url):
self.base_url = base_url
# Our container for 'Flowable' objects
elements = []
toc = TableOfContents()
toc.levelStyles = [
PS(fontName='arialuni', fontSize=12, name='TOCHeading1', leftIndent=20, firstLineIndent=-20, spaceBefore=5, leading=10),
PS(fontName='arialuni', fontSize=10, name='TOCHeading2', leftIndent=40, firstLineIndent=-20, spaceBefore=3, leading=10),
PS(fontName='arialuni', fontSize=9, name='TOCHeading3', leftIndent=40, firstLineIndent=-20, spaceBefore=3, leading=10),
]
elements.append(Paragraph('Responses Report for Site', self.centered))
elements.append(PageBreak())
elements.append(Paragraph('Table of contents', self.centered))
elements.append(toc)
elements.append(PageBreak())
# A large collection of style sheets pre-made for us
styles = getSampleStyleSheet()
styles.add(ParagraphStyle(name='centered', alignment=TA_CENTER))
site = Site.objects.select_related('project').get(pk=pk)
self.project_name = site.project.name
self.project_logo = site.project.logo.url
elements.append(Paragraph(site.name, self.h1))
elements.append(Paragraph(site.identifier, styles['Normal']))
if site.address:
elements.append(Paragraph(site.address, styles['Normal']))
if site.phone:
elements.append(Paragraph(site.phone, styles['Normal']))
if site.region:
elements.append(Paragraph(site.region.name, styles['Normal']))
elements.append(PageBreak())
elements.append(Paragraph('Responses', self.h2))
forms = FieldSightXF.objects.select_related('xf').filter(is_survey=False, is_deleted=False).filter(Q(site_id=site.id, from_project=False) | Q(project_id=site.project_id)).prefetch_related(Prefetch('site_form_instances', queryset=FInstance.objects.select_related('instance')), Prefetch('project_form_instances', queryset=FInstance.objects.select_related('instance').filter(site_id=site.id))).order_by('-is_staged', 'is_scheduled')
if not forms:
elements.append(Paragraph("No Any Responses Yet.", styles['Heading5']))
#a=FieldSightXF.objects.select_related('xf').filter(site_id=291).prefetch_related(Prefetch('site_form_instances', queryset=FInstance.objects.select_related('instance')))
styNormal = styleSheet['Normal']
styBackground = ParagraphStyle('background', parent=styNormal, backColor=colors.white)
for form in forms:
elements.append(Spacer(0,10))
elements.append(Paragraph(form.xf.title, self.h3))
elements.append(Paragraph(form.form_type() + " Form", styles['Heading4']))
if form.stage:
if form.stage.stage:
elements.append(Paragraph("Stage Id: " + str(form.stage.stage.order), self.paragraphstyle))
elements.append(Paragraph("Sub Stage Id: " + str(form.stage.order), self.paragraphstyle))
else:
elements.append(Paragraph("Stage Id: " + str(form.stage.order), self.paragraphstyle))
json_question = form.xf.json
form_user_name = form.xf.user.username
self.media_folder = form_user_name
#cursor = get_instaces_for_site_individual_form(form.id)
sub_count = 0
if not form.from_project and form.site_form_instances.all():
for instance in form.site_form_instances.all():
self.append_answers(json_question, instance, sub_count)
elif form.project_form_instances.all():
for instance in form.project_form_instances.all():
self.append_answers(json_question, instance, sub_count)
else:
elements.append(Paragraph("No Submisions Yet. ", styles['Heading5']))
elements.append(Spacer(0,10))
self.doc.multiBuild(elements, onLaterPages=self._header_footer)
def print_individual_response(self, pk, base_url, remove_null_fields):
self.base_url = base_url
if remove_null_fields == "1":
self.removeNullField = True
# Our container for 'Flowable' objects
elements = []
instance = FInstance.objects.get(instance_id=pk)
styles = getSampleStyleSheet()
styles.add(ParagraphStyle(name='centered', alignment=TA_CENTER))
if instance.site_fxf:
site = instance.site
project = site.project
form = instance.site_fxf
else:
form = instance.project_fxf
project = instance.project
self.project_name = project.name
self.project_logo = project.logo.url
styNormal = styleSheet['Normal']
styBackground = ParagraphStyle('background', parent=styNormal, backColor=colors.white)
if instance.site:
elements.append(Paragraph("Site Identifier : " + instance.site.identifier, self.h3))
elements.append(Paragraph("Site Name : " + instance.site.name, self.h3))
elements.append(Spacer(0,10))
elements.append(Paragraph(form.xf.title, self.h3))
elements.append(Paragraph(form.form_type() + " Form", styles['Heading4']))
if form.stage:
if form.stage.stage:
elements.append(Paragraph("Stage Id: " + str(form.stage.stage.order), styles['Heading5']))
elements.append(Paragraph("Sub Stage Id: " + str(form.stage.order), styles['Heading5']))
else:
elements.append(Paragraph("Stage Id: " + str(form.stage.order), styles['Heading5']))
json_question = form.xf.json
form_user_name = form.xf.user.username
self.media_folder = form_user_name
if instance.form_status == 0:
form_status = "Pending"
elif instance.form_status == 1:
form_status = "Rejected"
elif instance.form_status == 2:
form_status = "Flagged"
elif instance.form_status == 3:
form_status = "Approved"
elements.append(Spacer(0,10))
elements.append(Paragraph("Status : "+form_status, styles['Normal']))
elements.append(Paragraph("Submitted By:"+instance.submitted_by.username, styles['Normal']))
elements.append(Paragraph("Submitted Date:"+str(instance.date), styles['Normal']))
elements.append(Spacer(0,10))
self.data = []
self.additional_data =[]
self.main_answer = instance.instance.json
question = json.loads(json_question)
self.parse_individual_questions(question['children'])
t1 = Table(self.data, colWidths=(60*mm, None))
t1.setStyle(self.ts1)
elements.append(t1)
elements.append(Spacer(0,10))
if self.additional_data:
elements.append(Paragraph("Full Answers", styles['Heading4']))
for items in self.additional_data:
for k,v in items.items():
elements.append(Paragraph(k + " : ", styles['Heading5']))
elements.append(Paragraph(v, self.paragraphstyle))
elements.append(Spacer(0,10))
self.doc.multiBuild(elements, onFirstPage=self._header_footer, onLaterPages=self._header_footer)
def generateCustomSiteReport(self, pk, base_url, fs_ids, startdate, enddate, removeNullField):
self.base_url = base_url
self.removeNullField = removeNullField
# Our container for 'Flowable' objects
elements = []
toc = TableOfContents()
toc.levelStyles = [
PS(fontName='arialuni', fontSize=12, name='TOCHeading1', leftIndent=20, firstLineIndent=-20, spaceBefore=5, leading=10),
PS(fontName='arialuni', fontSize=10, name='TOCHeading2', leftIndent=40, firstLineIndent=-20, spaceBefore=3, leading=10),
PS(fontName='arialuni', fontSize=9, name='TOCHeading3', leftIndent=40, firstLineIndent=-20, spaceBefore=3, leading=10),
]
elements.append(Paragraph('Custom Responses Report for Site', self.centered))
elements.append(PageBreak())
elements.append(Paragraph('Table of contents', self.centered))
elements.append(toc)
elements.append(PageBreak())
# A large collection of style sheets pre-made for us
styles = getSampleStyleSheet()
styles.add(ParagraphStyle(name='centered', alignment=TA_CENTER))
site = Site.objects.select_related('project').get(pk=pk)
self.project_name = site.project.name
self.project_logo = site.project.logo.url
elements.append(Paragraph(site.name, self.h1))
elements.append(Paragraph(site.identifier, styles['Normal']))
if site.address:
elements.append(Paragraph(site.address, styles['Normal']))
if site.phone:
elements.append(Paragraph(site.phone, styles['Normal']))
if site.region:
elements.append(Paragraph(site.region.name, styles['Normal']))
elements.append(Spacer(0,10))
elements.append(Paragraph("Site Information", styles['Normal']))
metas = generateSiteMetaAttribs(pk)
styBackground = ParagraphStyle('background', parent=self.bodystyle, backColor=colors.white)
meta_data=[]
if metas:
for meta in metas:
row=[Paragraph(str(meta['question_text']), styBackground), Paragraph(str(meta['answer']), styBackground)]
meta_data.append(row)
metat1 = Table(meta_data, colWidths=(60*mm, None))
metat1.setStyle(self.ts1)
elements.append(metat1)
elements.append(PageBreak())
elements.append(Paragraph('Responses', self.h2))
split_startdate = startdate.split('-')
split_enddate = enddate.split('-')
new_startdate = date(int(split_startdate[0]), int(split_startdate[1]), int(split_startdate[2]))
end = date(int(split_enddate[0]), int(split_enddate[1]), int(split_enddate[2]))
new_enddate = end + datetime.timedelta(days=1)
forms = FieldSightXF.objects.select_related('xf').filter(pk__in=fs_ids, is_survey=False, is_deleted=False).prefetch_related(Prefetch('site_form_instances', queryset=FInstance.objects.select_related('instance').filter(date__range=[new_startdate, new_enddate])), Prefetch('project_form_instances', queryset=FInstance.objects.select_related('instance').filter(site_id=site.id, date__range=[new_startdate, new_enddate]))).order_by('-is_staged', 'is_scheduled')
if not forms:
elements.append(Paragraph("No Any Responses Yet.", styles['Heading5']))
#a=FieldSightXF.objects.select_related('xf').filter(site_id=291).prefetch_related(Prefetch('site_form_instances', queryset=FInstance.objects.select_related('instance')))
styNormal = styleSheet['Normal']
styBackground = ParagraphStyle('background', parent=styNormal, backColor=colors.white)
for form in forms:
elements.append(Spacer(0,10))
elements.append(Paragraph(form.xf.title, self.h3))
elements.append(Paragraph(form.form_type() + " Form", styles['Heading4']))
if form.stage:
if form.stage.stage:
elements.append(Paragraph("Stage Id: " + str(form.stage.stage.order), self.paragraphstyle))
elements.append(Paragraph("Sub Stage Id: " + str(form.stage.order), self.paragraphstyle))
else:
elements.append(Paragraph("Stage Id: " + str(form.stage.order), self.paragraphstyle))
json_question = form.xf.json
form_user_name = form.xf.user.username
self.media_folder = form_user_name
#cursor = get_instaces_for_site_individual_form(form.id)
sub_count = 0
if not form.from_project and form.site_form_instances.all():
for instance in form.site_form_instances.all():
new_elements = self.append_answers(json_question, instance, sub_count)
elements+=new_elements
elif form.project_form_instances.all():
for instance in form.project_form_instances.all():
new_elements = self.append_answers(json_question, instance, sub_count)
elements+=new_elements
else:
elements.append(Paragraph("No Submisions Yet. ", styles['Heading5']))
elements.append(Spacer(0,10))
self.doc.multiBuild(elements, onLaterPages=self._header_footer) |
beernarrd/gramps | refs/heads/sl-master | gramps/gen/simple/_simpledoc.py | 10 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Provide a simplified database access interface to the Gramps database.
"""
from ..plug.docgen import StyleSheet, ParagraphStyle, TableStyle,\
TableCellStyle, FONT_SANS_SERIF, PARA_ALIGN_LEFT
class SimpleDoc:
    """
    Thin convenience wrapper that writes styled paragraphs to a BaseDoc.
    """

    def __init__(self, doc):
        """Wrap the real document object ``doc``."""
        self.doc = doc

    def __write(self, fmt, text):
        """Emit ``text`` as one paragraph using the paragraph style ``fmt``."""
        self.doc.start_paragraph(fmt)
        self.doc.write_text(text)
        self.doc.end_paragraph()

    def title(self, text):
        """Write ``text`` using the Title paragraph style."""
        self.__write('Title', text)

    def header1(self, text):
        """Write ``text`` using the Header1 (first level) style."""
        self.__write('Header1', text)

    def header2(self, text):
        """Write ``text`` using the Header2 (second level) style."""
        self.__write('Header2', text)

    def header3(self, text):
        """Write ``text`` using the Header3 (third level) style."""
        self.__write('Header3', text)

    def paragraph(self, text):
        """Write ``text`` using the Normal paragraph style."""
        self.__write('Normal', text)
def _apply_style_overrides(style, kwargs, item):
    """Apply the overrides ``kwargs[item] = {method_name: value, ...}`` to
    ``style`` by calling each named method.  ``None`` values and a missing
    ``item`` key are skipped."""
    for method, value in kwargs.get(item, {}).items():
        if value is not None:
            getattr(style, method)(value)

def make_basic_stylesheet(**kwargs):
    """
    Create the basic style sheet for the SimpleDoc class.

    kwargs - a dictionary of the form:
       item={method: value, ...}, ...

    Example:

       make_basic_stylesheet(Table={"set_width": 90})
    """
    sheet = StyleSheet()

    # Title: sans-serif, 14pt bold, left aligned.
    pstyle = ParagraphStyle()
    fstyle = pstyle.get_font()
    fstyle.set_type_face(FONT_SANS_SERIF)
    fstyle.set_size(14)
    fstyle.set_bold(True)
    pstyle.set_font(fstyle)
    pstyle.set_alignment(PARA_ALIGN_LEFT)
    _apply_style_overrides(pstyle, kwargs, "Title")
    sheet.add_paragraph_style('Title', pstyle)

    # Header1: sans-serif, 12pt bold.
    pstyle = ParagraphStyle()
    fstyle = pstyle.get_font()
    fstyle.set_type_face(FONT_SANS_SERIF)
    fstyle.set_size(12)
    fstyle.set_bold(True)
    pstyle.set_font(fstyle)
    pstyle.set_alignment(PARA_ALIGN_LEFT)
    pstyle.set_tabs([4, 8, 12, 16])
    _apply_style_overrides(pstyle, kwargs, "Header1")
    sheet.add_paragraph_style('Header1', pstyle)

    # Header2: sans-serif, 10pt bold.
    pstyle = ParagraphStyle()
    fstyle = pstyle.get_font()
    fstyle.set_type_face(FONT_SANS_SERIF)
    fstyle.set_size(10)
    fstyle.set_bold(True)
    pstyle.set_font(fstyle)
    pstyle.set_alignment(PARA_ALIGN_LEFT)
    pstyle.set_tabs([4, 8, 12, 16])
    _apply_style_overrides(pstyle, kwargs, "Header2")
    sheet.add_paragraph_style('Header2', pstyle)

    # Header3: sans-serif, 10pt bold italic.
    pstyle = ParagraphStyle()
    fstyle = pstyle.get_font()
    fstyle.set_type_face(FONT_SANS_SERIF)
    fstyle.set_size(10)
    fstyle.set_bold(True)
    fstyle.set_italic(True)
    pstyle.set_font(fstyle)
    pstyle.set_alignment(PARA_ALIGN_LEFT)
    pstyle.set_tabs([4, 8, 12, 16])
    _apply_style_overrides(pstyle, kwargs, "Header3")
    sheet.add_paragraph_style('Header3', pstyle)

    # Normal: default font with tab stops.
    pstyle = ParagraphStyle()
    pstyle.set_tabs([4, 8, 12, 16])
    _apply_style_overrides(pstyle, kwargs, "Normal")
    sheet.add_paragraph_style('Normal', pstyle)

    # Styles for tables: two columns, 20/80 split.
    tbl = TableStyle()
    tbl.set_width(100)
    tbl.set_columns(2)
    tbl.set_column_width(0, 20)
    tbl.set_column_width(1, 80)
    _apply_style_overrides(tbl, kwargs, "Table")
    sheet.add_table_style("Table", tbl)

    cell = TableCellStyle()
    cell.set_top_border(1)
    cell.set_bottom_border(1)
    _apply_style_overrides(cell, kwargs, "TableHead")
    sheet.add_cell_style("TableHead", cell)

    cell = TableCellStyle()
    _apply_style_overrides(cell, kwargs, "TableHeaderCell")
    sheet.add_cell_style("TableHeaderCell", cell)

    cell = TableCellStyle()
    cell.set_longlist(1)
    _apply_style_overrides(cell, kwargs, "TableDataCell")
    sheet.add_cell_style("TableDataCell", cell)

    return sheet
|
ndemir/scrapy | refs/heads/master | scrapy/utils/testproc.py | 19 | import sys
import os
from twisted.internet import reactor, defer, protocol
class ProcessTest(object):
    """Mixin for twisted trial tests that run the `scrapy` CLI in a subprocess.

    Subclasses set `command` to the scrapy subcommand under test and call
    execute() with its arguments.
    """

    # Scrapy subcommand to run; subclasses must override.
    command = None
    # Base command line: current interpreter running scrapy's CLI module.
    prefix = [sys.executable, '-m', 'scrapy.cmdline']
    cwd = os.getcwd() # trial chdirs to temp dir

    def execute(self, args, check_code=True, settings='missing'):
        """Spawn `scrapy <command> <args>` and return a Deferred that fires
        with (exitcode, stdout, stderr).

        check_code -- when True, a non-zero exit code raises RuntimeError.
        settings   -- value for SCRAPY_SETTINGS_MODULE in the child env.
        """
        env = os.environ.copy()
        env['SCRAPY_SETTINGS_MODULE'] = settings
        cmd = self.prefix + [self.command] + list(args)
        pp = TestProcessProtocol()
        pp.deferred.addBoth(self._process_finished, cmd, check_code)
        reactor.spawnProcess(pp, cmd[0], cmd, env=env, path=self.cwd)
        return pp.deferred

    def _process_finished(self, pp, cmd, check_code):
        # Deferred callback: `pp` is the TestProcessProtocol instance that
        # fired the deferred (it passes itself via callback(self)).
        if pp.exitcode and check_code:
            msg = "process %s exit with code %d" % (cmd, pp.exitcode)
            msg += "\n>>> stdout <<<\n%s" % pp.out
            msg += "\n"
            msg += "\n>>> stderr <<<\n%s" % pp.err
            raise RuntimeError(msg)
        return pp.exitcode, pp.out, pp.err
class TestProcessProtocol(protocol.ProcessProtocol):
    """Accumulates stdout/stderr and the exit code of a spawned process.

    `deferred` fires with this protocol instance once the process ends.
    NOTE(review): out/err start as str; on Python 3 twisted delivers bytes
    to outReceived/errReceived — confirm the targeted Python version.
    """

    def __init__(self):
        self.deferred = defer.Deferred()
        self.out = ''
        self.err = ''
        self.exitcode = None

    def outReceived(self, data):
        # Append each stdout chunk as it arrives.
        self.out += data

    def errReceived(self, data):
        # Append each stderr chunk as it arrives.
        self.err += data

    def processEnded(self, status):
        # status.value carries the exit information for the ended process.
        self.exitcode = status.value.exitCode
        self.deferred.callback(self)
|
masayukig/tempest | refs/heads/master | tempest/api/network/test_service_providers.py | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.network import base
from tempest.common import utils
from tempest.lib import decorators
class ServiceProvidersTest(base.BaseNetworkTest):
    """Smoke test for the Neutron 'service-type' API extension."""

    @decorators.idempotent_id('2cbbeea9-f010-40f6-8df5-4eaa0c918ea6')
    @testtools.skipUnless(
        utils.is_extension_enabled('service-type', 'network'),
        'service-type extension not enabled.')
    def test_service_providers_list(self):
        """Listing service providers returns a list under 'service_providers'."""
        body = self.service_providers_client.list_service_providers()
        self.assertIn('service_providers', body)
        self.assertIsInstance(body['service_providers'], list)
|
romain-dartigues/ansible | refs/heads/devel | lib/ansible/modules/cloud/azure/azure_rm_dnszone_facts.py | 7 | #!/usr/bin/python
#
# Copyright (c) 2017 Obezimnaka Boms, <t-ozboms@microsoft.com>
#
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_dnszone_facts
version_added: "2.4"
short_description: Get DNS zone facts.
description:
- Get facts for a specific DNS zone or all DNS zones within a resource group.
options:
resource_group:
description:
- Limit results by resource group. Required when filtering by name.
name:
description:
- Only show results for a specific zone.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Obezimnaka Boms (@ozboms)"
'''
EXAMPLES = '''
- name: Get facts for one zone
azure_rm_dnszone_facts:
resource_group: Testing
name: foobar22
- name: Get facts for all zones in a resource group
azure_rm_dnszone_facts:
resource_group: Testing
- name: Get facts by tags
azure_rm_dnszone_facts:
tags:
- testing
'''
RETURN = '''
azure_dnszones:
description: List of zone dicts.
returned: always
type: list
example: [{
"etag": "00000002-0000-0000-0dcb-df5776efd201",
"location": "global",
"properties": {
"maxNumberOfRecordSets": 5000,
"numberOfRecordSets": 15
},
"tags": {}
}]
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
    from msrestazure.azure_exceptions import CloudError
    from azure.common import AzureMissingResourceHttpError, AzureHttpError
except ImportError:
    # Only a missing SDK should be tolerated here (handled in
    # azure_rm_common); the previous bare `except:` also swallowed
    # KeyboardInterrupt/SystemExit.
    pass
AZURE_OBJECT_CLASS = 'DnsZone'
class AzureRMDNSZoneFacts(AzureRMModuleBase):
    """Ansible facts module: gather Azure DNS zones, optionally filtered by
    resource group, zone name and/or tags."""

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list')
        )
        # store the results of the module operation
        self.results = dict(
            changed=False,
            ansible_facts=dict(azure_dnszones=[])
        )
        self.name = None
        self.resource_group = None
        self.tags = None
        super(AzureRMDNSZoneFacts, self).__init__(self.module_arg_spec)

    def exec_module(self, **kwargs):
        """Dispatch to the narrowest listing based on the supplied filters and
        return the results dict (never changes state: changed=False)."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")
        # list the conditions and what to return based on user input
        if self.name is not None:
            # if there is a name, facts about that specific zone
            self.results['ansible_facts']['azure_dnszones'] = self.get_item()
        elif self.resource_group:
            # all the zones listed in that specific resource group
            self.results['ansible_facts']['azure_dnszones'] = self.list_resource_group()
        else:
            # all the zones in a subscription
            self.results['ansible_facts']['azure_dnszones'] = self.list_items()
        return self.results

    def get_item(self):
        """Return a single-element list with the named zone (serialized), or
        an empty list when it does not exist or its tags do not match."""
        self.log('Get properties for {0}'.format(self.name))
        item = None
        results = []
        # get specific zone
        try:
            item = self.dns_client.zones.get(self.resource_group, self.name)
        except CloudError:
            pass
        # serialize result
        if item and self.has_tags(item.tags, self.tags):
            results = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
        return results

    def list_resource_group(self):
        """Return serialized zones in self.resource_group matching self.tags."""
        self.log('List items for resource group')
        try:
            response = self.dns_client.zones.list_by_resource_group(self.resource_group)
        except AzureHttpError as exc:
            self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))
        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
        return results

    def list_items(self):
        """Return serialized zones from the whole subscription matching self.tags."""
        self.log('List all items')
        try:
            response = self.dns_client.zones.list()
        except AzureHttpError as exc:
            self.fail("Failed to list all items - {0}".format(str(exc)))
        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
        return results
def main():
AzureRMDNSZoneFacts()
if __name__ == '__main__':
main()
|
cxxgtxy/tensorflow | refs/heads/master | tensorflow/python/summary/summary_test.py | 57 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import json_format
from tensorflow.core.framework import summary_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary as summary_lib
class ScalarSummaryTest(test.TestCase):
def testScalarSummary(self):
with self.test_session() as s:
i = constant_op.constant(3)
with ops.name_scope('outer'):
im = summary_lib.scalar('inner', i)
summary_str = s.run(im)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 1)
self.assertEqual(values[0].tag, 'outer/inner')
self.assertEqual(values[0].simple_value, 3.0)
def testSummarizingVariable(self):
with self.test_session() as s:
c = constant_op.constant(42.0)
v = variables.Variable(c)
ss = summary_lib.scalar('summary', v)
init = variables.global_variables_initializer()
s.run(init)
summ_str = s.run(ss)
summary = summary_pb2.Summary()
summary.ParseFromString(summ_str)
self.assertEqual(len(summary.value), 1)
value = summary.value[0]
self.assertEqual(value.tag, 'summary')
self.assertEqual(value.simple_value, 42.0)
def testImageSummary(self):
with self.test_session() as s:
i = array_ops.ones((5, 4, 4, 3))
with ops.name_scope('outer'):
im = summary_lib.image('inner', i, max_outputs=3)
summary_str = s.run(im)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('outer/inner/image/{}'.format(i) for i in xrange(3))
self.assertEqual(tags, expected)
def testHistogramSummary(self):
with self.test_session() as s:
i = array_ops.ones((5, 4, 4, 3))
with ops.name_scope('outer'):
summ_op = summary_lib.histogram('inner', i)
summary_str = s.run(summ_op)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
self.assertEqual(len(summary.value), 1)
self.assertEqual(summary.value[0].tag, 'outer/inner')
def testSummaryNameConversion(self):
c = constant_op.constant(3)
s = summary_lib.scalar('name with spaces', c)
self.assertEqual(s.op.name, 'name_with_spaces')
s2 = summary_lib.scalar('name with many $#illegal^: characters!', c)
self.assertEqual(s2.op.name, 'name_with_many___illegal___characters_')
s3 = summary_lib.scalar('/name/with/leading/slash', c)
self.assertEqual(s3.op.name, 'name/with/leading/slash')
if __name__ == '__main__':
test.main()
|
collective/mr.poe | refs/heads/master | raven/contrib/django/raven_compat/templatetags/raven.py | 1 | """
raven.contrib.django.raven_compat.templatetags.raven
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from raven.contrib.django.templatetags.raven import * # NOQA
|
nwjs/blink | refs/heads/nw12 | Tools/Scripts/webkitpy/layout_tests/controllers/repaint_overlay_unittest.py | 59 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from webkitpy.layout_tests.controllers import repaint_overlay
LAYER_TREE = """{
"bounds":[800.00,600.00],
"children":[
{
"position": [8.00, 80.00],
"bounds": [800.00, 600.00],
"contentsOpaque": true,
"drawsContent": true,
"repaintRects": [
[8, 108, 100, 100],
[0, 216, 800, 100]
]
}
]
}
"""
class TestRepaintOverlay(unittest.TestCase):
def test_result_contains_repaint_rects(self):
self.assertTrue(repaint_overlay.result_contains_repaint_rects(LAYER_TREE))
self.assertFalse(repaint_overlay.result_contains_repaint_rects('ABCD'))
def test_extract_layer_tree(self):
self.assertEquals(LAYER_TREE, repaint_overlay.extract_layer_tree(LAYER_TREE))
|
freedomgames/Planet-Lab | refs/heads/master | backend/tests/s3/views_test.py | 3 | """Tests for handling s3."""
import boto.s3.bucket
import boto.s3.key
import datetime
import json
import mock
import unittest
import backend
import backend.common.s3 as s3
import backend.quests.views as quest_views
import harness
class FakeDateTime(datetime.datetime):
"""Mock object to return a constant 'now' time."""
@staticmethod
def utcnow():
"""Return a constant 'now' time."""
return FakeDateTime(2012, 12, 21)
class S3Test(harness.TestHarness):
"""Tests for handling s3."""
@mock.patch.object(s3.datetime, 'datetime', FakeDateTime)
def test_s3_upload_signature(self):
"""Test the s3_upload_signature function."""
signature = s3.s3_upload_signature('snake.png', 'image/png')
self.assertEqual(signature, {
'file_name': 'snake.png',
's3_url': 'https://bucket.s3.amazonaws.com/snake.png',
'cdn_url': 'http://clouds.cloudfront.net/snake.png',
'upload_args': {
'url': 'https://bucket.s3.amazonaws.com/',
'method': 'POST',
'data': {
'AWSAccessKeyId': 'key',
'success_action_status': '201',
'acl': 'public-read',
'key': 'snake.png',
'Content-Type': 'image/png',
'Signature': 'kETEObkncWe3ZPmvevjVxVNvojQ=',
'Policy': "eyJjb25kaXRpb25zIjogW1siZXEiLCAiJGtleSIsICJz"
"bmFrZS5wbmciXSwgeyJidWNrZXQiOiAiYnVja2V0In0s"
"IHsiYWNsIjogInB1YmxpYy1yZWFkIn0sIFsiZXEiLCAi"
"JENvbnRlbnQtVHlwZSIsICJpbWFnZS9wbmciXSwgeyJzd"
"WNjZXNzX2FjdGlvbl9zdGF0dXMiOiAiMjAxIn1dLCAiZX"
"hwaXJhdGlvbiI6ICIyMDEyLTEyLTIxVDAxOjAwOjAwLjA"
"wMFoifQ=="}}})
@harness.with_sess(user_id=1)
def test_sign_avatar_upload(self):
"""Test the sign-avatar-upload route."""
resp = self.app.get(
self.url_for(
backend.user_views.UserAvatar,
user_id='1',
file_name='a.png',
mime_type='image/png'))
self.assertEqual(
json.loads(resp.data)['s3_url'],
"https://bucket.s3.amazonaws.com/avatars/1/a.png")
@harness.with_sess(user_id=1)
def test_sign_quest_upload(self):
"""Test the sign-quest-upload route."""
resp = self.app.get(
self.url_for(
backend.quest_views.QuestStaticAsset,
quest_id='4',
file_name='b.png',
mime_type='image/png'))
self.assertEqual(
json.loads(resp.data)['s3_url'],
"https://bucket.s3.amazonaws.com/quests/4/b.png")
@harness.with_sess(user_id=1)
@mock.patch.object(quest_views.s3, 'get_bucket')
def test_asset_listing(self, m_get_bucket):
"""Test listing assets for quest uploads."""
class FakeBucket(object):
"""Mock object for an S3 bucket."""
bucket = boto.s3.bucket.Bucket(
connection=s3.get_conn(), name='bucket')
def make_key(self, name):
"""Return a key with the given name in the bucket 'bucket'."""
return boto.s3.key.Key(bucket=self.bucket, name=name)
def list(self, prefix):
"""Mock list method on the bucket."""
return [self.make_key(prefix),
self.make_key(prefix + 'a'),
self.make_key(prefix + 'b')]
m_get_bucket.return_value = FakeBucket()
resp = self.app.get(
self.url_for(
backend.quest_views.QuestStaticAssets, quest_id='4'))
self.assertEqual(json.loads(resp.data), {
"assets": [
{"file_name": "a",
"url": "https://bucket.s3.amazonaws.com/quests/4/a"},
{"file_name": "b",
"url": "https://bucket.s3.amazonaws.com/quests/4/b"}]})
@harness.with_sess(user_id=1)
@mock.patch.object(quest_views.s3, 'get_bucket')
def test_asset_delete(self, m_get_bucket):
"""Test deleting assets for quest uploads."""
class FakeBucket(object):
"""Mock object for an S3 bucket."""
@staticmethod
def delete_key(key):
"""Assert that 'key' is the correct value."""
self.assertEqual(key, 'quests/4/a')
m_get_bucket.return_value = FakeBucket()
resp = self.app.delete(
self.url_for(
backend.quest_views.QuestStaticAsset,
quest_id='4', file_name='a'))
self.assertEqual(resp.status_code, 200)
@mock.patch.object(s3, 'get_conn')
def test_get_bucket(self, m_get_conn):
"""Test the get_bucket function."""
class FakeConn(object):
"""Mock object for an s3 connection."""
@staticmethod
def get_bucket(bucket_name, validate=None):
"""Make sure get_bucket is passed good arguments."""
self.assertEqual(bucket_name, 'bucket')
self.assertFalse(validate)
m_get_conn.return_value = FakeConn()
s3.get_bucket()
if __name__ == '__main__':
unittest.main()
|
kangkot/arangodb | refs/heads/devel | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_zipfile.py | 48 | # We can test part of the module without zlib.
try:
import zlib
except ImportError:
zlib = None
import zipfile, os, unittest, sys, shutil, struct
from StringIO import StringIO
from tempfile import TemporaryFile
from random import randint, random
import test.test_support as support
from test.test_support import TESTFN, run_unittest, findfile
TESTFN2 = TESTFN + "2"
TESTFNDIR = TESTFN + "d"
FIXEDTEST_SIZE = 1000
SMALL_TEST_DATA = [('_ziptest1', '1q2w3e4r5t'),
('ziptest2dir/_ziptest2', 'qawsedrftg'),
('/ziptest2dir/ziptest3dir/_ziptest3', 'azsxdcfvgb'),
('ziptest2dir/ziptest3dir/ziptest4dir/_ziptest3', '6y7u8i9o0p')]
class TestsWithSourceFile(unittest.TestCase):
def setUp(self):
self.line_gen = ["Zipfile test line %d. random float: %f" % (i, random())
for i in xrange(FIXEDTEST_SIZE)]
self.data = '\n'.join(self.line_gen) + '\n'
# Make a source file with some lines
fp = open(TESTFN, "wb")
fp.write(self.data)
fp.close()
def makeTestArchive(self, f, compression):
# Create the ZIP archive
zipfp = zipfile.ZipFile(f, "w", compression)
zipfp.write(TESTFN, "another"+os.extsep+"name")
zipfp.write(TESTFN, TESTFN)
zipfp.writestr("strfile", self.data)
zipfp.close()
def zipTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r", compression)
self.assertEqual(zipfp.read(TESTFN), self.data)
self.assertEqual(zipfp.read("another"+os.extsep+"name"), self.data)
self.assertEqual(zipfp.read("strfile"), self.data)
# Print the ZIP directory
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
zipfp.printdir()
finally:
sys.stdout = stdout
directory = fp.getvalue()
lines = directory.splitlines()
self.assertEquals(len(lines), 4) # Number of files + header
self.assert_('File Name' in lines[0])
self.assert_('Modified' in lines[0])
self.assert_('Size' in lines[0])
fn, date, time, size = lines[1].split()
self.assertEquals(fn, 'another.name')
# XXX: timestamp is not tested
self.assertEquals(size, str(len(self.data)))
# Check the namelist
names = zipfp.namelist()
self.assertEquals(len(names), 3)
self.assert_(TESTFN in names)
self.assert_("another"+os.extsep+"name" in names)
self.assert_("strfile" in names)
# Check infolist
infos = zipfp.infolist()
names = [ i.filename for i in infos ]
self.assertEquals(len(names), 3)
self.assert_(TESTFN in names)
self.assert_("another"+os.extsep+"name" in names)
self.assert_("strfile" in names)
for i in infos:
self.assertEquals(i.file_size, len(self.data))
# check getinfo
for nm in (TESTFN, "another"+os.extsep+"name", "strfile"):
info = zipfp.getinfo(nm)
self.assertEquals(info.filename, nm)
self.assertEquals(info.file_size, len(self.data))
# Check that testzip doesn't raise an exception
zipfp.testzip()
zipfp.close()
def testStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipTest(f, zipfile.ZIP_STORED)
def zipOpenTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r", compression)
zipdata1 = []
zipopen1 = zipfp.open(TESTFN)
while 1:
read_data = zipopen1.read(256)
if not read_data:
break
zipdata1.append(read_data)
zipdata2 = []
zipopen2 = zipfp.open("another"+os.extsep+"name")
while 1:
read_data = zipopen2.read(256)
if not read_data:
break
zipdata2.append(read_data)
self.assertEqual(''.join(zipdata1), self.data)
self.assertEqual(''.join(zipdata2), self.data)
zipfp.close()
def testOpenStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipOpenTest(f, zipfile.ZIP_STORED)
def testOpenViaZipInfo(self):
# Create the ZIP archive
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED)
zipfp.writestr("name", "foo")
zipfp.writestr("name", "bar")
zipfp.close()
zipfp = zipfile.ZipFile(TESTFN2, "r")
infos = zipfp.infolist()
data = ""
for info in infos:
data += zipfp.open(info).read()
self.assert_(data == "foobar" or data == "barfoo")
data = ""
for info in infos:
data += zipfp.read(info)
self.assert_(data == "foobar" or data == "barfoo")
zipfp.close()
def zipRandomOpenTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r", compression)
zipdata1 = []
zipopen1 = zipfp.open(TESTFN)
while 1:
read_data = zipopen1.read(randint(1, 1024))
if not read_data:
break
zipdata1.append(read_data)
self.assertEqual(''.join(zipdata1), self.data)
zipfp.close()
def testRandomOpenStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipRandomOpenTest(f, zipfile.ZIP_STORED)
def zipReadlineTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r")
zipopen = zipfp.open(TESTFN)
for line in self.line_gen:
linedata = zipopen.readline()
self.assertEqual(linedata, line + '\n')
zipfp.close()
def zipReadlinesTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r")
ziplines = zipfp.open(TESTFN).readlines()
for line, zipline in zip(self.line_gen, ziplines):
self.assertEqual(zipline, line + '\n')
zipfp.close()
def zipIterlinesTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r")
for line, zipline in zip(self.line_gen, zipfp.open(TESTFN)):
self.assertEqual(zipline, line + '\n')
zipfp.close()
def testReadlineStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipReadlineTest(f, zipfile.ZIP_STORED)
def testReadlinesStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipReadlinesTest(f, zipfile.ZIP_STORED)
def testIterlinesStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipIterlinesTest(f, zipfile.ZIP_STORED)
if zlib:
def testDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipTest(f, zipfile.ZIP_DEFLATED)
def testOpenDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipOpenTest(f, zipfile.ZIP_DEFLATED)
def testRandomOpenDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipRandomOpenTest(f, zipfile.ZIP_DEFLATED)
def testReadlineDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipReadlineTest(f, zipfile.ZIP_DEFLATED)
def testReadlinesDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipReadlinesTest(f, zipfile.ZIP_DEFLATED)
def testIterlinesDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipIterlinesTest(f, zipfile.ZIP_DEFLATED)
def testLowCompression(self):
# Checks for cases where compressed data is larger than original
# Create the ZIP archive
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_DEFLATED)
zipfp.writestr("strfile", '12')
zipfp.close()
# Get an open object for strfile
zipfp = zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_DEFLATED)
openobj = zipfp.open("strfile")
self.assertEqual(openobj.read(1), '1')
self.assertEqual(openobj.read(1), '2')
def testAbsoluteArcnames(self):
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED)
zipfp.write(TESTFN, "/absolute")
zipfp.close()
zipfp = zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED)
self.assertEqual(zipfp.namelist(), ["absolute"])
zipfp.close()
def testAppendToZipFile(self):
# Test appending to an existing zipfile
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED)
zipfp.write(TESTFN, TESTFN)
zipfp.close()
zipfp = zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED)
zipfp.writestr("strfile", self.data)
self.assertEqual(zipfp.namelist(), [TESTFN, "strfile"])
zipfp.close()
def testAppendToNonZipFile(self):
# Test appending to an existing file that is not a zipfile
# NOTE: this test fails if len(d) < 22 because of the first
# line "fpin.seek(-22, 2)" in _EndRecData
d = 'I am not a ZipFile!'*10
f = file(TESTFN2, 'wb')
f.write(d)
f.close()
zipfp = zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED)
zipfp.write(TESTFN, TESTFN)
zipfp.close()
f = file(TESTFN2, 'rb')
f.seek(len(d))
zipfp = zipfile.ZipFile(f, "r")
self.assertEqual(zipfp.namelist(), [TESTFN])
zipfp.close()
f.close()
def test_WriteDefaultName(self):
# Check that calling ZipFile.write without arcname specified produces the expected result
zipfp = zipfile.ZipFile(TESTFN2, "w")
zipfp.write(TESTFN)
self.assertEqual(zipfp.read(TESTFN), file(TESTFN).read())
zipfp.close()
def test_PerFileCompression(self):
# Check that files within a Zip archive can have different compression options
zipfp = zipfile.ZipFile(TESTFN2, "w")
zipfp.write(TESTFN, 'storeme', zipfile.ZIP_STORED)
zipfp.write(TESTFN, 'deflateme', zipfile.ZIP_DEFLATED)
sinfo = zipfp.getinfo('storeme')
dinfo = zipfp.getinfo('deflateme')
self.assertEqual(sinfo.compress_type, zipfile.ZIP_STORED)
self.assertEqual(dinfo.compress_type, zipfile.ZIP_DEFLATED)
zipfp.close()
def test_WriteToReadonly(self):
# Check that trying to call write() on a readonly ZipFile object
# raises a RuntimeError
zipf = zipfile.ZipFile(TESTFN2, mode="w")
zipf.writestr("somefile.txt", "bogus")
zipf.close()
zipf = zipfile.ZipFile(TESTFN2, mode="r")
self.assertRaises(RuntimeError, zipf.write, TESTFN)
zipf.close()
def testExtract(self):
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED)
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
zipfp.close()
zipfp = zipfile.ZipFile(TESTFN2, "r")
for fpath, fdata in SMALL_TEST_DATA:
writtenfile = zipfp.extract(fpath)
# make sure it was written to the right place
if os.path.isabs(fpath):
correctfile = os.path.join(os.getcwd(), fpath[1:])
else:
correctfile = os.path.join(os.getcwd(), fpath)
correctfile = os.path.normpath(correctfile)
self.assertEqual(writtenfile, correctfile)
# make sure correct data is in correct file
self.assertEqual(fdata, file(writtenfile, "rb").read())
os.remove(writtenfile)
zipfp.close()
# remove the test file subdirectories
shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
def testExtractAll(self):
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED)
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
zipfp.close()
zipfp = zipfile.ZipFile(TESTFN2, "r")
zipfp.extractall()
for fpath, fdata in SMALL_TEST_DATA:
if os.path.isabs(fpath):
outfile = os.path.join(os.getcwd(), fpath[1:])
else:
outfile = os.path.join(os.getcwd(), fpath)
self.assertEqual(fdata, file(outfile, "rb").read())
os.remove(outfile)
zipfp.close()
# remove the test file subdirectories
shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
def zip_test_writestr_permissions(self, f, compression):
# Make sure that writestr creates files with mode 0600,
# when it is passed a name rather than a ZipInfo instance.
self.makeTestArchive(f, compression)
zipfp = zipfile.ZipFile(f, "r")
zinfo = zipfp.getinfo('strfile')
self.assertEqual(zinfo.external_attr, 0600 << 16)
def test_writestr_permissions(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_test_writestr_permissions(f, zipfile.ZIP_STORED)
def tearDown(self):
os.remove(TESTFN)
os.remove(TESTFN2)
class TestZip64InSmallFiles(unittest.TestCase):
# These tests test the ZIP64 functionality without using large files,
# see test_zipfile64 for proper tests.
def setUp(self):
self._limit = zipfile.ZIP64_LIMIT
zipfile.ZIP64_LIMIT = 5
line_gen = ("Test of zipfile line %d." % i for i in range(0, FIXEDTEST_SIZE))
self.data = '\n'.join(line_gen)
# Make a source file with some lines
fp = open(TESTFN, "wb")
fp.write(self.data)
fp.close()
def largeFileExceptionTest(self, f, compression):
zipfp = zipfile.ZipFile(f, "w", compression)
self.assertRaises(zipfile.LargeZipFile,
zipfp.write, TESTFN, "another"+os.extsep+"name")
zipfp.close()
def largeFileExceptionTest2(self, f, compression):
zipfp = zipfile.ZipFile(f, "w", compression)
self.assertRaises(zipfile.LargeZipFile,
zipfp.writestr, "another"+os.extsep+"name", self.data)
zipfp.close()
def testLargeFileException(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.largeFileExceptionTest(f, zipfile.ZIP_STORED)
self.largeFileExceptionTest2(f, zipfile.ZIP_STORED)
def zipTest(self, f, compression):
# Create the ZIP archive
zipfp = zipfile.ZipFile(f, "w", compression, allowZip64=True)
zipfp.write(TESTFN, "another"+os.extsep+"name")
zipfp.write(TESTFN, TESTFN)
zipfp.writestr("strfile", self.data)
zipfp.close()
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r", compression)
self.assertEqual(zipfp.read(TESTFN), self.data)
self.assertEqual(zipfp.read("another"+os.extsep+"name"), self.data)
self.assertEqual(zipfp.read("strfile"), self.data)
# Print the ZIP directory
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
zipfp.printdir()
finally:
sys.stdout = stdout
directory = fp.getvalue()
lines = directory.splitlines()
self.assertEquals(len(lines), 4) # Number of files + header
self.assert_('File Name' in lines[0])
self.assert_('Modified' in lines[0])
self.assert_('Size' in lines[0])
fn, date, time, size = lines[1].split()
self.assertEquals(fn, 'another.name')
# XXX: timestamp is not tested
self.assertEquals(size, str(len(self.data)))
# Check the namelist
names = zipfp.namelist()
self.assertEquals(len(names), 3)
self.assert_(TESTFN in names)
self.assert_("another"+os.extsep+"name" in names)
self.assert_("strfile" in names)
# Check infolist
infos = zipfp.infolist()
names = [ i.filename for i in infos ]
self.assertEquals(len(names), 3)
self.assert_(TESTFN in names)
self.assert_("another"+os.extsep+"name" in names)
self.assert_("strfile" in names)
for i in infos:
self.assertEquals(i.file_size, len(self.data))
# check getinfo
for nm in (TESTFN, "another"+os.extsep+"name", "strfile"):
info = zipfp.getinfo(nm)
self.assertEquals(info.filename, nm)
self.assertEquals(info.file_size, len(self.data))
# Check that testzip doesn't raise an exception
zipfp.testzip()
zipfp.close()
def testStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipTest(f, zipfile.ZIP_STORED)
if zlib:
def testDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipTest(f, zipfile.ZIP_DEFLATED)
def testAbsoluteArcnames(self):
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED, allowZip64=True)
zipfp.write(TESTFN, "/absolute")
zipfp.close()
zipfp = zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED)
self.assertEqual(zipfp.namelist(), ["absolute"])
zipfp.close()
def tearDown(self):
zipfile.ZIP64_LIMIT = self._limit
os.remove(TESTFN)
os.remove(TESTFN2)
class PyZipFileTests(unittest.TestCase):
def testWritePyfile(self):
zipfp = zipfile.PyZipFile(TemporaryFile(), "w")
fn = __file__
if fn.endswith('.pyc') or fn.endswith('.pyo'):
fn = fn[:-1]
zipfp.writepy(fn)
bn = os.path.basename(fn)
self.assert_(bn not in zipfp.namelist())
self.assert_(bn + 'o' in zipfp.namelist() or bn + 'c' in zipfp.namelist())
zipfp.close()
zipfp = zipfile.PyZipFile(TemporaryFile(), "w")
fn = __file__
if fn.endswith('.pyc') or fn.endswith('.pyo'):
fn = fn[:-1]
zipfp.writepy(fn, "testpackage")
bn = "%s/%s"%("testpackage", os.path.basename(fn))
self.assert_(bn not in zipfp.namelist())
self.assert_(bn + 'o' in zipfp.namelist() or bn + 'c' in zipfp.namelist())
zipfp.close()
def testWritePythonPackage(self):
import email
packagedir = os.path.dirname(email.__file__)
zipfp = zipfile.PyZipFile(TemporaryFile(), "w")
zipfp.writepy(packagedir)
# Check for a couple of modules at different levels of the hieararchy
names = zipfp.namelist()
self.assert_('email/__init__.pyo' in names or 'email/__init__.pyc' in names)
self.assert_('email/mime/text.pyo' in names or 'email/mime/text.pyc' in names)
def testWritePythonDirectory(self):
os.mkdir(TESTFN2)
try:
fp = open(os.path.join(TESTFN2, "mod1.py"), "w")
fp.write("print 42\n")
fp.close()
fp = open(os.path.join(TESTFN2, "mod2.py"), "w")
fp.write("print 42 * 42\n")
fp.close()
fp = open(os.path.join(TESTFN2, "mod2.txt"), "w")
fp.write("bla bla bla\n")
fp.close()
zipfp = zipfile.PyZipFile(TemporaryFile(), "w")
zipfp.writepy(TESTFN2)
names = zipfp.namelist()
self.assert_('mod1.pyc' in names or 'mod1.pyo' in names)
self.assert_('mod2.pyc' in names or 'mod2.pyo' in names)
self.assert_('mod2.txt' not in names)
finally:
shutil.rmtree(TESTFN2)
def testWriteNonPyfile(self):
zipfp = zipfile.PyZipFile(TemporaryFile(), "w")
file(TESTFN, 'w').write('most definitely not a python file')
self.assertRaises(RuntimeError, zipfp.writepy, TESTFN)
os.remove(TESTFN)
class OtherTests(unittest.TestCase):
def testUnicodeFilenames(self):
zf = zipfile.ZipFile(TESTFN, "w")
zf.writestr(u"foo.txt", "Test for unicode filename")
zf.writestr(u"\xf6.txt", "Test for unicode filename")
self.assertTrue(isinstance(zf.infolist()[0].filename, unicode))
zf.close()
zf = zipfile.ZipFile(TESTFN, "r")
self.assertEqual(zf.filelist[0].filename, "foo.txt")
self.assertEqual(zf.filelist[1].filename, u"\xf6.txt")
zf.close()
def testCreateNonExistentFileForAppend(self):
if os.path.exists(TESTFN):
os.unlink(TESTFN)
filename = 'testfile.txt'
content = 'hello, world. this is some content.'
try:
zf = zipfile.ZipFile(TESTFN, 'a')
zf.writestr(filename, content)
zf.close()
except IOError, (errno, errmsg):
self.fail('Could not append data to a non-existent zip file.')
self.assert_(os.path.exists(TESTFN))
zf = zipfile.ZipFile(TESTFN, 'r')
self.assertEqual(zf.read(filename), content)
zf.close()
def testCloseErroneousFile(self):
# This test checks that the ZipFile constructor closes the file object
# it opens if there's an error in the file. If it doesn't, the traceback
# holds a reference to the ZipFile object and, indirectly, the file object.
# On Windows, this causes the os.unlink() call to fail because the
# underlying file is still open. This is SF bug #412214.
#
fp = open(TESTFN, "w")
fp.write("this is not a legal zip file\n")
fp.close()
try:
zf = zipfile.ZipFile(TESTFN)
except zipfile.BadZipfile:
pass
def testIsZipErroneousFile(self):
# This test checks that the is_zipfile function correctly identifies
# a file that is not a zip file
fp = open(TESTFN, "w")
fp.write("this is not a legal zip file\n")
fp.close()
chk = zipfile.is_zipfile(TESTFN)
self.assert_(chk is False)
def testIsZipValidFile(self):
# This test checks that the is_zipfile function correctly identifies
# a file that is a zip file
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
chk = zipfile.is_zipfile(TESTFN)
self.assert_(chk is True)
def testNonExistentFileRaisesIOError(self):
# make sure we don't raise an AttributeError when a partially-constructed
# ZipFile instance is finalized; this tests for regression on SF tracker
# bug #403871.
# The bug we're testing for caused an AttributeError to be raised
# when a ZipFile instance was created for a file that did not
# exist; the .fp member was not initialized but was needed by the
# __del__() method. Since the AttributeError is in the __del__(),
# it is ignored, but the user should be sufficiently annoyed by
# the message on the output that regression will be noticed
# quickly.
self.assertRaises(IOError, zipfile.ZipFile, TESTFN)
def testClosedZipRaisesRuntimeError(self):
# Verify that testzip() doesn't swallow inappropriate exceptions.
data = StringIO()
zipf = zipfile.ZipFile(data, mode="w")
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
# This is correct; calling .read on a closed ZipFile should throw
# a RuntimeError, and so should calling .testzip. An earlier
# version of .testzip would swallow this exception (and any other)
# and report that the first file in the archive was corrupt.
self.assertRaises(RuntimeError, zipf.read, "foo.txt")
self.assertRaises(RuntimeError, zipf.open, "foo.txt")
self.assertRaises(RuntimeError, zipf.testzip)
self.assertRaises(RuntimeError, zipf.writestr, "bogus.txt", "bogus")
file(TESTFN, 'w').write('zipfile test data')
self.assertRaises(RuntimeError, zipf.write, TESTFN)
def test_BadConstructorMode(self):
# Check that bad modes passed to ZipFile constructor are caught
self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "q")
def test_BadOpenMode(self):
# Check that bad modes passed to ZipFile.open are caught
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
zipf = zipfile.ZipFile(TESTFN, mode="r")
# read the data to make sure the file is there
zipf.read("foo.txt")
self.assertRaises(RuntimeError, zipf.open, "foo.txt", "q")
zipf.close()
def test_Read0(self):
# Check that calling read(0) on a ZipExtFile object returns an empty
# string and doesn't advance file pointer
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
# read the data to make sure the file is there
f = zipf.open("foo.txt")
for i in xrange(FIXEDTEST_SIZE):
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(), "O, for a Muse of Fire!")
zipf.close()
def test_OpenNonexistentItem(self):
# Check that attempting to call open() for an item that doesn't
# exist in the archive raises a RuntimeError
zipf = zipfile.ZipFile(TESTFN, mode="w")
self.assertRaises(KeyError, zipf.open, "foo.txt", "r")
def test_BadCompressionMode(self):
# Check that bad compression methods passed to ZipFile.open are caught
self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "w", -1)
def test_NullByteInFilename(self):
# Check that a filename containing a null byte is properly terminated
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.writestr("foo.txt\x00qqq", "O, for a Muse of Fire!")
self.assertEqual(zipf.namelist(), ['foo.txt'])
def test_StructSizes(self):
# check that ZIP internal structure sizes are calculated correctly
self.assertEqual(zipfile.sizeEndCentDir, 22)
self.assertEqual(zipfile.sizeCentralDir, 46)
self.assertEqual(zipfile.sizeEndCentDir64, 56)
self.assertEqual(zipfile.sizeEndCentDir64Locator, 20)
def testComments(self):
# This test checks that comments on the archive are handled properly
# check default comment is empty
zipf = zipfile.ZipFile(TESTFN, mode="w")
self.assertEqual(zipf.comment, '')
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
zipfr = zipfile.ZipFile(TESTFN, mode="r")
self.assertEqual(zipfr.comment, '')
zipfr.close()
# check a simple short comment
comment = 'Bravely taking to his feet, he beat a very brave retreat.'
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.comment = comment
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
zipfr = zipfile.ZipFile(TESTFN, mode="r")
self.assertEqual(zipfr.comment, comment)
zipfr.close()
# check a comment of max length
comment2 = ''.join(['%d' % (i**3 % 10) for i in xrange((1 << 16)-1)])
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.comment = comment2
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
zipfr = zipfile.ZipFile(TESTFN, mode="r")
self.assertEqual(zipfr.comment, comment2)
zipfr.close()
# check a comment that is too long is truncated
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.comment = comment2 + 'oops'
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
zipfr = zipfile.ZipFile(TESTFN, mode="r")
self.assertEqual(zipfr.comment, comment2)
zipfr.close()
def tearDown(self):
support.unlink(TESTFN)
support.unlink(TESTFN2)
class DecryptionTests(unittest.TestCase):
    # This test checks that ZIP decryption works. Since the library does not
    # support encryption at the moment, we use a pre-generated encrypted
    # ZIP file.

    # Encrypted archive containing "test.txt" (password "python" --
    # see testGoodPassword below).
    data = (
    'PK\x03\x04\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00\x1a\x00'
    '\x00\x00\x08\x00\x00\x00test.txt\xfa\x10\xa0gly|\xfa-\xc5\xc0=\xf9y'
    '\x18\xe0\xa8r\xb3Z}Lg\xbc\xae\xf9|\x9b\x19\xe4\x8b\xba\xbb)\x8c\xb0\xdbl'
    'PK\x01\x02\x14\x00\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00'
    '\x1a\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x01\x00 \x00\xb6\x81'
    '\x00\x00\x00\x00test.txtPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x006\x00'
    '\x00\x00L\x00\x00\x00\x00\x00' )

    # Encrypted archive containing "zero" (password "12345").
    data2 = (
    'PK\x03\x04\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02'
    '\x00\x00\x04\x00\x15\x00zeroUT\t\x00\x03\xd6\x8b\x92G\xda\x8b\x92GUx\x04'
    '\x00\xe8\x03\xe8\x03\xc7<M\xb5a\xceX\xa3Y&\x8b{oE\xd7\x9d\x8c\x98\x02\xc0'
    'PK\x07\x08xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00PK\x01\x02\x17\x03'
    '\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00'
    '\x04\x00\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00\x00\x00\x00ze'
    'roUT\x05\x00\x03\xd6\x8b\x92GUx\x00\x00PK\x05\x06\x00\x00\x00\x00\x01'
    '\x00\x01\x00?\x00\x00\x00[\x00\x00\x00\x00\x00' )

    # Expected plaintext of each archive member after decryption.
    plain = 'zipfile.py encryption test'
    plain2 = '\x00'*512

    def setUp(self):
        # Materialize both pre-generated archives on disk and open them.
        fp = open(TESTFN, "wb")
        fp.write(self.data)
        fp.close()
        self.zip = zipfile.ZipFile(TESTFN, "r")
        fp = open(TESTFN2, "wb")
        fp.write(self.data2)
        fp.close()
        self.zip2 = zipfile.ZipFile(TESTFN2, "r")

    def tearDown(self):
        self.zip.close()
        os.unlink(TESTFN)
        self.zip2.close()
        os.unlink(TESTFN2)

    def testNoPassword(self):
        # Reading the encrypted file without password
        # must generate a RunTime exception
        self.assertRaises(RuntimeError, self.zip.read, "test.txt")
        self.assertRaises(RuntimeError, self.zip2.read, "zero")

    def testBadPassword(self):
        # A wrong password must also raise RuntimeError, not return garbage.
        self.zip.setpassword("perl")
        self.assertRaises(RuntimeError, self.zip.read, "test.txt")
        self.zip2.setpassword("perl")
        self.assertRaises(RuntimeError, self.zip2.read, "zero")

    def testGoodPassword(self):
        # With the correct password, the decrypted content must match the
        # known plaintext exactly.
        self.zip.setpassword("python")
        self.assertEquals(self.zip.read("test.txt"), self.plain)
        self.zip2.setpassword("12345")
        self.assertEquals(self.zip2.read("zero"), self.plain2)
class TestsWithRandomBinaryFiles(unittest.TestCase):
    """Round-trip a randomly generated binary payload through a ZIP archive.

    The payload is written to TESTFN, archived under two member names, then
    read back via ZipFile.read() and ZipFile.open() and compared with the
    original bytes.
    """

    def setUp(self):
        # 16-64 KiB of packed floats plus a ragged tail, so the payload size
        # is not a multiple of any convenient block/chunk size.
        datacount = randint(16, 64)*1024 + randint(1, 1024)
        self.data = ''.join((struct.pack('<f', random()*randint(-1000, 1000))
                             for i in xrange(datacount)))
        # Make a source file with the random payload.
        fp = open(TESTFN, "wb")
        fp.write(self.data)
        fp.close()

    def tearDown(self):
        support.unlink(TESTFN)
        support.unlink(TESTFN2)

    def makeTestArchive(self, f, compression):
        # Create the ZIP archive with the payload stored under two names.
        zipfp = zipfile.ZipFile(f, "w", compression)
        zipfp.write(TESTFN, "another"+os.extsep+"name")
        zipfp.write(TESTFN, TESTFN)
        zipfp.close()

    def zipTest(self, f, compression):
        self.makeTestArchive(f, compression)

        # Read the ZIP archive back; both members must match the payload.
        zipfp = zipfile.ZipFile(f, "r", compression)
        testdata = zipfp.read(TESTFN)
        self.assertEqual(len(testdata), len(self.data))
        self.assertEqual(testdata, self.data)
        self.assertEqual(zipfp.read("another"+os.extsep+"name"), self.data)
        zipfp.close()

    def testStored(self):
        for f in (TESTFN2, TemporaryFile(), StringIO()):
            self.zipTest(f, zipfile.ZIP_STORED)

    def zipOpenTest(self, f, compression):
        self.makeTestArchive(f, compression)

        # Read both members through ZipFile.open() in fixed-size chunks.
        zipfp = zipfile.ZipFile(f, "r", compression)
        zipdata1 = []
        zipopen1 = zipfp.open(TESTFN)
        while 1:
            read_data = zipopen1.read(256)
            if not read_data:
                break
            zipdata1.append(read_data)

        zipdata2 = []
        zipopen2 = zipfp.open("another"+os.extsep+"name")
        while 1:
            read_data = zipopen2.read(256)
            if not read_data:
                break
            zipdata2.append(read_data)

        testdata1 = ''.join(zipdata1)
        self.assertEqual(len(testdata1), len(self.data))
        self.assertEqual(testdata1, self.data)

        # BUG FIX: the original re-asserted on testdata1 here, so the data
        # read from the second member was never actually verified.
        testdata2 = ''.join(zipdata2)
        self.assertEqual(len(testdata2), len(self.data))
        self.assertEqual(testdata2, self.data)
        zipfp.close()

    def testOpenStored(self):
        for f in (TESTFN2, TemporaryFile(), StringIO()):
            self.zipOpenTest(f, zipfile.ZIP_STORED)

    def zipRandomOpenTest(self, f, compression):
        self.makeTestArchive(f, compression)

        # Read with randomly sized read() calls to exercise the buffering
        # logic in ZipExtFile.
        zipfp = zipfile.ZipFile(f, "r", compression)
        zipdata1 = []
        zipopen1 = zipfp.open(TESTFN)
        while 1:
            read_data = zipopen1.read(randint(1, 1024))
            if not read_data:
                break
            zipdata1.append(read_data)

        testdata = ''.join(zipdata1)
        self.assertEqual(len(testdata), len(self.data))
        self.assertEqual(testdata, self.data)
        zipfp.close()

    def testRandomOpenStored(self):
        for f in (TESTFN2, TemporaryFile(), StringIO()):
            self.zipRandomOpenTest(f, zipfile.ZIP_STORED)
class TestsWithMultipleOpens(unittest.TestCase):
    # Verify that several ZipExtFile objects obtained from the same ZipFile
    # can be read concurrently without corrupting each other's data.

    def setUp(self):
        # Create the ZIP archive with two easily distinguishable members.
        zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_DEFLATED)
        zipfp.writestr('ones', '1'*FIXEDTEST_SIZE)
        zipfp.writestr('twos', '2'*FIXEDTEST_SIZE)
        zipfp.close()

    def testSameFile(self):
        # Verify that (when the ZipFile is in control of creating file objects)
        # multiple open() calls can be made without interfering with each other.
        zipf = zipfile.ZipFile(TESTFN2, mode="r")
        zopen1 = zipf.open('ones')
        zopen2 = zipf.open('ones')
        data1 = zopen1.read(500)
        data2 = zopen2.read(500)
        data1 += zopen1.read(500)
        data2 += zopen2.read(500)
        self.assertEqual(data1, data2)
        zipf.close()

    def testDifferentFile(self):
        # Verify that (when the ZipFile is in control of creating file objects)
        # multiple open() calls can be made without interfering with each other.
        zipf = zipfile.ZipFile(TESTFN2, mode="r")
        zopen1 = zipf.open('ones')
        zopen2 = zipf.open('twos')
        data1 = zopen1.read(500)
        data2 = zopen2.read(500)
        data1 += zopen1.read(500)
        data2 += zopen2.read(500)
        self.assertEqual(data1, '1'*FIXEDTEST_SIZE)
        self.assertEqual(data2, '2'*FIXEDTEST_SIZE)
        zipf.close()

    def testInterleaved(self):
        # Verify that (when the ZipFile is in control of creating file objects)
        # multiple open() calls can be made without interfering with each
        # other, even when the second open() happens mid-read of the first.
        zipf = zipfile.ZipFile(TESTFN2, mode="r")
        zopen1 = zipf.open('ones')
        data1 = zopen1.read(500)
        zopen2 = zipf.open('twos')
        data2 = zopen2.read(500)
        data1 += zopen1.read(500)
        data2 += zopen2.read(500)
        self.assertEqual(data1, '1'*FIXEDTEST_SIZE)
        self.assertEqual(data2, '2'*FIXEDTEST_SIZE)
        zipf.close()

    def tearDown(self):
        os.remove(TESTFN2)
class TestWithDirectory(unittest.TestCase):
    """Tests for handling of directory entries when extracting and writing."""

    def setUp(self):
        os.mkdir(TESTFN2)

    def testExtractDir(self):
        # Extracting an archive containing directory entries must recreate
        # the directory tree on disk.
        zipf = zipfile.ZipFile(findfile("zipdir.zip"))
        zipf.extractall(TESTFN2)
        # BUG FIX: the archive handle was never closed in the original.
        zipf.close()
        self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a")))
        self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a", "b")))
        self.assertTrue(os.path.exists(os.path.join(TESTFN2, "a", "b", "c")))

    def testStoreDir(self):
        # Writing a directory stores it with a trailing slash in its name.
        os.mkdir(os.path.join(TESTFN2, "x"))
        zipf = zipfile.ZipFile(TESTFN, "w")
        zipf.write(os.path.join(TESTFN2, "x"), "x")
        self.assertTrue(zipf.filelist[0].filename.endswith("x/"))
        # BUG FIX: the archive handle was never closed in the original;
        # tearDown then unlinks a still-open file.
        zipf.close()

    def tearDown(self):
        shutil.rmtree(TESTFN2)
        if os.path.exists(TESTFN):
            os.remove(TESTFN)
class UniversalNewlineTests(unittest.TestCase):
    """Check universal-newline ("rU") reading of archive members.

    One source file is created per line separator ('\\r', '\\r\\n', '\\n'),
    all containing the same logical lines; reading any of them back in "rU"
    mode must yield '\\n'-terminated lines.
    """

    def setUp(self):
        self.line_gen = ["Test of zipfile line %d." % i
                         for i in xrange(FIXEDTEST_SIZE)]
        self.seps = ('\r', '\r\n', '\n')
        self.arcdata, self.arcfiles = {}, {}
        for n, s in enumerate(self.seps):
            self.arcdata[s] = s.join(self.line_gen) + s
            self.arcfiles[s] = '%s-%d' % (TESTFN, n)
            # BUG FIX: the original used open(...).write(...) without
            # closing, leaking the handle and relying on refcounting to
            # flush the data.
            fp = open(self.arcfiles[s], "wb")
            fp.write(self.arcdata[s])
            fp.close()

    def makeTestArchive(self, f, compression):
        # Create the ZIP archive with one member per separator style.
        zipfp = zipfile.ZipFile(f, "w", compression)
        for fn in self.arcfiles.values():
            zipfp.write(fn, fn)
        zipfp.close()

    def readTest(self, f, compression):
        self.makeTestArchive(f, compression)

        # read() in "rU" mode must return the raw data with its original
        # separators intact.
        zipfp = zipfile.ZipFile(f, "r")
        for sep, fn in self.arcfiles.items():
            zipdata = zipfp.open(fn, "rU").read()
            self.assertEqual(self.arcdata[sep], zipdata)
        zipfp.close()

    def readlineTest(self, f, compression):
        self.makeTestArchive(f, compression)

        # readline() in "rU" mode must translate every separator to '\n'.
        zipfp = zipfile.ZipFile(f, "r")
        for sep, fn in self.arcfiles.items():
            zipopen = zipfp.open(fn, "rU")
            for line in self.line_gen:
                linedata = zipopen.readline()
                self.assertEqual(linedata, line + '\n')
        zipfp.close()

    def readlinesTest(self, f, compression):
        self.makeTestArchive(f, compression)

        # readlines() must agree with readline() on separator translation.
        zipfp = zipfile.ZipFile(f, "r")
        for sep, fn in self.arcfiles.items():
            ziplines = zipfp.open(fn, "rU").readlines()
            for line, zipline in zip(self.line_gen, ziplines):
                self.assertEqual(zipline, line + '\n')
        zipfp.close()

    def iterlinesTest(self, f, compression):
        self.makeTestArchive(f, compression)

        # Iterating the open member must also yield '\n'-terminated lines.
        zipfp = zipfile.ZipFile(f, "r")
        for sep, fn in self.arcfiles.items():
            for line, zipline in zip(self.line_gen, zipfp.open(fn, "rU")):
                self.assertEqual(zipline, line + '\n')
        zipfp.close()

    def testReadStored(self):
        for f in (TESTFN2, TemporaryFile(), StringIO()):
            self.readTest(f, zipfile.ZIP_STORED)

    def testReadlineStored(self):
        for f in (TESTFN2, TemporaryFile(), StringIO()):
            self.readlineTest(f, zipfile.ZIP_STORED)

    def testReadlinesStored(self):
        for f in (TESTFN2, TemporaryFile(), StringIO()):
            self.readlinesTest(f, zipfile.ZIP_STORED)

    def testIterlinesStored(self):
        for f in (TESTFN2, TemporaryFile(), StringIO()):
            self.iterlinesTest(f, zipfile.ZIP_STORED)

    if zlib:
        # Deflate-compressed variants; only run when zlib is available.
        def testReadDeflated(self):
            for f in (TESTFN2, TemporaryFile(), StringIO()):
                self.readTest(f, zipfile.ZIP_DEFLATED)

        def testReadlineDeflated(self):
            for f in (TESTFN2, TemporaryFile(), StringIO()):
                self.readlineTest(f, zipfile.ZIP_DEFLATED)

        def testReadlinesDeflated(self):
            for f in (TESTFN2, TemporaryFile(), StringIO()):
                self.readlinesTest(f, zipfile.ZIP_DEFLATED)

        def testIterlinesDeflated(self):
            for f in (TESTFN2, TemporaryFile(), StringIO()):
                self.iterlinesTest(f, zipfile.ZIP_DEFLATED)

    def tearDown(self):
        for sep, fn in self.arcfiles.items():
            os.remove(fn)
        support.unlink(TESTFN)
        support.unlink(TESTFN2)
def test_main():
    # Run every test class defined in this module through the shared
    # test.support harness.
    run_unittest(TestsWithSourceFile, TestZip64InSmallFiles, OtherTests,
                 PyZipFileTests, DecryptionTests, TestsWithMultipleOpens,
                 TestWithDirectory,
                 UniversalNewlineTests, TestsWithRandomBinaryFiles)

if __name__ == "__main__":
    test_main()
|
Explosound/ExplosoundCluster | refs/heads/master | pymir/MFCC.py | 2 | """
MFCC methods
Compute Mel-Frequency Cepstral Coefficients
Last updated: 17 December 2012
"""
from __future__ import division
import math
import numpy
from numpy import *
import scipy
from scipy.fftpack import *
def mfcc2(spectrum, numFilters = 32):
    """
    Alternative (and vectorized) MFCC computation from Steve Tjoa.

    Applies `filterbank` to the spectrum, takes the log of the filter-bank
    energies, and returns their DCT-II (orthonormal) coefficients.

    NOTE(review): `spectrum` must expose a `sampleRate` attribute (a pymir
    Spectrum-like object), not a plain array -- verify at call sites.
    """
    fb = filterbank(spectrum, spectrum.sampleRate, numFilters)
    coeff = scipy.fftpack.dct(scipy.log(fb), type = 2, norm = 'ortho')
    return coeff
def filterbank(x, fs, numFilters):
    """
    Apply a bank of triangular band filters to spectrum `x`.

    Parameters:
        x          -- indexable spectrum magnitudes (len(x) frequency bins)
        fs         -- sample rate used to map frequencies to bin indices
        numFilters -- number of bands to compute

    Returns a numpy array of `numFilters` absolute band energies. The first
    band is centered at 110.0 Hz and each subsequent band's edges are a
    factor of 2**(1/6) (one whole tone) higher.
    """
    ratio = 2 ** (1.0 / 6)
    f2 = 110.0          # center frequency of the first band
    f1 = f2 / ratio     # lower edge
    f3 = f2 * ratio     # upper edge
    # Simplified from scipy.array(scipy.zeros(...)): the scipy numpy-alias
    # functions are deprecated/removed in modern scipy, and numpy.zeros
    # already returns the same float64 array. (Also removed an unused
    # local `n = len(x)` from the original.)
    fb = numpy.zeros(numFilters)
    for i in range(numFilters):
        fb[i] = numpy.absolute(fbwin(x, fs, f1, f2, f3))
        # Shift all three band edges up by one band for the next iteration.
        f1, f2, f3 = f2, f3, f3 * ratio
    return fb
def fbwin(x, fs, f1, f2, f3):
    """
    Weighted sum of the spectrum bins between frequencies f1 and f3.

    Bin weights rise linearly from 0 at f1 to 1 at the center frequency f2,
    then fall linearly back toward 0 at f3 (a triangular window). Frequencies
    are mapped to bin indices as int(len(x) * f / fs).
    """
    num_bins = len(x)
    lo = int(num_bins * f1 / fs)
    mid = int(num_bins * f2 / fs)
    hi = int(num_bins * f3 / fs)

    acc = x[mid]  # center bin gets full weight
    # Rising edge: weight grows from 0 (at lo) toward 1 (at mid).
    for b in range(lo, mid):
        acc = acc + x[b] * (b - lo) / (mid - lo)
    # Falling edge: weight shrinks from 1 (just past mid) toward 0 (at hi).
    for b in range(mid + 1, hi):
        acc = acc + x[b] * (1 - (b - mid) / (hi - mid))
    return acc
def mfcc(spectrum, m, NumFilters = 48):
    """
    Compute the Mth Mel-Frequency Cepstral Coefficient.

    Parameters:
        spectrum   -- indexable spectrum with a `sampleRate` attribute
        m          -- coefficient index; must be < NumFilters
        NumFilters -- number of mel filter bands

    Returns 0 when m >= NumFilters (the original treats that as an error
    condition with undefined behavior).
    """
    # Requesting a coefficient at or beyond the filter count is an error
    # condition; keep the original convention of returning 0.
    if m >= NumFilters:
        return 0

    binSize = len(spectrum)
    outerSum = 0
    for filterBand in range(1, NumFilters + 1):
        # Accumulate this band's weighted spectral energy.
        # NOTE: the upper bound binSize - 1 skips the last bin, matching
        # the original implementation.
        bandEnergy = 0
        for frequencyBand in range(binSize - 1):
            weight = filterParameter(binSize, frequencyBand, filterBand,
                                     spectrum.sampleRate)
            bandEnergy = bandEnergy + abs(spectrum[frequencyBand] * weight)
        if bandEnergy > 0:
            bandEnergy = log(bandEnergy)  # log of 0 is undefined, so skip it
        bandEnergy = bandEnergy * math.cos(
            ((m * math.pi) / NumFilters) * (filterBand - 0.5))
        outerSum = outerSum + bandEnergy

    return normalizationFactor(NumFilters, m) * outerSum
def normalizationFactor(NumFilters, m):
    """
    Intermediate computation used by the mfcc function.

    Returns the orthonormal DCT scaling factor for coefficient m:
    sqrt(1/NumFilters) for the DC term (m == 0), sqrt(2/NumFilters)
    otherwise.
    """
    if m == 0:
        return math.sqrt(1.0 / NumFilters)
    return math.sqrt(2.0 / NumFilters)
def filterParameter(binSize, frequencyBand, filterBand, samplingRate):
    """
    Intermediate computation used by the mfcc function.

    Returns the triangular filter weight for frequency bin `frequencyBand`
    in mel band `filterBand`: 0 outside [fc(l-1), fc(l+1)], rising linearly
    up to the band center fc(l), then falling linearly back to 0. The
    weight is scaled by the band's magnitude factor.
    """
    boundary = (frequencyBand * samplingRate) / float(binSize)  # k * Fs / N
    prevCenter = getCenterFrequency(filterBand - 1)  # fc(l - 1)
    center = getCenterFrequency(filterBand)          # fc(l)
    nextCenter = getCenterFrequency(filterBand + 1)  # fc(l + 1)

    if 0 <= boundary < prevCenter:
        return 0
    if prevCenter <= boundary < center:
        # Rising edge of the triangle.
        slope = (boundary - prevCenter) / (center - prevCenter)
        return slope * getMagnitudeFactor(filterBand)
    if center <= boundary < nextCenter:
        # Falling edge of the triangle.
        slope = (boundary - nextCenter) / (center - nextCenter)
        return slope * getMagnitudeFactor(filterBand)
    # Past the upper edge (or outside every branch): zero weight.
    return 0
def getMagnitudeFactor(filterBand):
    """
    Intermediate computation used by the mfcc function.

    Returns the band-dependent magnitude factor: a constant 0.015 in the
    linear region (bands 1-14), 2 / (fc(l+1) - fc(l-1)) in the logarithmic
    region (bands 15-48), and 0 for any other band index.
    """
    if 1 <= filterBand <= 14:
        return 0.015
    if 15 <= filterBand <= 48:
        return 2.0 / (getCenterFrequency(filterBand + 1) -
                      getCenterFrequency(filterBand - 1))
    return 0
def getCenterFrequency(filterBand):
    """
    Intermediate computation used by the mfcc function.

    Compute the center frequency (fc) of the specified filter band (l).
    This is where the mel-frequency scaling occurs: band centers are spaced
    linearly (200/3 Hz apart) for bands 1-14 and geometrically (factor
    1.0711703 per band, anchored at 1073.4 Hz) above that.
    """
    if filterBand == 0:
        return 0
    if 1 <= filterBand <= 14:
        # Linear region below ~1 kHz.
        return (200.0 * filterBand) / 3.0
    # Logarithmic region: fc = 1073.4 * 1.0711703**(l - 14).
    return math.pow(1.0711703, filterBand - 14) * 1073.4
caLew/sugartest | refs/heads/master | src/jarabe/__init__.py | 18 | """OLPC Sugar Graphical "Shell" Interface
Provides the shell-level operations for managing
the OLPC laptop computers. It interacts heavily
with (and depends upon) the Sugar UI libraries.
This is a "graphical" shell, the name does not
refer to a command-line "shell" interface.
"""
# Copyright (C) 2006-2007, Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
iledarn/access-addons | refs/heads/9.0 | access_apps/__init__.py | 1350 | import controllers
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.