| repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀ = null) |
|---|---|---|---|---|
mbauskar/omnitech-erpnext
|
refs/heads/master
|
erpnext/accounts/general_ledger.py
|
17
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, cint
from frappe import _
from frappe.model.meta import get_field_precision
from erpnext.accounts.utils import validate_expense_against_budget
class StockAccountInvalidTransaction(frappe.ValidationError): pass
def make_gl_entries(gl_map, cancel=False, adv_adj=False, merge_entries=True, update_outstanding='Yes'):
if gl_map:
if not cancel:
gl_map = process_gl_map(gl_map, merge_entries)
if gl_map and len(gl_map) > 1:
save_entries(gl_map, adv_adj, update_outstanding)
else:
frappe.throw(_("Incorrect number of General Ledger Entries found. You might have selected a wrong Account in the transaction."))
else:
delete_gl_entries(gl_map, adv_adj=adv_adj, update_outstanding=update_outstanding)
def process_gl_map(gl_map, merge_entries=True):
if merge_entries:
gl_map = merge_similar_entries(gl_map)
for entry in gl_map:
# toggle debit, credit if negative entry
if flt(entry.debit) < 0:
entry.credit = flt(entry.credit) - flt(entry.debit)
entry.debit = 0.0
if flt(entry.debit_in_account_currency) < 0:
entry.credit_in_account_currency = \
flt(entry.credit_in_account_currency) - flt(entry.debit_in_account_currency)
entry.debit_in_account_currency = 0.0
if flt(entry.credit) < 0:
entry.debit = flt(entry.debit) - flt(entry.credit)
entry.credit = 0.0
if flt(entry.credit_in_account_currency) < 0:
entry.debit_in_account_currency = \
flt(entry.debit_in_account_currency) - flt(entry.credit_in_account_currency)
entry.credit_in_account_currency = 0.0
return gl_map
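# A minimal sketch (illustrative, not part of the original module) of the
# sign flip above: a negative debit is folded into the credit side so GL
# entries never carry negative amounts.
#
#   entry = frappe._dict(debit=-100.0, credit=0.0,
#                        debit_in_account_currency=-100.0,
#                        credit_in_account_currency=0.0)
#   process_gl_map([entry], merge_entries=False)
#   # -> entry.debit == 0.0 and entry.credit == 100.0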
def merge_similar_entries(gl_map):
merged_gl_map = []
for entry in gl_map:
# if there is already an entry in this account then just add it
# to that entry
same_head = check_if_in_list(entry, merged_gl_map)
if same_head:
same_head.debit = flt(same_head.debit) + flt(entry.debit)
same_head.debit_in_account_currency = \
flt(same_head.debit_in_account_currency) + flt(entry.debit_in_account_currency)
same_head.credit = flt(same_head.credit) + flt(entry.credit)
same_head.credit_in_account_currency = \
flt(same_head.credit_in_account_currency) + flt(entry.credit_in_account_currency)
else:
merged_gl_map.append(entry)
# filter zero debit and credit entries
merged_gl_map = filter(lambda x: flt(x.debit, 9)!=0 or flt(x.credit, 9)!=0, merged_gl_map)
return merged_gl_map
def check_if_in_list(gle, gl_map):
for e in gl_map:
if e.account == gle.account \
and cstr(e.get('party_type'))==cstr(gle.get('party_type')) \
and cstr(e.get('party'))==cstr(gle.get('party')) \
and cstr(e.get('against_voucher'))==cstr(gle.get('against_voucher')) \
and cstr(e.get('against_voucher_type')) == cstr(gle.get('against_voucher_type')) \
and cstr(e.get('cost_center')) == cstr(gle.get('cost_center')):
return e
def save_entries(gl_map, adv_adj, update_outstanding):
validate_account_for_auto_accounting_for_stock(gl_map)
round_off_debit_credit(gl_map)
for entry in gl_map:
make_entry(entry, adv_adj, update_outstanding)
# check against budget
validate_expense_against_budget(entry)
def make_entry(args, adv_adj, update_outstanding):
args.update({"doctype": "GL Entry"})
gle = frappe.get_doc(args)
gle.flags.ignore_permissions = 1
gle.insert()
gle.run_method("on_update_with_args", adv_adj, update_outstanding)
gle.submit()
def validate_account_for_auto_accounting_for_stock(gl_map):
if cint(frappe.db.get_single_value("Accounts Settings", "auto_accounting_for_stock")) \
and gl_map[0].voucher_type=="Journal Entry":
aii_accounts = [d[0] for d in frappe.db.sql("""select name from tabAccount
where account_type = 'Warehouse' and ifnull(warehouse, '')!=''""")]
for entry in gl_map:
if entry.account in aii_accounts:
frappe.throw(_("Account: {0} can only be updated via Stock Transactions")
.format(entry.account), StockAccountInvalidTransaction)
def round_off_debit_credit(gl_map):
precision = get_field_precision(frappe.get_meta("GL Entry").get_field("debit"),
currency=frappe.db.get_value("Company", gl_map[0].company, "default_currency", cache=True))
debit_credit_diff = 0.0
for entry in gl_map:
entry.debit = flt(entry.debit, precision)
entry.credit = flt(entry.credit, precision)
debit_credit_diff += entry.debit - entry.credit
debit_credit_diff = flt(debit_credit_diff, precision)
if abs(debit_credit_diff) >= (5.0 / (10**precision)):
frappe.throw(_("Debit and Credit not equal for {0} #{1}. Difference is {2}.")
.format(gl_map[0].voucher_type, gl_map[0].voucher_no, debit_credit_diff))
elif abs(debit_credit_diff) >= (1.0 / (10**precision)):
make_round_off_gle(gl_map, debit_credit_diff)
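# Worked example of the thresholds above (values illustrative): with a
# currency precision of 2, 5.0 / 10**2 == 0.05 and 1.0 / 10**2 == 0.01.
# A debit/credit difference of 0.07 exceeds 0.05 and raises an error,
# while a difference of 0.03 falls inside [0.01, 0.05) and is absorbed
# by the round-off GL entry built in make_round_off_gle() below.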
def make_round_off_gle(gl_map, debit_credit_diff):
round_off_account, round_off_cost_center = frappe.db.get_value("Company", gl_map[0].company,
["round_off_account", "round_off_cost_center"]) or [None, None]
if not round_off_account:
frappe.throw(_("Please mention Round Off Account in Company"))
if not round_off_cost_center:
frappe.throw(_("Please mention Round Off Cost Center in Company"))
round_off_gle = frappe._dict()
for k in ["voucher_type", "voucher_no", "company",
"posting_date", "remarks", "fiscal_year", "is_opening"]:
round_off_gle[k] = gl_map[0][k]
round_off_gle.update({
"account": round_off_account,
"debit": abs(debit_credit_diff) if debit_credit_diff < 0 else 0,
"credit": debit_credit_diff if debit_credit_diff > 0 else 0,
"cost_center": round_off_cost_center,
"party_type": None,
"party": None,
"against_voucher_type": None,
"against_voucher": None
})
gl_map.append(round_off_gle)
def delete_gl_entries(gl_entries=None, voucher_type=None, voucher_no=None,
adv_adj=False, update_outstanding="Yes"):
from erpnext.accounts.doctype.gl_entry.gl_entry import validate_balance_type, \
check_freezing_date, update_outstanding_amt, validate_frozen_account
if not gl_entries:
gl_entries = frappe.db.sql("""select * from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no), as_dict=True)
if gl_entries:
check_freezing_date(gl_entries[0]["posting_date"], adv_adj)
frappe.db.sql("""delete from `tabGL Entry` where voucher_type=%s and voucher_no=%s""",
(voucher_type or gl_entries[0]["voucher_type"], voucher_no or gl_entries[0]["voucher_no"]))
for entry in gl_entries:
validate_frozen_account(entry["account"], adv_adj)
validate_balance_type(entry["account"], adv_adj)
validate_expense_against_budget(entry)
if entry.get("against_voucher") and update_outstanding == 'Yes':
update_outstanding_amt(entry["account"], entry.get("party_type"), entry.get("party"), entry.get("against_voucher_type"),
entry.get("against_voucher"), on_cancel=True)
|
johnny9/zulip
|
refs/heads/master
|
tools/run-dev.py
|
114
|
#!/usr/bin/env python
import optparse
import subprocess
import signal
import traceback
import sys
import os
from twisted.internet import reactor
from twisted.web import proxy, server, resource
# Monkey-patch twisted.web.http to avoid request.finish exceptions
# https://trac.zulip.net/ticket/1728
from twisted.web.http import Request
orig_finish = Request.finish
def patched_finish(self):
if self._disconnected:
return
return orig_finish(self)
Request.finish = patched_finish
parser = optparse.OptionParser(r"""
Starts the app listening on localhost, for local development.
This script launches the Django and Tornado servers, then runs a reverse proxy
which routes requests to both of them. After it's all up and running, browse to
http://localhost:9991/
Note that, while runserver and runtornado have the usual auto-restarting
behavior, the reverse proxy itself does *not* automatically restart on changes
to this file.
""")
parser.add_option('--test',
action='store_true', dest='test',
help='Use the testing database and ports')
parser.add_option('--interface',
action='store', dest='interface',
default='127.0.0.1', help='Set the IP or hostname for the proxy to listen on')
(options, args) = parser.parse_args()
base_port = 9991
manage_args = ''
if options.test:
base_port = 9981
settings_module = "zproject.test_settings"
else:
settings_module = "zproject.settings"
manage_args = ['--settings=%s' % (settings_module,)]
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
proxy_port = base_port
django_port = base_port+1
tornado_port = base_port+2
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
# Clean up stale .pyc files etc.
subprocess.check_call('./tools/clean-repo')
# Set up a new process group, so that we can later kill run{server,tornado}
# and all of the processes they spawn.
os.setpgrp()
# Pass --nostatic because we configure static serving ourselves in
# zulip/urls.py.
cmds = [['./tools/compile-handlebars-templates', 'forever'],
['python', 'manage.py', 'runserver', '--nostatic'] +
manage_args + ['localhost:%d' % (django_port,)],
['python', 'manage.py', 'runtornado'] +
manage_args + ['localhost:%d' % (tornado_port,)],
['./tools/run-dev-queue-processors'] + manage_args,
['env', 'PGHOST=localhost', # Force password authentication using .pgpass
'./puppet/zulip/files/postgresql/process_fts_updates']]
for cmd in cmds:
subprocess.Popen(cmd)
class Resource(resource.Resource):
def getChild(self, name, request):
# Assume an HTTP 1.1 request
proxy_host = request.requestHeaders.getRawHeaders('Host')
request.requestHeaders.setRawHeaders('X-Forwarded-Host', proxy_host)
if (request.uri in ['/json/get_events'] or
request.uri.startswith('/json/events') or
request.uri.startswith('/api/v1/events') or
request.uri.startswith('/sockjs')):
return proxy.ReverseProxyResource('localhost', tornado_port, '/'+name)
return proxy.ReverseProxyResource('localhost', django_port, '/'+name)
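# Illustrative routing (not part of the original script): with the
# default base_port of 9991, django_port is 9992 and tornado_port is
# 9993, so requests split as follows.
#
#   GET /json/events/...  -> localhost:9993 (Tornado, event long-polling)
#   GET /api/v1/events    -> localhost:9993 (Tornado)
#   GET /sockjs/...       -> localhost:9993 (Tornado)
#   GET /accounts/login   -> localhost:9992 (Django runserver)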
try:
reactor.listenTCP(proxy_port, server.Site(Resource()), interface=options.interface)
reactor.run()
except:
# Print the traceback before we get SIGTERM and die.
traceback.print_exc()
raise
finally:
# Kill everything in our process group.
os.killpg(0, signal.SIGTERM)
|
Ayub-Khan/edx-platform
|
refs/heads/master
|
common/djangoapps/edxmako/tests.py
|
20
|
from mock import patch, Mock
import unittest
import ddt
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from django.test.utils import override_settings
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
import edxmako.middleware
from edxmako.middleware import get_template_request_context
from edxmako import add_lookup, LOOKUP
from edxmako.shortcuts import (
marketing_link,
is_marketing_link_set,
is_any_marketing_link_set,
render_to_string,
open_source_footer_context_processor
)
from student.tests.factories import UserFactory
from util.testing import UrlResetMixin
@ddt.ddt
class ShortcutsTests(UrlResetMixin, TestCase):
"""
Test the edxmako shortcuts file
"""
@override_settings(MKTG_URLS={'ROOT': 'dummy-root', 'ABOUT': '/about-us'})
@override_settings(MKTG_URL_LINK_MAP={'ABOUT': 'login'})
def test_marketing_link(self):
# test marketing site on
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
expected_link = 'dummy-root/about-us'
link = marketing_link('ABOUT')
self.assertEquals(link, expected_link)
# test marketing site off
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': False}):
# we are using login because it is common across both cms and lms
expected_link = reverse('login')
link = marketing_link('ABOUT')
self.assertEquals(link, expected_link)
@override_settings(MKTG_URLS={'ROOT': 'dummy-root', 'ABOUT': '/about-us'})
@override_settings(MKTG_URL_LINK_MAP={'ABOUT': 'login'})
def test_is_marketing_link_set(self):
# test marketing site on
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
self.assertTrue(is_marketing_link_set('ABOUT'))
self.assertFalse(is_marketing_link_set('NOT_CONFIGURED'))
# test marketing site off
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': False}):
self.assertTrue(is_marketing_link_set('ABOUT'))
self.assertFalse(is_marketing_link_set('NOT_CONFIGURED'))
@override_settings(MKTG_URLS={'ROOT': 'dummy-root', 'ABOUT': '/about-us'})
@override_settings(MKTG_URL_LINK_MAP={'ABOUT': 'login'})
def test_is_any_marketing_link_set(self):
# test marketing site on
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
self.assertTrue(is_any_marketing_link_set(['ABOUT']))
self.assertTrue(is_any_marketing_link_set(['ABOUT', 'NOT_CONFIGURED']))
self.assertFalse(is_any_marketing_link_set(['NOT_CONFIGURED']))
# test marketing site off
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': False}):
self.assertTrue(is_any_marketing_link_set(['ABOUT']))
self.assertTrue(is_any_marketing_link_set(['ABOUT', 'NOT_CONFIGURED']))
self.assertFalse(is_any_marketing_link_set(['NOT_CONFIGURED']))
@ddt.data((True, None), (False, None))
@ddt.unpack
def test_edx_footer(self, expected_result, _):
with patch.dict('django.conf.settings.FEATURES', {
'IS_EDX_DOMAIN': expected_result
}):
result = open_source_footer_context_processor({})
self.assertEquals(expected_result, result.get('IS_EDX_DOMAIN'))
class AddLookupTests(TestCase):
"""
Test the `add_lookup` function.
"""
@patch('edxmako.LOOKUP', {})
def test_with_package(self):
add_lookup('test', 'management', __name__)
dirs = LOOKUP['test'].directories
self.assertEqual(len(dirs), 1)
self.assertTrue(dirs[0].endswith('management'))
class MakoMiddlewareTest(TestCase):
"""
Test MakoMiddleware.
"""
def setUp(self):
super(MakoMiddlewareTest, self).setUp()
self.middleware = edxmako.middleware.MakoMiddleware()
self.user = UserFactory.create()
self.url = "/"
self.request = RequestFactory().get(self.url)
self.request.user = self.user
self.response = Mock(spec=HttpResponse)
def test_clear_request_context_variable(self):
"""
Test that the global variable requestcontext is cleared correctly
when the response middleware is called.
"""
self.middleware.process_request(self.request)
# requestcontext should not be None.
self.assertIsNotNone(get_template_request_context())
self.middleware.process_response(self.request, self.response)
# requestcontext should be None.
self.assertIsNone(get_template_request_context())
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@patch("edxmako.middleware.REQUEST_CONTEXT")
def test_render_to_string_when_no_global_context_lms(self, context_mock):
"""
Test render_to_string() when makomiddleware has not initialized
the threadlocal REQUEST_CONTEXT.context. This is meant to run in LMS.
"""
del context_mock.context
self.assertIn("this module is temporarily unavailable", render_to_string("courseware/error-message.html", None))
@unittest.skipUnless(settings.ROOT_URLCONF == 'cms.urls', 'Test only valid in cms')
@patch("edxmako.middleware.REQUEST_CONTEXT")
def test_render_to_string_when_no_global_context_cms(self, context_mock):
"""
Test render_to_string() when makomiddleware has not initialized
the threadlocal REQUEST_CONTEXT.context. This is meant to run in CMS.
"""
del context_mock.context
self.assertIn("We're having trouble rendering your component", render_to_string("html_error.html", None))
def mako_middleware_process_request(request):
"""
Initialize the global RequestContext variable
edxmako.middleware.requestcontext using the request object.
"""
mako_middleware = edxmako.middleware.MakoMiddleware()
mako_middleware.process_request(request)
|
davidsb30/eve-wspace
|
refs/heads/master
|
evewspace/search/views.py
|
17
|
# Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.http import HttpResponse, Http404
import registry
# Create your views here.
def search_view(request, search):
"""
This view instantiates the proper search class and returns
the result_json as an HttpResponse.
"""
try:
searchClass = registry.registry[search]
except KeyError:
raise Http404
return HttpResponse(searchClass(request).result_json())
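# A hypothetical registry entry, sketched to show how search_view
# dispatches (the class and module names are illustrative, not from the
# original app):
#
#   # in some_app/search_backends.py
#   import registry
#
#   class SystemSearch(object):
#       def __init__(self, request):
#           self.request = request
#
#       def result_json(self):
#           return '{"results": []}'
#
#   registry.registry['system'] = SystemSearch
#   # GET /search/system/ -> search_view(request, 'system')
#   #                      -> SystemSearch(request).result_json()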
|
joelddiaz/openshift-tools
|
refs/heads/prod
|
openshift/installer/vendored/openshift-ansible-3.8.36-1/roles/lib_openshift/src/class/oc_secret.py
|
32
|
# pylint: skip-file
# flake8: noqa
# pylint: skip-file
# pylint: disable=wrong-import-position,wrong-import-order
import base64
# pylint: disable=too-many-arguments
class OCSecret(OpenShiftCLI):
''' Class to wrap the oc command line tools
'''
def __init__(self,
namespace,
secret_name=None,
secret_type=None,
decode=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
super(OCSecret, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = secret_name
self.type = secret_type
self.decode = decode
def get(self):
'''return a secret by name '''
results = self._get('secrets', self.name)
results['decoded'] = {}
results['exists'] = False
if results['returncode'] == 0 and results['results'][0]:
results['exists'] = True
if self.decode:
if 'data' in results['results'][0]:
for sname, value in results['results'][0]['data'].items():
results['decoded'][sname] = base64.b64decode(value)
if results['returncode'] != 0 and '"%s" not found' % self.name in results['stderr']:
results['returncode'] = 0
return results
def delete(self):
'''delete a secret by name'''
return self._delete('secrets', self.name)
def create(self, files=None, contents=None, force=False):
'''Create a secret '''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['secrets', 'new', self.name]
if self.type is not None:
cmd.append("--type=%s" % (self.type))
if force:
cmd.append('--confirm')
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
return results
def update(self, files, force=False):
'''run update secret
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
secret = self.prep_secret(files, force=force)
if secret['returncode'] != 0:
return secret
sfile_path = '/tmp/%s' % self.name
with open(sfile_path, 'w') as sfd:
sfd.write(json.dumps(secret['results']))
atexit.register(Utils.cleanup, [sfile_path])
return self._replace(sfile_path, force=force)
def prep_secret(self, files=None, contents=None, force=False):
''' return what the secret would look like if created
This is accomplished by passing -ojson. This will most likely change in the future
'''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['-ojson', 'secrets', 'new', self.name]
if self.type is not None:
cmd.extend(["--type=%s" % (self.type)])
if force:
cmd.append('--confirm')
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
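# Hypothetical use of prep_secret (names and paths illustrative): it
# shells out to `oc -ojson secrets new <name> key=path` without
# persisting anything, so a caller can diff the would-be secret against
# the live object before replacing it.
#
#   ocsecret = OCSecret('default', secret_name='mysecret')
#   preview = ocsecret.prep_secret(files=[{'name': 'cfg', 'path': '/tmp/cfg'}])
#   if preview['returncode'] == 0:
#       print(preview['results'])  # the JSON the secret would contain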
@staticmethod
# pylint: disable=too-many-return-statements,too-many-branches
# TODO: This function should be refactored into its individual parts.
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
ocsecret = OCSecret(params['namespace'],
params['name'],
params['type'],
params['decode'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'])
state = params['state']
api_rval = ocsecret.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval, state: 'list'}
if not params['name']:
return {'failed': True,
'msg': 'Please specify a name when state is absent|present.'}
########
# Delete
########
if state == 'absent':
if not Utils.exists(api_rval['results'], params['name']):
return {'changed': False, 'state': 'absent'}
if check_mode:
return {'changed': True, 'msg': 'Would have performed a delete.'}
api_rval = ocsecret.delete()
return {'changed': True, 'results': api_rval, 'state': 'absent'}
if state == 'present':
if params['files']:
files = params['files']
elif params['contents']:
files = Utils.create_tmp_files_from_contents(params['contents'])
else:
files = [{'name': 'null', 'path': os.devnull}]
########
# Create
########
if not Utils.exists(api_rval['results'], params['name']):
if check_mode:
return {'changed': True,
'msg': 'Would have performed a create.'}
api_rval = ocsecret.create(files, params['contents'], force=params['force'])
# Remove files
if files and params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
if api_rval['returncode'] != 0:
return {'failed': True,
'msg': api_rval}
return {'changed': True,
'results': api_rval,
'state': 'present'}
########
# Update
########
secret = ocsecret.prep_secret(params['files'], params['contents'], force=params['force'])
if secret['returncode'] != 0:
return {'failed': True, 'msg': secret}
if Utils.check_def_equal(secret['results'], api_rval['results'][0]):
# Remove files
if files and params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
return {'changed': False,
'results': secret['results'],
'state': 'present'}
if check_mode:
return {'changed': True,
'msg': 'Would have performed an update.'}
api_rval = ocsecret.update(files, force=params['force'])
# Remove files
if secret and params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
if api_rval['returncode'] != 0:
return {'failed': True,
'msg': api_rval}
return {'changed': True,
'results': api_rval,
'state': 'present'}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
|
stven/headphones
|
refs/heads/develop
|
lib/requests/packages/chardet/sbcsgroupprober.py
|
235
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from charsetgroupprober import CharSetGroupProber
from sbcharsetprober import SingleByteCharSetProber
from langcyrillicmodel import Win1251CyrillicModel, Koi8rModel, Latin5CyrillicModel, MacCyrillicModel, Ibm866Model, Ibm855Model
from langgreekmodel import Latin7GreekModel, Win1253GreekModel
from langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from langthaimodel import TIS620ThaiModel
from langhebrewmodel import Win1255HebrewModel
from hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [ \
SingleByteCharSetProber(Win1251CyrillicModel),
SingleByteCharSetProber(Koi8rModel),
SingleByteCharSetProber(Latin5CyrillicModel),
SingleByteCharSetProber(MacCyrillicModel),
SingleByteCharSetProber(Ibm866Model),
SingleByteCharSetProber(Ibm855Model),
SingleByteCharSetProber(Latin7GreekModel),
SingleByteCharSetProber(Win1253GreekModel),
SingleByteCharSetProber(Latin5BulgarianModel),
SingleByteCharSetProber(Win1251BulgarianModel),
SingleByteCharSetProber(Latin2HungarianModel),
SingleByteCharSetProber(Win1250HungarianModel),
SingleByteCharSetProber(TIS620ThaiModel),
]
hebrewProber = HebrewProber()
logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.False, hebrewProber)
visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.True, hebrewProber)
hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
self._mProbers.extend([hebrewProber, logicalHebrewProber, visualHebrewProber])
self.reset()
|
DavidNorman/tensorflow
|
refs/heads/master
|
tensorflow/python/compiler/tensorrt/test/base_test.py
|
7
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic tests for TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class SimpleSingleEngineTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing single segment."""
dtype = inp.dtype
conv_filter = constant_op.constant(
[[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
name="weights",
dtype=dtype)
conv = nn.conv2d(
input=inp,
filter=conv_filter,
strides=[1, 2, 2, 1],
padding="SAME",
name="conv")
bias = constant_op.constant([4., 1.5, 2., 3., 5., 7.],
name="bias",
dtype=dtype)
added = nn.bias_add(conv, bias, name="bias_add")
relu = nn.relu(added, "relu")
identity = array_ops.identity(relu, "identity")
pool = nn_ops.max_pool(
identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
return array_ops.squeeze(pool, name="output_0")
def GetParams(self):
# TODO(aaroey): test graph with different dtypes.
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
[[100, 6, 6, 6]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": [
"weights", "conv", "bias", "bias_add", "relu", "identity",
"max_pool"
]
}
class SimpleMultiEnginesTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing multiple segment."""
dtype = inp.dtype
conv_filter = constant_op.constant(
[[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
name="weights",
dtype=dtype)
conv = nn.conv2d(
input=inp,
filter=conv_filter,
strides=[1, 2, 2, 1],
padding="SAME",
name="conv")
c1 = constant_op.constant(
np.random.randn(12, 12, 6), dtype=dtype, name="c1")
p = math_ops.mul(conv, c1, name="mul")
c2 = constant_op.constant(
np.random.randn(12, 12, 6), dtype=dtype, name="c2")
q = math_ops.div(conv, c2, name="div")
edge = self.trt_incompatible_op(q, name="incompatible")
edge = math_ops.div(edge, edge, name="div1")
r = math_ops.add(edge, edge, name="add")
p = math_ops.sub(p, edge, name="sub")
q = math_ops.mul(q, edge, name="mul1")
s = math_ops.add(p, q, name="add1")
s = math_ops.sub(s, r, name="sub1")
return array_ops.squeeze(s, name="output_0")
def GetParams(self):
# TODO(aaroey): test graph with different dtypes.
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
[[100, 12, 12, 6]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": [
"add", "add1", "c1", "div1", "mul", "mul1", "sub", "sub1"
],
"TRTEngineOp_1": ["c2", "conv", "div", "weights"]
}
def GetConversionParams(self, run_params):
"""Return a ConversionParams for test."""
return super(
SimpleMultiEnginesTest, self
).GetConversionParams(run_params)._replace(
# Disable layout optimizer, since it'll add Transpose(Const, Const) to
# the graph and breaks the conversion check.
rewriter_config_template=trt_test.OptimizerDisabledRewriterConfig())
class SimpleMultiEnginesTest2(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing two segment."""
n = inp
for i in range(2):
c = constant_op.constant(1.0, name="c%d" % i)
n = math_ops.add(n, c, name="add%d" % i)
n = math_ops.mul(n, n, name="mul%d" % i)
edge = self.trt_incompatible_op(n, name="incompatible")
with ops.control_dependencies([edge]):
c = constant_op.constant(1.0, name="c2")
n = math_ops.add(n, c, name="add2")
n = math_ops.mul(n, n, name="mul2")
c = constant_op.constant(1.0, name="c3")
n = math_ops.add(n, c, name="add3")
n = math_ops.mul(n, n, name="mul3")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["c0", "c1", "add0", "add1", "mul0", "mul1"],
"TRTEngineOp_1": ["c2", "c3", "add2", "add3", "mul2", "mul3"]
}
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
# Disable the test in fp16 mode since multiple matmul and add ops together
# can cause overflow.
return ((run_params.precision_mode != "FP16") and
not (trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.use_calibration))
class ConstInputTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing multiple segment."""
n = inp
c = constant_op.constant(1.0, name="c")
# Adds control dependency from the constant op to a trt incompatible op,
# and adds control dependency from the trt incompatible op to all other
# ops, to make sure the constant op cannot be contracted with any trt
# segment that depends on it.
with ops.control_dependencies([c]):
d = self.trt_incompatible_op(n, name="incompatible")
with ops.control_dependencies([d]):
n = math_ops.add(n, c, name="add")
n = math_ops.mul(n, n, name="mul")
n = math_ops.add(n, n, name="add1")
n = self.trt_incompatible_op(n, name="incompatible1")
with ops.control_dependencies([d]):
n = math_ops.add(n, c, name="add2")
n = math_ops.mul(n, n, name="mul1")
n = math_ops.add(n, n, name="add3")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["add", "add1", "mul"],
"TRTEngineOp_1": ["add2", "add3", "mul1"]
}
class ConstDataInputSingleEngineTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing single segment."""
n = inp
c = constant_op.constant(1.0, name="c")
n = math_ops.add(n, c, name="add")
n = math_ops.mul(n, n, name="mul")
n = math_ops.add(n, n, name="add1")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {"TRTEngineOp_0": ["c", "add", "add1", "mul"]}
class ConstDataInputMultipleEnginesTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing multiple segment."""
n = inp
c = constant_op.constant(1.0, name="c")
n = math_ops.add(n, c, name="add")
n = math_ops.mul(n, n, name="mul")
n = math_ops.add(n, n, name="add1")
n = self.trt_incompatible_op(n, name="incompatible1")
n = math_ops.add(n, c, name="add2")
n = math_ops.mul(n, n, name="mul1")
n = math_ops.add(n, n, name="add3")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["add2", "add3", "mul1"],
# Why segment ["add", "add1", "mul"] was assigned segment id 1
# instead of 0: the parent node of this segment is actually const
# node 'c', but it's removed later since it's const output of the
# segment which is not allowed.
"TRTEngineOp_1": ["add", "add1", "mul"]
}
class ControlDependencyTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing multiple segment."""
c1 = constant_op.constant(1.0, name="c1")
c2 = constant_op.constant(1.0, name="c2")
d1 = constant_op.constant(1.0, name="d1")
d2 = self.trt_incompatible_op(inp, name="d2")
with ops.control_dependencies([d1, d2]):
add = math_ops.add(inp, c1, name="add")
with ops.control_dependencies([d1, d2]):
mul = math_ops.mul(add, add, name="mul")
with ops.control_dependencies([d1, d2]):
add1 = math_ops.add(mul, mul, name="add1")
edge = self.trt_incompatible_op(add1, name="incompatible")
with ops.control_dependencies([d1, d2, add, mul]):
add2 = math_ops.add(edge, c2, name="add2")
with ops.control_dependencies([d1, d2, add1, mul]):
mul1 = math_ops.mul(add2, add2, name="mul1")
with ops.control_dependencies([d1, d2, add, add1]):
add3 = math_ops.add(mul1, mul1, name="add3")
return array_ops.squeeze(add3, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["c1", "add", "add1", "mul"],
"TRTEngineOp_1": ["c2", "add2", "add3", "mul1"]
}
if __name__ == "__main__":
test.main()
|
lmazuel/ansible
|
refs/heads/devel
|
lib/ansible/playbook/role/requirement.py
|
81
|
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import shutil
import subprocess
import tempfile
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.playbook.role.definition import RoleDefinition
__all__ = ['RoleRequirement']
VALID_SPEC_KEYS = [
'name',
'role',
'scm',
'src',
'version',
]
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class RoleRequirement(RoleDefinition):
"""
Helper class for Galaxy, which is used to parse dependencies
specified in both meta/main.yml and requirements.yml files.
"""
def __init__(self):
pass
@staticmethod
def repo_url_to_role_name(repo_url):
# gets the role name out of a repo like
# "http://git.example.com/repos/repo.git" => "repo"
if '://' not in repo_url and '@' not in repo_url:
return repo_url
trailing_path = repo_url.split('/')[-1]
if trailing_path.endswith('.git'):
trailing_path = trailing_path[:-4]
if trailing_path.endswith('.tar.gz'):
trailing_path = trailing_path[:-7]
if ',' in trailing_path:
trailing_path = trailing_path.split(',')[0]
return trailing_path
@staticmethod
def role_spec_parse(role_spec):
# takes a repo and a version like
# git+http://git.example.com/repos/repo.git,v1.0
# and returns a list of properties such as:
# {
# 'scm': 'git',
# 'src': 'http://git.example.com/repos/repo.git',
# 'version': 'v1.0',
# 'name': 'repo'
# }
display.deprecated("The comma separated role spec format, use the yaml/explicit format instead. Line that trigger this: %s" % role_spec,
version="2.7")
default_role_versions = dict(git='master', hg='tip')
role_spec = role_spec.strip()
role_version = ''
if role_spec == "" or role_spec.startswith("#"):
return (None, None, None, None)
tokens = [s.strip() for s in role_spec.split(',')]
# assume https://github.com URLs are git+https:// URLs and not
# tarballs unless they end in '.zip'
if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
tokens[0] = 'git+' + tokens[0]
if '+' in tokens[0]:
(scm, role_url) = tokens[0].split('+')
else:
scm = None
role_url = tokens[0]
if len(tokens) >= 2:
role_version = tokens[1]
if len(tokens) == 3:
role_name = tokens[2]
else:
role_name = RoleRequirement.repo_url_to_role_name(tokens[0])
if scm and not role_version:
role_version = default_role_versions.get(scm, '')
return dict(scm=scm, src=role_url, version=role_version, name=role_name)
@staticmethod
def role_yaml_parse(role):
if isinstance(role, string_types):
name = None
scm = None
src = None
version = None
if ',' in role:
if role.count(',') == 1:
(src, version) = role.strip().split(',', 1)
elif role.count(',') == 2:
(src, version, name) = role.strip().split(',', 2)
else:
raise AnsibleError("Invalid role line (%s). Proper format is 'role_name[,version[,name]]'" % role)
else:
src = role
if name is None:
name = RoleRequirement.repo_url_to_role_name(src)
if '+' in src:
(scm, src) = src.split('+', 1)
return dict(name=name, src=src, scm=scm, version=version)
if 'role' in role:
name = role['role']
if ',' in name:
# Old style: {role: "galaxy.role,version,name", other_vars: "here" }
role = RoleRequirement.role_spec_parse(role['role'])
else:
del role['role']
role['name'] = name
else:
role = role.copy()
if 'src' in role:
# New style: { src: 'galaxy.role,version,name', other_vars: "here" }
if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
role["src"] = "git+" + role["src"]
if '+' in role["src"]:
(scm, src) = role["src"].split('+')
role["scm"] = scm
role["src"] = src
if 'name' not in role:
role["name"] = RoleRequirement.repo_url_to_role_name(role["src"])
if 'version' not in role:
role['version'] = ''
if 'scm' not in role:
role['scm'] = None
for key in list(role.keys()):
if key not in VALID_SPEC_KEYS:
role.pop(key)
return role
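# Sketch of what role_yaml_parse normalizes (inputs illustrative):
#
#   role_yaml_parse('git+https://git.example.com/repos/repo.git,v1.0')
#   # -> {'name': 'repo', 'scm': 'git',
#   #     'src': 'https://git.example.com/repos/repo.git',
#   #     'version': 'v1.0'}
#
#   role_yaml_parse({'role': 'namespace.rolename'})
#   # -> {'name': 'namespace.rolename', 'scm': None, 'version': ''}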
@staticmethod
def scm_archive_role(src, scm='git', name=None, version='HEAD'):
if scm not in ['hg', 'git']:
raise AnsibleError("- scm %s is not currently supported" % scm)
tempdir = tempfile.mkdtemp()
clone_cmd = [scm, 'clone', src, name]
with open('/dev/null', 'w') as devnull:
try:
popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
except:
raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
rc = popen.wait()
if rc != 0:
raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(clone_cmd), tempdir, rc))
if scm == 'git' and version:
checkout_cmd = [scm, 'checkout', version]
with open('/dev/null', 'w') as devnull:
try:
popen = subprocess.Popen(checkout_cmd, cwd=os.path.join(tempdir, name), stdout=devnull, stderr=devnull)
except (IOError, OSError):
raise AnsibleError("error executing: %s" % " ".join(checkout_cmd))
rc = popen.wait()
if rc != 0:
raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(checkout_cmd), tempdir, rc))
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
if scm == 'hg':
archive_cmd = ['hg', 'archive', '--prefix', "%s/" % name]
if version:
archive_cmd.extend(['-r', version])
archive_cmd.append(temp_file.name)
if scm == 'git':
archive_cmd = ['git', 'archive', '--prefix=%s/' % name, '--output=%s' % temp_file.name]
if version:
archive_cmd.append(version)
else:
archive_cmd.append('HEAD')
with open('/dev/null', 'w') as devnull:
popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, name),
stderr=devnull, stdout=devnull)
rc = popen.wait()
if rc != 0:
raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(archive_cmd), tempdir, rc))
shutil.rmtree(tempdir, ignore_errors=True)
return temp_file.name
|
fentas/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/executive.py
|
117
|
# Copyright (c) 2009, Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import logging
import multiprocessing
import os
import StringIO
import signal
import subprocess
import sys
import time
from webkitpy.common.system.outputtee import Tee
from webkitpy.common.system.filesystem import FileSystem
_log = logging.getLogger(__name__)
class ScriptError(Exception):
def __init__(self,
message=None,
script_args=None,
exit_code=None,
output=None,
cwd=None):
if not message:
message = 'Failed to run "%s"' % repr(script_args)
if exit_code:
message += " exit_code: %d" % exit_code
if cwd:
message += " cwd: %s" % cwd
Exception.__init__(self, message)
self.script_args = script_args # 'args' is already used by Exception
self.exit_code = exit_code
self.output = output
self.cwd = cwd
def message_with_output(self, output_limit=500):
if self.output:
if output_limit and len(self.output) > output_limit:
return u"%s\n\nLast %s characters of output:\n%s" % \
(self, output_limit, self.output[-output_limit:])
return u"%s\n\n%s" % (self, self.output)
return unicode(self)
def command_name(self):
command_path = self.script_args
if type(command_path) is list:
command_path = command_path[0]
return os.path.basename(command_path)
class Executive(object):
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
def __init__(self):
self.pid_to_system_pid = {}
def _should_close_fds(self):
# We need to pass close_fds=True to work around Python bug #2320
# (otherwise we can hang when we kill DumpRenderTree when we are running
# multiple threads). See http://bugs.python.org/issue2320 .
# Note that close_fds isn't supported on Windows, but this bug only
# shows up on Mac and Linux.
return sys.platform not in ('win32', 'cygwin')
def _run_command_with_teed_output(self, args, teed_output, **kwargs):
child_process = self.popen(args,
stdout=self.PIPE,
stderr=self.STDOUT,
close_fds=self._should_close_fds(),
**kwargs)
# Use our own custom wait loop because Popen ignores a tee'd
# stderr/stdout.
# FIXME: This could be improved not to flatten output to stdout.
while True:
output_line = child_process.stdout.readline()
if output_line == "" and child_process.poll() != None:
# poll() is not threadsafe and can throw OSError due to:
# http://bugs.python.org/issue1731717
return child_process.poll()
# We assume that the child process wrote to us in utf-8,
# so no re-encoding is necessary before writing here.
teed_output.write(output_line)
# FIXME: Remove this deprecated method and move callers to run_command.
# FIXME: This method is a hack to allow running commands which both
# capture their output and print it to stdout. Useful for things
# like "build-webkit" where we want to display to the user that we're building
# but still have the output to stuff into a log file.
def run_and_throw_if_fail(self, args, quiet=False, decode_output=True, **kwargs):
# Cache the child's output locally so it can be used for error reports.
child_out_file = StringIO.StringIO()
tee_stdout = sys.stdout
if quiet:
dev_null = open(os.devnull, "w") # FIXME: Does this need an encoding?
tee_stdout = dev_null
child_stdout = Tee(child_out_file, tee_stdout)
exit_code = self._run_command_with_teed_output(args, child_stdout, **kwargs)
if quiet:
dev_null.close()
child_output = child_out_file.getvalue()
child_out_file.close()
if decode_output:
child_output = child_output.decode(self._child_process_encoding())
if exit_code:
raise ScriptError(script_args=args,
exit_code=exit_code,
output=child_output)
return child_output
def cpu_count(self):
try:
cpus = int(os.environ.get('NUMBER_OF_PROCESSORS'))
if cpus > 0:
return cpus
except (ValueError, TypeError):
pass
return multiprocessing.cpu_count()
@staticmethod
def interpreter_for_script(script_path, fs=None):
fs = fs or FileSystem()
lines = fs.read_text_file(script_path).splitlines()
if not len(lines):
return None
first_line = lines[0]
if not first_line.startswith('#!'):
return None
if first_line.find('python') > -1:
return sys.executable
if first_line.find('perl') > -1:
return 'perl'
if first_line.find('ruby') > -1:
return 'ruby'
return None
@staticmethod
def shell_command_for_script(script_path, fs=None):
fs = fs or FileSystem()
# Win32 does not support shebang. We need to detect the interpreter ourself.
if sys.platform == 'win32':
interpreter = Executive.interpreter_for_script(script_path, fs)
if interpreter:
return [interpreter, script_path]
return [script_path]
def kill_process(self, pid):
"""Attempts to kill the given pid.
Will fail silently if the pid does not exist or permissions are insufficient."""
if sys.platform == "win32":
# We only use taskkill.exe on windows (not cygwin) because subprocess.pid
# is a CYGWIN pid and taskkill.exe expects a windows pid.
# Thankfully os.kill on CYGWIN handles either pid type.
command = ["taskkill.exe", "/f", "/pid", pid]
# taskkill will exit 128 if the process is not found. We should log.
self.run_command(command, error_handler=self.ignore_error)
return
# According to http://docs.python.org/library/os.html
# os.kill isn't available on Windows. python 2.5.5 os.kill appears
# to work in cygwin, however it occasionally raises EAGAIN.
retries_left = 10 if sys.platform == "cygwin" else 1
while retries_left > 0:
try:
retries_left -= 1
os.kill(pid, signal.SIGKILL)
_ = os.waitpid(pid, os.WNOHANG)
except OSError, e:
if e.errno == errno.EAGAIN:
if retries_left <= 0:
_log.warn("Failed to kill pid %s. Too many EAGAIN errors." % pid)
continue
if e.errno == errno.ESRCH: # The process does not exist.
return
if e.errno == errno.EPIPE: # The process has exited already on cygwin
return
if e.errno == errno.ECHILD:
# Can't wait on a non-child process, but the kill worked.
return
if e.errno == errno.EACCES and sys.platform == 'cygwin':
# Cygwin python sometimes can't kill native processes.
return
raise
def _win32_check_running_pid(self, pid):
# importing ctypes at the top-level seems to cause weird crashes at
# exit under cygwin on apple's win port. Only win32 needs cygwin, so
# we import it here instead. See https://bugs.webkit.org/show_bug.cgi?id=91682
import ctypes
class PROCESSENTRY32(ctypes.Structure):
_fields_ = [("dwSize", ctypes.c_ulong),
("cntUsage", ctypes.c_ulong),
("th32ProcessID", ctypes.c_ulong),
("th32DefaultHeapID", ctypes.POINTER(ctypes.c_ulong)),
("th32ModuleID", ctypes.c_ulong),
("cntThreads", ctypes.c_ulong),
("th32ParentProcessID", ctypes.c_ulong),
("pcPriClassBase", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("szExeFile", ctypes.c_char * 260)]
CreateToolhelp32Snapshot = ctypes.windll.kernel32.CreateToolhelp32Snapshot
Process32First = ctypes.windll.kernel32.Process32First
Process32Next = ctypes.windll.kernel32.Process32Next
CloseHandle = ctypes.windll.kernel32.CloseHandle
TH32CS_SNAPPROCESS = 0x00000002 # win32 magic number
hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
pe32 = PROCESSENTRY32()
pe32.dwSize = ctypes.sizeof(PROCESSENTRY32)
result = False
if not Process32First(hProcessSnap, ctypes.byref(pe32)):
_log.debug("Failed getting first process.")
CloseHandle(hProcessSnap)
return result
while True:
if pe32.th32ProcessID == pid:
result = True
break
if not Process32Next(hProcessSnap, ctypes.byref(pe32)):
break
CloseHandle(hProcessSnap)
return result
def check_running_pid(self, pid):
"""Return True if pid is alive, otherwise return False."""
if sys.platform == 'win32':
return self._win32_check_running_pid(pid)
try:
os.kill(pid, 0)
return True
except OSError:
return False
def running_pids(self, process_name_filter=None):
if sys.platform == "win32":
# FIXME: running_pids isn't implemented on native Windows yet...
return []
if not process_name_filter:
process_name_filter = lambda process_name: True
running_pids = []
if sys.platform in ("cygwin"):
ps_process = self.run_command(['ps', '-e'], error_handler=Executive.ignore_error)
for line in ps_process.splitlines():
tokens = line.strip().split()
try:
pid, ppid, pgid, winpid, tty, uid, stime, process_name = tokens
if process_name_filter(process_name):
running_pids.append(int(pid))
self.pid_to_system_pid[int(pid)] = int(winpid)
except ValueError, e:
pass
else:
ps_process = self.popen(['ps', '-eo', 'pid,comm'], stdout=self.PIPE, stderr=self.PIPE)
stdout, _ = ps_process.communicate()
for line in stdout.splitlines():
try:
# In some cases the line can contain one or more
# leading white-spaces, so strip it before split.
pid, process_name = line.strip().split(' ', 1)
if process_name_filter(process_name):
running_pids.append(int(pid))
except ValueError, e:
pass
return sorted(running_pids)
def wait_newest(self, process_name_filter=None):
if not process_name_filter:
process_name_filter = lambda process_name: True
running_pids = self.running_pids(process_name_filter)
if not running_pids:
return
pid = running_pids[-1]
while self.check_running_pid(pid):
time.sleep(0.25)
def wait_limited(self, pid, limit_in_seconds=None, check_frequency_in_seconds=None):
seconds_left = limit_in_seconds or 10
sleep_length = check_frequency_in_seconds or 1
while seconds_left > 0 and self.check_running_pid(pid):
seconds_left -= sleep_length
time.sleep(sleep_length)
def _windows_image_name(self, process_name):
name, extension = os.path.splitext(process_name)
if not extension:
# taskkill expects processes to end in .exe
# If necessary we could add a flag to disable appending .exe.
process_name = "%s.exe" % name
return process_name
def interrupt(self, pid):
interrupt_signal = signal.SIGINT
# FIXME: The python docs seem to imply that platform == 'win32' may need to use signal.CTRL_C_EVENT
# http://docs.python.org/2/library/signal.html
try:
os.kill(pid, interrupt_signal)
except OSError:
# Silently ignore when the pid doesn't exist.
# It's impossible for callers to avoid race conditions with process shutdown.
pass
def kill_all(self, process_name):
"""Attempts to kill processes matching process_name.
Will fail silently if no process are found."""
if sys.platform in ("win32", "cygwin"):
image_name = self._windows_image_name(process_name)
command = ["taskkill.exe", "/f", "/im", image_name]
# taskkill will exit 128 if the process is not found. We should log.
self.run_command(command, error_handler=self.ignore_error)
return
# FIXME: This is inconsistent that kill_all uses TERM and kill_process
# uses KILL. Windows is always using /f (which seems like -KILL).
# We should pick one mode, or add support for switching between them.
# Note: Mac OS X 10.6 requires -SIGNALNAME before -u USER
command = ["killall", "-TERM", "-u", os.getenv("USER"), process_name]
# killall returns 1 if no process can be found and 2 on command error.
# FIXME: We should pass a custom error_handler to allow only exit_code 1.
# We should log in exit_code == 1
self.run_command(command, error_handler=self.ignore_error)
# Error handlers do not need to be static methods once all callers are
# updated to use an Executive object.
@staticmethod
def default_error_handler(error):
raise error
@staticmethod
def ignore_error(error):
pass
def _compute_stdin(self, input):
"""Returns (stdin, string_to_communicate)"""
# FIXME: We should be returning /dev/null for stdin
# or closing stdin after process creation to prevent
# child processes from getting input from the user.
if not input:
return (None, None)
if hasattr(input, "read"): # Check if the input is a file.
return (input, None) # Assume the file is in the right encoding.
# Popen in Python 2.5 and before does not automatically encode unicode objects.
# http://bugs.python.org/issue5290
# See https://bugs.webkit.org/show_bug.cgi?id=37528
# for an example of a regression caused by passing a unicode string directly.
# FIXME: We may need to encode differently on different platforms.
if isinstance(input, unicode):
input = input.encode(self._child_process_encoding())
return (self.PIPE, input)
def command_for_printing(self, args):
"""Returns a print-ready string representing command args.
The string should be copy/paste ready for execution in a shell."""
args = self._stringify_args(args)
escaped_args = []
for arg in args:
if isinstance(arg, unicode):
# Escape any non-ascii characters for easy copy/paste
arg = arg.encode("unicode_escape")
# FIXME: Do we need to fix quotes here?
escaped_args.append(arg)
return " ".join(escaped_args)
# FIXME: run_and_throw_if_fail should be merged into this method.
def run_command(self,
args,
cwd=None,
env=None,
input=None,
error_handler=None,
return_exit_code=False,
return_stderr=True,
decode_output=True):
"""Popen wrapper for convenience and to work around python bugs."""
assert(isinstance(args, list) or isinstance(args, tuple))
start_time = time.time()
stdin, string_to_communicate = self._compute_stdin(input)
stderr = self.STDOUT if return_stderr else None
process = self.popen(args,
stdin=stdin,
stdout=self.PIPE,
stderr=stderr,
cwd=cwd,
env=env,
close_fds=self._should_close_fds())
output = process.communicate(string_to_communicate)[0]
# run_command automatically decodes to unicode() unless explicitly told not to.
if decode_output:
output = output.decode(self._child_process_encoding())
# wait() is not threadsafe and can throw OSError due to:
# http://bugs.python.org/issue1731717
exit_code = process.wait()
_log.debug('"%s" took %.2fs' % (self.command_for_printing(args), time.time() - start_time))
if return_exit_code:
return exit_code
if exit_code:
script_error = ScriptError(script_args=args,
exit_code=exit_code,
output=output,
cwd=cwd)
(error_handler or self.default_error_handler)(script_error)
return output
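# Illustrative calls (not from the original file): run_command returns
# decoded output by default, and return_exit_code=True short-circuits
# the error handler.
#
#   executive = Executive()
#   out = executive.run_command(['echo', 'hello'])                # u'hello\n'
#   rc = executive.run_command(['false'], return_exit_code=True)  # 1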
def _child_process_encoding(self):
# Win32 Python 2.x uses CreateProcessA rather than CreateProcessW
# to launch subprocesses, so we have to encode arguments using the
# current code page.
if sys.platform == 'win32' and sys.version < '3':
return 'mbcs'
# All other platforms use UTF-8.
# FIXME: Using UTF-8 on Cygwin will confuse Windows-native commands
# which will expect arguments to be encoded using the current code
# page.
return 'utf-8'
def _should_encode_child_process_arguments(self):
# Cygwin's Python's os.execv doesn't support unicode command
# arguments, and neither does Cygwin's execv itself.
if sys.platform == 'cygwin':
return True
# Win32 Python 2.x uses CreateProcessA rather than CreateProcessW
# to launch subprocesses, so we have to encode arguments using the
# current code page.
if sys.platform == 'win32' and sys.version < '3':
return True
return False
def _encode_argument_if_needed(self, argument):
if not self._should_encode_child_process_arguments():
return argument
return argument.encode(self._child_process_encoding())
def _stringify_args(self, args):
# Popen will throw an exception if args are non-strings (like int())
string_args = map(unicode, args)
# The Windows implementation of Popen cannot handle unicode strings. :(
return map(self._encode_argument_if_needed, string_args)
    # The only required argument to popen is named "args"; the rest are optional keyword arguments.
def popen(self, args, **kwargs):
# FIXME: We should always be stringifying the args, but callers who pass shell=True
# expect that the exact bytes passed will get passed to the shell (even if they're wrongly encoded).
# shell=True is wrong for many other reasons, and we should remove this
# hack as soon as we can fix all callers to not use shell=True.
if kwargs.get('shell') == True:
string_args = args
else:
string_args = self._stringify_args(args)
return subprocess.Popen(string_args, **kwargs)
def run_in_parallel(self, command_lines_and_cwds, processes=None):
"""Runs a list of (cmd_line list, cwd string) tuples in parallel and returns a list of (retcode, stdout, stderr) tuples."""
assert len(command_lines_and_cwds)
if sys.platform in ('cygwin', 'win32'):
return map(_run_command_thunk, command_lines_and_cwds)
pool = multiprocessing.Pool(processes=processes)
results = pool.map(_run_command_thunk, command_lines_and_cwds)
pool.close()
pool.join()
return results
def _run_command_thunk(cmd_line_and_cwd):
    # Note that this needs to be a module-level (and hence picklable) function to work with multiprocessing.Pool.
(cmd_line, cwd) = cmd_line_and_cwd
proc = subprocess.Popen(cmd_line, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
return (proc.returncode, stdout, stderr)
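# Illustrative usage sketch (not part of the original module). It assumes the
# enclosing class above has already been instantiated as `executive`; the
# commands below are placeholders.
def _example_usage(executive):
    # run_command returns decoded stdout; stderr is merged in by default.
    output = executive.run_command(['echo', 'hello'])
    # Exit codes can be inspected instead of raising ScriptError.
    exit_code = executive.run_command(['false'], return_exit_code=True)
    # run_in_parallel takes (command_line, cwd) tuples.
    results = executive.run_in_parallel([(['echo', 'a'], '.'), (['echo', 'b'], '.')])
    return output, exit_code, results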
|
miurahr/pinax-blog
|
refs/heads/master
|
pinax/blog/utils.py
|
5
|
from django.core.exceptions import ImproperlyConfigured
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
try:
import twitter
except ImportError:
twitter = None
from .conf import settings
def can_tweet():
creds_available = (hasattr(settings, "TWITTER_USERNAME") and
hasattr(settings, "TWITTER_PASSWORD"))
return twitter and creds_available
def load_path_attr(path):
i = path.rfind(".")
module, attr = path[:i], path[i + 1:]
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured("Error importing %s: '%s'" % (module, e))
try:
attr = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured("Module '%s' does not define a '%s'" % (module, attr))
return attr
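# Illustrative usage sketch (not part of the original module): resolving a
# dotted path to the object it names. "os.path.join" is just a stand-in for
# whatever path a caller pulls out of settings.
def _example_load_path_attr():
    join = load_path_attr("os.path.join")
    return join("pinax", "blog")  # "pinax/blog" on POSIX systems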
|
quantumlib/OpenFermion-Cirq
|
refs/heads/master
|
openfermioncirq/variational/objective.py
|
1
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import abc
import numpy
import cirq
class VariationalObjective(metaclass=abc.ABCMeta):
"""An objective function for a variational algorithm.
A variational objective is a way of assigning a numerical value, or score,
to the output from executing a circuit. The goal of a variational
algorithm is to find a setting of parameters that minimizes the value
of the resulting circuit output.
The VariationalObjective class supports the option to provide a noise
and cost model for the value. This is useful for modeling situations
in which the value can be determined only approximately and there is a
tradeoff between the accuracy of the evaluation and the cost of the
evaluation.
"""
@abc.abstractmethod
def value(self,
circuit_output: Union[cirq.TrialResult,
cirq.SimulationTrialResult,
numpy.ndarray]
) -> float:
"""The evaluation function for a circuit output.
A variational quantum algorithm will attempt to minimize this value over
possible settings of the parameters.
"""
def noise(self, cost: Optional[float]=None) -> float:
"""Artificial noise that may be added to the true objective value.
The `cost` argument is used to model situations in which it is possible
to reduce the magnitude of the noise at some cost.
"""
# Default: no noise
return 0.0
def noise_bounds(self,
cost: float,
confidence: Optional[float]=None
) -> Tuple[float, float]:
"""Exact or approximate bounds on noise.
Returns a tuple (a, b) such that when `noise` is called with the given
cost, the returned value lies between a and b. It should be the case
that a <= 0 <= b.
This function takes an optional `confidence` parameter which is a real
number strictly between 0 and 1 that gives the probability of the bounds
being correct. This is used for situations in which exact bounds on the
noise cannot be guaranteed.
"""
return -numpy.inf, numpy.inf
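# Illustrative sketch (not part of the original module): a minimal concrete
# objective. It assumes the circuit output is a final state vector and scores
# it by the expectation value of a fixed Hermitian matrix; the class name and
# semantics below are hypothetical.
class _ExampleExpectationObjective(VariationalObjective):
    """Scores a state vector by <psi| M |psi> for a fixed matrix M."""

    def __init__(self, matrix: numpy.ndarray) -> None:
        self.matrix = matrix

    def value(self,
              circuit_output: Union[cirq.TrialResult,
                                    cirq.SimulationTrialResult,
                                    numpy.ndarray]
              ) -> float:
        state = numpy.asarray(circuit_output)  # assumed to be a state vector
        return float(numpy.real(numpy.vdot(state, self.matrix.dot(state))))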
|
infus0815/xbmc
|
refs/heads/master
|
tools/Fake Episode Maker/openAnything.py
|
169
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
# http://xbmc.org
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
import sys, urllib2, urlparse, gzip
from StringIO import StringIO
USER_AGENT = 'OpenAnything/1.0 +http://diveintopython.org/http_web_services/'
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_301(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_301(
self, req, fp, code, msg, headers)
result.status = code
return result
def http_error_302(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_302(
self, req, fp, code, msg, headers)
result.status = code
return result
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
result = urllib2.HTTPError(
req.get_full_url(), code, msg, headers, fp)
result.status = code
return result
def openAnything(source, etag=None, lastmodified=None, agent=USER_AGENT):
'''URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the lastmodified argument is supplied, it must be a formatted
date/time string in GMT (as returned in the Last-Modified header of
a previous request). The formatted date/time will be used
as the value of an If-Modified-Since request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
'''
if hasattr(source, 'read'):
return source
if source == '-':
return sys.stdin
if urlparse.urlparse(source)[0] == 'http':
# open URL with urllib2
request = urllib2.Request(source)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if lastmodified:
request.add_header('If-Modified-Since', lastmodified)
request.add_header('Accept-encoding', 'gzip')
opener = urllib2.build_opener(SmartRedirectHandler(), DefaultErrorHandler())
return opener.open(request)
# try to open with native open function (if source is a filename)
try:
return open(source)
except (IOError, OSError):
pass
# treat source as string
return StringIO(str(source))
def fetch(source, etag=None, last_modified=None, agent=USER_AGENT):
'''Fetch data and metadata from a URL, file, stream, or string'''
result = {}
f = openAnything(source, etag, last_modified, agent)
result['data'] = f.read()
if hasattr(f, 'headers'):
# save ETag, if the server sent one
result['etag'] = f.headers.get('ETag')
# save Last-Modified header, if the server sent one
result['lastmodified'] = f.headers.get('Last-Modified')
if f.headers.get('content-encoding', '') == 'gzip':
# data came back gzip-compressed, decompress it
result['data'] = gzip.GzipFile(fileobj=StringIO(result['data'])).read()
if hasattr(f, 'url'):
result['url'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
f.close()
return result
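# Illustrative usage sketch (not part of the original module): feeding the
# ETag and Last-Modified values from one fetch back into the next lets the
# server answer 304 Not Modified. The URL is a placeholder.
def _example_conditional_fetch():
    first = fetch('http://example.com/feed.xml')
    again = fetch('http://example.com/feed.xml',
                  etag=first.get('etag'),
                  last_modified=first.get('lastmodified'))
    return again['status']  # 304 if the resource is unchanged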
|
Lujeni/ansible
|
refs/heads/devel
|
test/units/modules/network/f5/test_bigip_monitor_tcp.py
|
22
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_monitor_tcp import Parameters
from library.modules.bigip_monitor_tcp import ModuleManager
from library.modules.bigip_monitor_tcp import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_monitor_tcp import Parameters
from ansible.modules.network.f5.bigip_monitor_tcp import ModuleManager
from ansible.modules.network.f5.bigip_monitor_tcp import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
parent='parent',
send='this is a send string',
receive='this is a receive string',
ip='10.10.10.10',
type='TTYPE_TCP',
port=80,
interval=20,
timeout=30,
time_until_up=60,
partition='Common'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/parent'
assert p.send == 'this is a send string'
assert p.receive == 'this is a receive string'
assert p.ip == '10.10.10.10'
assert p.type == 'tcp'
assert p.port == 80
assert p.destination == '10.10.10.10:80'
assert p.interval == 20
assert p.timeout == 30
assert p.time_until_up == 60
def test_module_parameters_ints_as_strings(self):
args = dict(
name='foo',
parent='parent',
send='this is a send string',
receive='this is a receive string',
ip='10.10.10.10',
type='TTYPE_TCP',
port='80',
interval='20',
timeout='30',
time_until_up='60',
partition='Common'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/parent'
assert p.send == 'this is a send string'
assert p.receive == 'this is a receive string'
assert p.ip == '10.10.10.10'
assert p.type == 'tcp'
assert p.port == 80
assert p.destination == '10.10.10.10:80'
assert p.interval == 20
assert p.timeout == 30
assert p.time_until_up == 60
def test_api_parameters(self):
args = dict(
name='foo',
defaultsFrom='/Common/parent',
send='this is a send string',
recv='this is a receive string',
destination='10.10.10.10:80',
interval=20,
timeout=30,
timeUntilUp=60
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/parent'
assert p.send == 'this is a send string'
assert p.receive == 'this is a receive string'
assert p.ip == '10.10.10.10'
assert p.type == 'tcp'
assert p.port == 80
assert p.destination == '10.10.10.10:80'
assert p.interval == 20
assert p.timeout == 30
assert p.time_until_up == 60
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_monitor(self, *args):
set_module_args(dict(
name='foo',
parent='parent',
send='this is a send string',
receive='this is a receive string',
ip='10.10.10.10',
port=80,
interval=20,
timeout=30,
time_until_up=60,
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['parent'] == '/Common/parent'
def test_create_monitor_idempotent(self, *args):
set_module_args(dict(
name='foo',
parent='tcp',
send='this is a send string',
receive='this is a receive string',
ip='10.10.10.10',
port=80,
interval=20,
timeout=30,
time_until_up=60,
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is False
def test_update_port(self, *args):
set_module_args(dict(
name='foo',
port=800,
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['port'] == 800
def test_update_interval(self, *args):
set_module_args(dict(
name='foo',
interval=10,
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['interval'] == 10
def test_update_interval_larger_than_existing_timeout(self, *args):
set_module_args(dict(
name='foo',
interval=30,
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
with pytest.raises(F5ModuleError) as ex:
mm.exec_module()
assert "must be less than" in str(ex.value)
def test_update_interval_larger_than_new_timeout(self, *args):
set_module_args(dict(
name='foo',
interval=10,
timeout=5,
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
with pytest.raises(F5ModuleError) as ex:
mm.exec_module()
assert "must be less than" in str(ex.value)
def test_update_send(self, *args):
set_module_args(dict(
name='foo',
send='this is another send string',
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['send'] == 'this is another send string'
def test_update_receive(self, *args):
set_module_args(dict(
name='foo',
receive='this is another receive string',
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['receive'] == 'this is another receive string'
def test_update_timeout(self, *args):
set_module_args(dict(
name='foo',
timeout=300,
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['timeout'] == 300
def test_update_time_until_up(self, *args):
set_module_args(dict(
name='foo',
time_until_up=300,
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['time_until_up'] == 300
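# Illustrative sketch (not part of the original tests): the create test above
# relies on Mock's side_effect sequencing, which makes successive calls return
# successive values from the list.
def _example_side_effect_sequencing():
    exists = Mock(side_effect=[False, True])
    assert exists() is False  # first call: monitor absent, so it gets created
    assert exists() is True   # second call: the creation is verified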
|
SUSE/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-keyvault/azure/mgmt/keyvault/key_vault_management_client.py
|
4
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.vaults_operations import VaultsOperations
from . import models
class KeyVaultManagementClientConfiguration(AzureConfiguration):
"""Configuration for KeyVaultManagementClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Gets subscription credentials which uniquely
identify Microsoft Azure subscription. The subscription ID forms part of
the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not isinstance(subscription_id, str):
raise TypeError("Parameter 'subscription_id' must be str.")
if not base_url:
base_url = 'https://management.azure.com'
super(KeyVaultManagementClientConfiguration, self).__init__(base_url)
self.add_user_agent('keyvaultmanagementclient/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.subscription_id = subscription_id
class KeyVaultManagementClient(object):
"""The Azure management API provides a RESTful set of web services that interact with Azure Key Vault.
:ivar config: Configuration for client.
:vartype config: KeyVaultManagementClientConfiguration
:ivar vaults: Vaults operations
:vartype vaults: azure.mgmt.keyvault.operations.VaultsOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Gets subscription credentials which uniquely
identify Microsoft Azure subscription. The subscription ID forms part of
the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = KeyVaultManagementClientConfiguration(credentials, subscription_id, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2016-10-01'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.vaults = VaultsOperations(
self._client, self.config, self._serialize, self._deserialize)
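# Illustrative usage sketch (not part of the generated client). The credential
# construction and resource group name are hypothetical placeholders; the
# operation name assumes the generated VaultsOperations group wired up above.
#
# from azure.common.credentials import ServicePrincipalCredentials
# credentials = ServicePrincipalCredentials(
#     client_id='...', secret='...', tenant='...')
# client = KeyVaultManagementClient(credentials, subscription_id='...')
# for vault in client.vaults.list_by_resource_group('my-resource-group'):
#     print(vault.name)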
|
infoxchange/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/tests/regressiontests/utils/checksums.py
|
246
|
import unittest
from django.utils import checksums
class TestUtilsChecksums(unittest.TestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_luhn(self):
f = checksums.luhn
items = (
(4111111111111111, True), ('4111111111111111', True),
(4222222222222, True), (378734493671000, True),
(5424000000000015, True), (5555555555554444, True),
(1008, True), ('0000001008', True), ('000000001008', True),
(4012888888881881, True), (1234567890123456789012345678909, True),
(4111111111211111, False), (42222222222224, False),
(100, False), ('100', False), ('0000100', False),
('abc', False), (None, False), (object(), False),
)
for value, output in items:
self.check_output(f, value, output)
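# Illustrative sketch (not part of the original tests): one common way to
# compute the Luhn check for numeric input, shown for reference only --
# django.utils.checksums.luhn is the implementation actually under test.
def _example_luhn(candidate):
    digits = [int(c) for c in str(candidate)]
    odd = digits[-1::-2]                  # every other digit from the right
    even = [sum(divmod(2 * d, 10)) for d in digits[-2::-2]]  # doubled, digit-summed
    return (sum(odd) + sum(even)) % 10 == 0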
|
lowitty/server
|
refs/heads/master
|
libsDarwin/twisted/names/hosts.py
|
42
|
# -*- test-case-name: twisted.names.test.test_hosts -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
hosts(5) support.
"""
from __future__ import division, absolute_import
from twisted.python.compat import nativeString
from twisted.names import dns
from twisted.python import failure
from twisted.python.filepath import FilePath
from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.names import common
def searchFileForAll(hostsFile, name):
"""
Search the given file, which is in hosts(5) standard format, for an address
entry with a given name.
@param hostsFile: The name of the hosts(5)-format file to search.
@type hostsFile: L{FilePath}
@param name: The name to search for.
@type name: C{str}
    @return: A C{list} of C{str} giving every address in the file
        associated with the name; an empty list if the name is not found
        or the file cannot be read.
    """
results = []
try:
lines = hostsFile.getContent().splitlines()
    except (IOError, OSError):
return results
name = name.lower()
for line in lines:
idx = line.find(b'#')
if idx != -1:
line = line[:idx]
if not line:
continue
parts = line.split()
        if name in [s.lower() for s in parts[1:]]:
results.append(nativeString(parts[0]))
return results
def searchFileFor(file, name):
"""
Grep given file, which is in hosts(5) standard format, for an address
entry with a given name.
@param file: The name of the hosts(5)-format file to search.
@param name: The name to search for.
@type name: C{str}
@return: C{None} if the name is not found in the file, otherwise a
C{str} giving the address in the file associated with the name.
"""
addresses = searchFileForAll(FilePath(file), name)
if addresses:
return addresses[0]
return None
class Resolver(common.ResolverBase):
"""
A resolver that services hosts(5) format files.
"""
def __init__(self, file=b'/etc/hosts', ttl = 60 * 60):
common.ResolverBase.__init__(self)
self.file = file
self.ttl = ttl
def _aRecords(self, name):
"""
Return a tuple of L{dns.RRHeader} instances for all of the IPv4
addresses in the hosts file.
"""
return tuple([
dns.RRHeader(name, dns.A, dns.IN, self.ttl,
dns.Record_A(addr, self.ttl))
for addr
in searchFileForAll(FilePath(self.file), name)
if isIPAddress(addr)])
def _aaaaRecords(self, name):
"""
Return a tuple of L{dns.RRHeader} instances for all of the IPv6
addresses in the hosts file.
"""
return tuple([
dns.RRHeader(name, dns.AAAA, dns.IN, self.ttl,
dns.Record_AAAA(addr, self.ttl))
for addr
in searchFileForAll(FilePath(self.file), name)
if not isIPAddress(addr)])
def _respond(self, name, records):
"""
Generate a response for the given name containing the given result
records, or a failure if there are no result records.
@param name: The DNS name the response is for.
@type name: C{str}
@param records: A tuple of L{dns.RRHeader} instances giving the results
that will go into the response.
@return: A L{Deferred} which will fire with a three-tuple of result
records, authority records, and additional records, or which will
fail with L{dns.DomainError} if there are no result records.
"""
if records:
return defer.succeed((records, (), ()))
return defer.fail(failure.Failure(dns.DomainError(name)))
def lookupAddress(self, name, timeout=None):
"""
Read any IPv4 addresses from C{self.file} and return them as L{Record_A}
instances.
"""
return self._respond(name, self._aRecords(name))
def lookupIPV6Address(self, name, timeout=None):
"""
Read any IPv6 addresses from C{self.file} and return them as
L{Record_AAAA} instances.
"""
return self._respond(name, self._aaaaRecords(name))
# Someday this should include IPv6 addresses too, but that will cause
# problems if users of the API (mainly via getHostByName) aren't updated to
# know about IPv6 first.
lookupAllRecords = lookupAddress
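# Illustrative usage sketch (not part of the original module): resolving a
# name against a hosts file. The path and hostname are placeholders.
def _exampleLookup():
    resolver = Resolver(file=b'/etc/hosts')
    d = resolver.lookupAddress(b'localhost')
    # The Deferred fires with (answers, authority, additional).
    d.addCallback(lambda result: result[0])
    return d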
|
adamgreenhall/scikit-learn
|
refs/heads/master
|
sklearn/tests/test_lda.py
|
77
|
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct values
# for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = lda.LDA(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = lda.LDA(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = lda.LDA(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = lda.LDA(solver="svd")
clf_lda_lsqr = lda.LDA(solver="lsqr")
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = lda.LDA(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = lda.LDA(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = lda.LDA(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = lda._cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = lda._cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
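# Illustrative sketch (not part of the original tests): the (solver, shrinkage)
# pairs in solver_shrinkage above map directly onto constructor arguments; a
# single shrunk-covariance fit on the toy data looks like this.
def _example_shrinkage_fit():
    clf = lda.LDA(solver='lsqr', shrinkage='auto')
    return clf.fit(X, y).predict(X)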
|
zhangyage/Python-oldboy
|
refs/heads/master
|
day04/exceptions/guess.py
|
1
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import random
num = random.randint(0,100)
while True:
try:
guess = int(raw_input("Enter 1~100:"))
    except ValueError, e:  # re-prompt when the input is not a number
print "Enter 1~100"
continue
    if guess > num:
        print "Too big:", guess
    elif guess < num:
        print "Too small:", guess
    else:
        print "Guess OK, game over!"
break
print "\n"
|
kalahbrown/HueBigSQL
|
refs/heads/master
|
desktop/core/ext-py/python-openid-2.2.5/openid/consumer/html_parse.py
|
167
|
"""
This module implements a VERY limited parser that finds <link> tags in
the head of HTML or XHTML documents and parses out their attributes
according to the OpenID spec. It is a liberal parser, but it requires
these things from the data in order to work:
- There must be an open <html> tag
- There must be an open <head> tag inside of the <html> tag
- Only <link>s that are found inside of the <head> tag are parsed
(this is by design)
- The parser follows the OpenID specification in resolving the
attributes of the link tags. This means that the attributes DO NOT
get resolved as they would by an XML or HTML parser. In particular,
only certain entities get replaced, and href attributes do not get
resolved relative to a base URL.
From http://openid.net/specs.bml#linkrel:
- The openid.server URL MUST be an absolute URL. OpenID consumers
MUST NOT attempt to resolve relative URLs.
- The openid.server URL MUST NOT include entities other than &,
<, >, and ".
The parser ignores SGML comments and <![CDATA[blocks]]>. Both kinds of
quoting are allowed for attributes.
The parser deals with invalid markup in these ways:
- Tag names are not case-sensitive
- The <html> tag is accepted even when it is not at the top level
- The <head> tag is accepted even when it is not a direct child of
the <html> tag, but a <html> tag must be an ancestor of the <head>
tag
- <link> tags are accepted even when they are not direct children of
the <head> tag, but a <head> tag must be an ancestor of the <link>
tag
- If there is no closing tag for an open <html> or <head> tag, the
remainder of the document is viewed as being inside of the tag. If
there is no closing tag for a <link> tag, the link tag is treated
as a short tag. Exceptions to this rule are that <html> closes
<html> and <body> or <head> closes <head>
- Attributes of the <link> tag are not required to be quoted.
- In the case of duplicated attribute names, the attribute coming
last in the tag will be the value returned.
- Any text that does not parse as an attribute within a link tag will
be ignored. (e.g. <link pumpkin rel='openid.server' /> will ignore
pumpkin)
 - If there is more than one <html> or <head> tag, the parser only
   looks inside of the first one.
 - The contents of closed <script> blocks are ignored entirely. An
   unclosed <script> tag is not stripped: the tag itself is ignored as
   invalid markup and its contents are parsed normally.
- Any other invalid markup is ignored, including unclosed SGML
comments and unclosed <![CDATA[blocks.
"""
__all__ = ['parseLinkAttrs']
import re
flags = ( re.DOTALL # Match newlines with '.'
| re.IGNORECASE
| re.VERBOSE # Allow comments and whitespace in patterns
| re.UNICODE # Make \b respect Unicode word boundaries
)
# Stuff to remove before we start looking for tags
removed_re = re.compile(r'''
# Comments
<!--.*?-->
# CDATA blocks
| <!\[CDATA\[.*?\]\]>
# script blocks
| <script\b
# make sure script is not an XML namespace
(?!:)
[^>]*>.*?</script>
''', flags)
tag_expr = r'''
# Starts with the tag name at a word boundary, where the tag name is
# not a namespace
<%(tag_name)s\b(?!:)
# All of the stuff up to a ">", hopefully attributes.
(?P<attrs>[^>]*?)
(?: # Match a short tag
/>
| # Match a full tag
>
(?P<contents>.*?)
# Closed by
(?: # One of the specified close tags
</?%(closers)s\s*>
# End of the string
| \Z
)
)
'''
def tagMatcher(tag_name, *close_tags):
if close_tags:
options = '|'.join((tag_name,) + close_tags)
closers = '(?:%s)' % (options,)
else:
closers = tag_name
expr = tag_expr % locals()
return re.compile(expr, flags)
# Must contain at least an open html and an open head tag
html_find = tagMatcher('html')
head_find = tagMatcher('head', 'body')
link_find = re.compile(r'<link\b(?!:)', flags)
attr_find = re.compile(r'''
# Must start with a sequence of word-characters, followed by an equals sign
(?P<attr_name>\w+)=
# Then either a quoted or unquoted attribute
(?:
# Match everything that\'s between matching quote marks
(?P<qopen>["\'])(?P<q_val>.*?)(?P=qopen)
|
# If the value is not quoted, match up to whitespace
(?P<unq_val>(?:[^\s<>/]|/(?!>))+)
)
|
(?P<end_link>[<>])
''', flags)
# Entity replacement:
replacements = {
'amp':'&',
'lt':'<',
'gt':'>',
'quot':'"',
}
ent_replace = re.compile(r'&(%s);' % '|'.join(replacements.keys()))
def replaceEnt(mo):
"Replace the entities that are specified by OpenID"
return replacements.get(mo.group(1), mo.group())
def parseLinkAttrs(html):
"""Find all link tags in a string representing a HTML document and
return a list of their attributes.
@param html: the text to parse
@type html: str or unicode
@return: A list of dictionaries of attributes, one for each link tag
@rtype: [[(type(html), type(html))]]
"""
stripped = removed_re.sub('', html)
html_mo = html_find.search(stripped)
if html_mo is None or html_mo.start('contents') == -1:
return []
start, end = html_mo.span('contents')
head_mo = head_find.search(stripped, start, end)
if head_mo is None or head_mo.start('contents') == -1:
return []
start, end = head_mo.span('contents')
link_mos = link_find.finditer(stripped, head_mo.start(), head_mo.end())
matches = []
for link_mo in link_mos:
start = link_mo.start() + 5
link_attrs = {}
for attr_mo in attr_find.finditer(stripped, start):
if attr_mo.lastgroup == 'end_link':
break
# Either q_val or unq_val must be present, but not both
# unq_val is a True (non-empty) value if it is present
attr_name, q_val, unq_val = attr_mo.group(
'attr_name', 'q_val', 'unq_val')
attr_val = ent_replace.sub(replaceEnt, unq_val or q_val)
link_attrs[attr_name] = attr_val
matches.append(link_attrs)
return matches
def relMatches(rel_attr, target_rel):
"""Does this target_rel appear in the rel_str?"""
# XXX: TESTME
rels = rel_attr.strip().split()
for rel in rels:
rel = rel.lower()
if rel == target_rel:
return 1
return 0
def linkHasRel(link_attrs, target_rel):
"""Does this link have target_rel as a relationship?"""
# XXX: TESTME
rel_attr = link_attrs.get('rel')
return rel_attr and relMatches(rel_attr, target_rel)
def findLinksRel(link_attrs_list, target_rel):
"""Filter the list of link attributes on whether it has target_rel
as a relationship."""
# XXX: TESTME
matchesTarget = lambda attrs: linkHasRel(attrs, target_rel)
return filter(matchesTarget, link_attrs_list)
def findFirstHref(link_attrs_list, target_rel):
"""Return the value of the href attribute for the first link tag
in the list that has target_rel as a relationship."""
# XXX: TESTME
matches = findLinksRel(link_attrs_list, target_rel)
if not matches:
return None
first = matches[0]
return first.get('href')
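# Illustrative usage sketch (not part of the original module): extracting an
# openid.server endpoint from a discovery document.
def _exampleFindServer():
    html = ('<html><head>'
            '<link rel="openid.server" href="http://www.example.com/server">'
            '</head><body></body></html>')
    return findFirstHref(parseLinkAttrs(html), 'openid.server')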
|
jjmiranda/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/tests/test_access.py
|
8
|
# -*- coding: utf-8 -*-
"""
Test the access control framework
"""
import datetime
import ddt
import itertools
import pytz
from django.contrib.auth.models import User
from ccx_keys.locator import CCXLocator
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import Mock, patch
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from ccx.tests.factories import CcxFactory
import courseware.access as access
import courseware.access_response as access_response
from courseware.masquerade import CourseMasquerade
from courseware.tests.factories import (
BetaTesterFactory,
GlobalStaffFactory,
InstructorFactory,
StaffFactory,
UserFactory,
)
from courseware.tests.helpers import LoginEnrollmentTestCase
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from student.models import CourseEnrollment
from student.roles import CourseCcxCoachRole
from student.tests.factories import (
AdminFactory,
AnonymousUserFactory,
CourseEnrollmentAllowedFactory,
CourseEnrollmentFactory,
)
from xmodule.course_module import (
CATALOG_VISIBILITY_CATALOG_AND_ABOUT,
CATALOG_VISIBILITY_ABOUT,
CATALOG_VISIBILITY_NONE,
)
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
SharedModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE
)
from xmodule.modulestore.xml import CourseLocationManager
from xmodule.tests import get_test_system
from util.milestones_helpers import (
set_prerequisite_courses,
fulfill_course_milestone,
)
from milestones.tests.utils import MilestonesTestCaseMixin
from lms.djangoapps.ccx.models import CustomCourseForEdX
# pylint: disable=protected-access
class CoachAccessTestCaseCCX(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test if user is coach on ccx.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
"""
Set up course for tests
"""
super(CoachAccessTestCaseCCX, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
"""
Set up tests
"""
super(CoachAccessTestCaseCCX, self).setUp()
# Create ccx coach account
self.coach = AdminFactory.create(password="test")
self.client.login(username=self.coach.username, password="test")
# assign role to coach
role = CourseCcxCoachRole(self.course.id)
role.add_users(self.coach)
self.request_factory = RequestFactory()
def make_ccx(self):
"""
        Create a CCX and return its locator.
"""
ccx = CustomCourseForEdX(
course_id=self.course.id,
coach=self.coach,
display_name="Test CCX"
)
ccx.save()
ccx_locator = CCXLocator.from_course_locator(self.course.id, unicode(ccx.id))
role = CourseCcxCoachRole(ccx_locator)
role.add_users(self.coach)
CourseEnrollment.enroll(self.coach, ccx_locator)
return ccx_locator
def test_has_ccx_coach_role(self):
"""
Assert that user has coach access on ccx.
"""
ccx_locator = self.make_ccx()
        # user has access as coach on ccx
self.assertTrue(access.has_ccx_coach_role(self.coach, ccx_locator))
        # user does not have access as coach on ccx
self.setup_user()
self.assertFalse(access.has_ccx_coach_role(self.user, ccx_locator))
def test_access_student_progress_ccx(self):
"""
        Assert that only a coach can see the progress of a student.
"""
ccx_locator = self.make_ccx()
student = UserFactory()
# Enroll user
CourseEnrollment.enroll(student, ccx_locator)
# Test for access of a coach
resp = self.client.get(reverse('student_progress', args=[unicode(ccx_locator), student.id]))
self.assertEqual(resp.status_code, 200)
# Assert access of a student
self.client.login(username=student.username, password='test')
resp = self.client.get(reverse('student_progress', args=[unicode(ccx_locator), self.coach.id]))
self.assertEqual(resp.status_code, 404)
@attr(shard=1)
@ddt.ddt
class AccessTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase, MilestonesTestCaseMixin):
"""
Tests for the various access controls on the student dashboard
"""
TOMORROW = datetime.datetime.now(pytz.utc) + datetime.timedelta(days=1)
YESTERDAY = datetime.datetime.now(pytz.utc) - datetime.timedelta(days=1)
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def setUp(self):
super(AccessTestCase, self).setUp()
self.course = CourseFactory.create(org='edX', course='toy', run='test_run')
self.anonymous_user = AnonymousUserFactory()
self.beta_user = BetaTesterFactory(course_key=self.course.id)
self.student = UserFactory()
self.global_staff = UserFactory(is_staff=True)
self.course_staff = StaffFactory(course_key=self.course.id)
self.course_instructor = InstructorFactory(course_key=self.course.id)
self.staff = GlobalStaffFactory()
def verify_access(self, mock_unit, student_should_have_access, expected_error_type=None):
""" Verify the expected result from _has_access_descriptor """
response = access._has_access_descriptor(self.anonymous_user, 'load', mock_unit, course_key=self.course.id)
self.assertEqual(student_should_have_access, bool(response))
if expected_error_type is not None:
self.assertIsInstance(response, expected_error_type)
self.assertIsNotNone(response.to_json()['error_code'])
self.assertTrue(
access._has_access_descriptor(self.course_staff, 'load', mock_unit, course_key=self.course.id)
)
def test_has_staff_access_to_preview_mode(self):
"""
        Tests that users have the right access to content in preview mode.
"""
course_key = self.course.id
usage_key = self.course.scope_ids.usage_id
chapter = ItemFactory.create(category="chapter", parent_location=self.course.location)
overview = CourseOverview.get_from_id(course_key)
test_system = get_test_system()
ccx = CcxFactory(course_id=course_key)
ccx_locator = CCXLocator.from_course_locator(course_key, ccx.id)
error_descriptor = ErrorDescriptor.from_xml(
u"<problem>ABC \N{SNOWMAN}</problem>",
test_system,
CourseLocationManager(course_key),
"error msg"
)
# Enroll student to the course
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
modules = [
self.course,
overview,
chapter,
ccx_locator,
error_descriptor,
course_key,
usage_key,
]
# Course key is not None
self.assertTrue(
bool(access.has_staff_access_to_preview_mode(self.global_staff, obj=self.course, course_key=course_key))
)
for user in [self.global_staff, self.course_staff, self.course_instructor]:
for obj in modules:
self.assertTrue(bool(access.has_staff_access_to_preview_mode(user, obj=obj)))
self.assertFalse(bool(access.has_staff_access_to_preview_mode(self.student, obj=obj)))
def test_student_has_access(self):
"""
        Tests that an enrolled student has the right access to content without preview.
"""
course_key = self.course.id
chapter = ItemFactory.create(category="chapter", parent_location=self.course.location)
overview = CourseOverview.get_from_id(course_key)
# Enroll student to the course
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
modules = [
self.course,
overview,
chapter,
]
with patch('courseware.access.in_preview_mode') as mock_preview:
mock_preview.return_value = False
for obj in modules:
self.assertTrue(bool(access.has_access(self.student, 'load', obj, course_key=self.course.id)))
with patch('courseware.access.in_preview_mode') as mock_preview:
mock_preview.return_value = True
for obj in modules:
self.assertFalse(bool(access.has_access(self.student, 'load', obj, course_key=self.course.id)))
def test_string_has_staff_access_to_preview_mode(self):
"""
        Tests that different users have the right access to string content in preview mode.
"""
self.assertTrue(bool(access.has_staff_access_to_preview_mode(self.global_staff, obj='global')))
self.assertFalse(bool(access.has_staff_access_to_preview_mode(self.course_staff, obj='global')))
self.assertFalse(bool(access.has_staff_access_to_preview_mode(self.course_instructor, obj='global')))
self.assertFalse(bool(access.has_staff_access_to_preview_mode(self.student, obj='global')))
@patch('courseware.access.in_preview_mode', Mock(return_value=True))
def test_has_access_with_preview_mode(self):
"""
        Tests which particular users can access content via has_access in preview mode.
"""
self.assertTrue(bool(access.has_access(self.global_staff, 'staff', self.course, course_key=self.course.id)))
self.assertTrue(bool(access.has_access(self.course_staff, 'staff', self.course, course_key=self.course.id)))
self.assertTrue(bool(access.has_access(
self.course_instructor, 'staff', self.course, course_key=self.course.id
)))
self.assertFalse(bool(access.has_access(self.student, 'staff', self.course, course_key=self.course.id)))
self.assertFalse(bool(access.has_access(self.student, 'load', self.course, course_key=self.course.id)))
# User should be able to preview when masquerade.
with patch('courseware.access.is_masquerading_as_student') as mock_masquerade:
mock_masquerade.return_value = True
self.assertTrue(
bool(access.has_access(self.global_staff, 'staff', self.course, course_key=self.course.id))
)
self.assertFalse(
bool(access.has_access(self.student, 'staff', self.course, course_key=self.course.id))
)
def test_has_access_to_course(self):
self.assertFalse(access._has_access_to_course(
None, 'staff', self.course.id
))
self.assertFalse(access._has_access_to_course(
self.anonymous_user, 'staff', self.course.id
))
self.assertFalse(access._has_access_to_course(
self.anonymous_user, 'instructor', self.course.id
))
self.assertTrue(access._has_access_to_course(
self.global_staff, 'staff', self.course.id
))
self.assertTrue(access._has_access_to_course(
self.global_staff, 'instructor', self.course.id
))
# A user has staff access if they are in the staff group
self.assertTrue(access._has_access_to_course(
self.course_staff, 'staff', self.course.id
))
self.assertFalse(access._has_access_to_course(
self.course_staff, 'instructor', self.course.id
))
# A user has staff and instructor access if they are in the instructor group
self.assertTrue(access._has_access_to_course(
self.course_instructor, 'staff', self.course.id
))
self.assertTrue(access._has_access_to_course(
self.course_instructor, 'instructor', self.course.id
))
# A user does not have staff or instructor access if they are
        # not in either the staff or the instructor group
self.assertFalse(access._has_access_to_course(
self.student, 'staff', self.course.id
))
self.assertFalse(access._has_access_to_course(
self.student, 'instructor', self.course.id
))
self.assertFalse(access._has_access_to_course(
self.student, 'not_staff_or_instructor', self.course.id
))
def test__has_access_string(self):
user = Mock(is_staff=True)
self.assertFalse(access._has_access_string(user, 'staff', 'not_global'))
user._has_global_staff_access.return_value = True
self.assertTrue(access._has_access_string(user, 'staff', 'global'))
self.assertRaises(ValueError, access._has_access_string, user, 'not_staff', 'global')
@ddt.data(
('load', False, True, True),
('staff', False, True, True),
('instructor', False, False, True)
)
@ddt.unpack
def test__has_access_error_desc(self, action, expected_student, expected_staff, expected_instructor):
descriptor = Mock()
for (user, expected_response) in (
(self.student, expected_student),
(self.course_staff, expected_staff),
(self.course_instructor, expected_instructor)
):
self.assertEquals(
bool(access._has_access_error_desc(user, action, descriptor, self.course.id)),
expected_response
)
with self.assertRaises(ValueError):
access._has_access_error_desc(self.course_instructor, 'not_load_or_staff', descriptor, self.course.id)
def test__has_access_descriptor(self):
# TODO: override DISABLE_START_DATES and test the start date branch of the method
user = Mock()
descriptor = Mock(user_partitions=[])
descriptor._class_tags = {}
# Always returns true because DISABLE_START_DATES is set in test.py
self.assertTrue(access._has_access_descriptor(user, 'load', descriptor))
self.assertTrue(access._has_access_descriptor(user, 'instructor', descriptor))
with self.assertRaises(ValueError):
access._has_access_descriptor(user, 'not_load_or_staff', descriptor)
@ddt.data(
(True, None, access_response.VisibilityError),
(False, None),
(True, YESTERDAY, access_response.VisibilityError),
(False, YESTERDAY),
(True, TOMORROW, access_response.VisibilityError),
(False, TOMORROW, access_response.StartDateError)
)
@ddt.unpack
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test__has_access_descriptor_staff_lock(self, visible_to_staff_only, start, expected_error_type=None):
"""
Tests that "visible_to_staff_only" overrides start date.
"""
expected_access = expected_error_type is None
mock_unit = Mock(location=self.course.location, user_partitions=[])
mock_unit._class_tags = {} # Needed for detached check in _has_access_descriptor
mock_unit.visible_to_staff_only = visible_to_staff_only
mock_unit.start = start
self.verify_access(mock_unit, expected_access, expected_error_type)
def test__has_access_descriptor_beta_user(self):
mock_unit = Mock(user_partitions=[])
mock_unit._class_tags = {}
mock_unit.days_early_for_beta = 2
mock_unit.start = self.TOMORROW
mock_unit.visible_to_staff_only = False
self.assertTrue(bool(access._has_access_descriptor(
self.beta_user, 'load', mock_unit, course_key=self.course.id)))
@ddt.data(None, YESTERDAY, TOMORROW)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
@patch('courseware.access_utils.get_current_request_hostname', Mock(return_value='preview.localhost'))
def test__has_access_descriptor_in_preview_mode(self, start):
"""
        Tests that a descriptor is accessible in preview mode.
"""
mock_unit = Mock(location=self.course.location, user_partitions=[])
mock_unit._class_tags = {} # Needed for detached check in _has_access_descriptor
mock_unit.visible_to_staff_only = False
mock_unit.start = start
self.verify_access(mock_unit, True)
@ddt.data(
(TOMORROW, access_response.StartDateError),
(None, None),
(YESTERDAY, None)
) # ddt throws an error if I don't put the None argument there
@ddt.unpack
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
@patch('courseware.access_utils.get_current_request_hostname', Mock(return_value='localhost'))
def test__has_access_descriptor_when_not_in_preview_mode(self, start, expected_error_type):
"""
        Tests that a descriptor is inaccessible when its start date is in the future and preview mode is off.
"""
expected_access = expected_error_type is None
mock_unit = Mock(location=self.course.location, user_partitions=[])
mock_unit._class_tags = {} # Needed for detached check in _has_access_descriptor
mock_unit.visible_to_staff_only = False
mock_unit.start = start
self.verify_access(mock_unit, expected_access, expected_error_type)
def test__has_access_course_can_enroll(self):
yesterday = datetime.datetime.now(pytz.utc) - datetime.timedelta(days=1)
tomorrow = datetime.datetime.now(pytz.utc) + datetime.timedelta(days=1)
# Non-staff can enroll if authenticated and specifically allowed for that course
# even outside the open enrollment period
user = UserFactory.create()
course = Mock(
enrollment_start=tomorrow, enrollment_end=tomorrow,
id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), enrollment_domain=''
)
CourseEnrollmentAllowedFactory(email=user.email, course_id=course.id)
self.assertTrue(access._has_access_course(user, 'enroll', course))
# Staff can always enroll even outside the open enrollment period
user = StaffFactory.create(course_key=course.id)
self.assertTrue(access._has_access_course(user, 'enroll', course))
# Non-staff cannot enroll if it is between the start and end dates and invitation only
# and not specifically allowed
course = Mock(
enrollment_start=yesterday, enrollment_end=tomorrow,
id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), enrollment_domain='',
invitation_only=True
)
user = UserFactory.create()
self.assertFalse(access._has_access_course(user, 'enroll', course))
# Non-staff can enroll if it is between the start and end dates and not invitation only
course = Mock(
enrollment_start=yesterday, enrollment_end=tomorrow,
id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), enrollment_domain='',
invitation_only=False
)
self.assertTrue(access._has_access_course(user, 'enroll', course))
# Non-staff cannot enroll outside the open enrollment period if not specifically allowed
course = Mock(
enrollment_start=tomorrow, enrollment_end=tomorrow,
id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), enrollment_domain='',
invitation_only=False
)
self.assertFalse(access._has_access_course(user, 'enroll', course))
def test__user_passed_as_none(self):
"""Ensure has_access handles a user being passed as null"""
access.has_access(None, 'staff', 'global', None)
def test__catalog_visibility(self):
"""
Tests the catalog visibility tri-states
"""
user = UserFactory.create()
course_id = SlashSeparatedCourseKey('edX', 'test', '2012_Fall')
staff = StaffFactory.create(course_key=course_id)
course = Mock(
id=course_id,
catalog_visibility=CATALOG_VISIBILITY_CATALOG_AND_ABOUT
)
self.assertTrue(access._has_access_course(user, 'see_in_catalog', course))
self.assertTrue(access._has_access_course(user, 'see_about_page', course))
self.assertTrue(access._has_access_course(staff, 'see_in_catalog', course))
self.assertTrue(access._has_access_course(staff, 'see_about_page', course))
# Now set visibility to just about page
course = Mock(
id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'),
catalog_visibility=CATALOG_VISIBILITY_ABOUT
)
self.assertFalse(access._has_access_course(user, 'see_in_catalog', course))
self.assertTrue(access._has_access_course(user, 'see_about_page', course))
self.assertTrue(access._has_access_course(staff, 'see_in_catalog', course))
self.assertTrue(access._has_access_course(staff, 'see_about_page', course))
# Now set visibility to none, which means neither in catalog nor about pages
course = Mock(
id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'),
catalog_visibility=CATALOG_VISIBILITY_NONE
)
self.assertFalse(access._has_access_course(user, 'see_in_catalog', course))
self.assertFalse(access._has_access_course(user, 'see_about_page', course))
self.assertTrue(access._has_access_course(staff, 'see_in_catalog', course))
self.assertTrue(access._has_access_course(staff, 'see_about_page', course))
@patch.dict("django.conf.settings.FEATURES", {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
def test_access_on_course_with_pre_requisites(self):
"""
Test course access when a course has pre-requisite course yet to be completed
"""
user = UserFactory.create()
pre_requisite_course = CourseFactory.create(
org='test_org', number='788', run='test_run'
)
pre_requisite_courses = [unicode(pre_requisite_course.id)]
course = CourseFactory.create(
org='test_org', number='786', run='test_run', pre_requisite_courses=pre_requisite_courses
)
set_prerequisite_courses(course.id, pre_requisite_courses)
# user should not be able to load course even if enrolled
CourseEnrollmentFactory(user=user, course_id=course.id)
response = access._has_access_course(user, 'view_courseware_with_prerequisites', course)
self.assertFalse(response)
self.assertIsInstance(response, access_response.MilestoneError)
# Staff can always access course
staff = StaffFactory.create(course_key=course.id)
self.assertTrue(access._has_access_course(staff, 'view_courseware_with_prerequisites', course))
        # User should be able to access the course after completing the required course
fulfill_course_milestone(pre_requisite_course.id, user)
self.assertTrue(access._has_access_course(user, 'view_courseware_with_prerequisites', course))
@ddt.data(
(True, True, True),
(False, False, True)
)
@ddt.unpack
def test__access_on_mobile(self, mobile_available, student_expected, staff_expected):
"""
Test course access on mobile for staff and students.
"""
descriptor = Mock(id=self.course.id, user_partitions=[])
descriptor._class_tags = {}
descriptor.visible_to_staff_only = False
descriptor.mobile_available = mobile_available
self.assertEqual(
bool(access._has_access_course(self.student, 'load_mobile', descriptor)),
student_expected
)
self.assertEqual(bool(access._has_access_course(self.staff, 'load_mobile', descriptor)), staff_expected)
@patch.dict("django.conf.settings.FEATURES", {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
def test_courseware_page_unfulfilled_prereqs(self):
"""
Test courseware access when a course has pre-requisite course yet to be completed
"""
pre_requisite_course = CourseFactory.create(
org='edX',
course='900',
run='test_run',
)
pre_requisite_courses = [unicode(pre_requisite_course.id)]
course = CourseFactory.create(
org='edX',
course='1000',
run='test_run',
pre_requisite_courses=pre_requisite_courses,
)
set_prerequisite_courses(course.id, pre_requisite_courses)
test_password = 't3stp4ss.!'
user = UserFactory.create()
user.set_password(test_password)
user.save()
self.login(user.email, test_password)
CourseEnrollmentFactory(user=user, course_id=course.id)
url = reverse('courseware', args=[unicode(course.id)])
response = self.client.get(url)
self.assertRedirects(
response,
reverse(
'dashboard'
)
)
self.assertEqual(response.status_code, 302)
fulfill_course_milestone(pre_requisite_course.id, user)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@attr(shard=1)
class UserRoleTestCase(TestCase):
"""
Tests for user roles.
"""
def setUp(self):
super(UserRoleTestCase, self).setUp()
self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
self.anonymous_user = AnonymousUserFactory()
self.student = UserFactory()
self.global_staff = UserFactory(is_staff=True)
self.course_staff = StaffFactory(course_key=self.course_key)
self.course_instructor = InstructorFactory(course_key=self.course_key)
def _install_masquerade(self, user, role='student'):
"""
Installs a masquerade for the specified user.
"""
user.masquerade_settings = {
self.course_key: CourseMasquerade(self.course_key, role=role)
}
def test_user_role_staff(self):
"""Ensure that user role is student for staff masqueraded as student."""
self.assertEqual(
'staff',
access.get_user_role(self.course_staff, self.course_key)
)
# Masquerade staff
self._install_masquerade(self.course_staff)
self.assertEqual(
'student',
access.get_user_role(self.course_staff, self.course_key)
)
def test_user_role_instructor(self):
"""Ensure that user role is student for instructor masqueraded as student."""
self.assertEqual(
'instructor',
access.get_user_role(self.course_instructor, self.course_key)
)
# Masquerade instructor
self._install_masquerade(self.course_instructor)
self.assertEqual(
'student',
access.get_user_role(self.course_instructor, self.course_key)
)
def test_user_role_anonymous(self):
"""Ensure that user role is student for anonymous user."""
self.assertEqual(
'student',
access.get_user_role(self.anonymous_user, self.course_key)
)
@attr(shard=3)
@ddt.ddt
class CourseOverviewAccessTestCase(ModuleStoreTestCase):
"""
Tests confirming that has_access works equally on CourseDescriptors and
CourseOverviews.
"""
def setUp(self):
super(CourseOverviewAccessTestCase, self).setUp()
today = datetime.datetime.now(pytz.UTC)
last_week = today - datetime.timedelta(days=7)
next_week = today + datetime.timedelta(days=7)
self.course_default = CourseFactory.create()
self.course_started = CourseFactory.create(start=last_week)
self.course_not_started = CourseFactory.create(start=next_week, days_early_for_beta=10)
self.course_staff_only = CourseFactory.create(visible_to_staff_only=True)
self.course_mobile_available = CourseFactory.create(mobile_available=True)
self.course_with_pre_requisite = CourseFactory.create(
pre_requisite_courses=[str(self.course_started.id)]
)
self.course_with_pre_requisites = CourseFactory.create(
pre_requisite_courses=[str(self.course_started.id), str(self.course_not_started.id)]
)
self.user_normal = UserFactory.create()
self.user_beta_tester = BetaTesterFactory.create(course_key=self.course_not_started.id)
self.user_completed_pre_requisite = UserFactory.create()
fulfill_course_milestone(self.course_started.id, self.user_completed_pre_requisite)
self.user_staff = UserFactory.create(is_staff=True)
self.user_anonymous = AnonymousUserFactory.create()
COURSE_TEST_DATA = list(itertools.product(
['user_normal', 'user_staff', 'user_anonymous'],
['enroll', 'load', 'staff', 'instructor', 'see_exists', 'see_in_catalog', 'see_about_page'],
['course_default', 'course_started', 'course_not_started', 'course_staff_only'],
))
LOAD_MOBILE_TEST_DATA = list(itertools.product(
['user_normal', 'user_staff'],
['load_mobile'],
['course_default', 'course_mobile_available'],
))
PREREQUISITES_TEST_DATA = list(itertools.product(
['user_normal', 'user_completed_pre_requisite', 'user_staff', 'user_anonymous'],
['view_courseware_with_prerequisites'],
['course_default', 'course_with_pre_requisite', 'course_with_pre_requisites'],
))
@ddt.data(*(COURSE_TEST_DATA + LOAD_MOBILE_TEST_DATA + PREREQUISITES_TEST_DATA))
@ddt.unpack
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_course_overview_access(self, user_attr_name, action, course_attr_name):
"""
Check that a user's access to a course is equal to the user's access to
the corresponding course overview.
Instead of taking a user and course directly as arguments, we have to
take their attribute names, as ddt doesn't allow us to reference self.
Arguments:
user_attr_name (str): the name of the attribute on self that is the
User to test with.
action (str): action to test with.
course_attr_name (str): the name of the attribute on self that is
the CourseDescriptor to test with.
"""
user = getattr(self, user_attr_name)
course = getattr(self, course_attr_name)
course_overview = CourseOverview.get_from_id(course.id)
self.assertEqual(
bool(access.has_access(user, action, course, course_key=course.id)),
bool(access.has_access(user, action, course_overview, course_key=course.id))
)
def test_course_overview_unsupported_action(self):
"""
Check that calling has_access with an unsupported action raises a
ValueError.
"""
overview = CourseOverview.get_from_id(self.course_default.id)
with self.assertRaises(ValueError):
access.has_access(self.user, '_non_existent_action', overview)
@ddt.data(
*itertools.product(
['user_normal', 'user_staff', 'user_anonymous'],
['see_exists', 'see_in_catalog', 'see_about_page'],
['course_default', 'course_started', 'course_not_started'],
)
)
@ddt.unpack
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_course_catalog_access_num_queries(self, user_attr_name, action, course_attr_name):
course = getattr(self, course_attr_name)
# get a fresh user object that won't have any cached role information
if user_attr_name == 'user_anonymous':
user = AnonymousUserFactory()
else:
user = getattr(self, user_attr_name)
user = User.objects.get(id=user.id)
if user_attr_name == 'user_staff' and action == 'see_exists' and course_attr_name == 'course_not_started':
# checks staff role
num_queries = 1
elif user_attr_name == 'user_normal' and action == 'see_exists' and course_attr_name != 'course_started':
# checks staff role and enrollment data
num_queries = 2
else:
num_queries = 0
course_overview = CourseOverview.get_from_id(course.id)
with self.assertNumQueries(num_queries):
bool(access.has_access(user, action, course_overview, course_key=course.id))
|
lifanov/cobbler
|
refs/heads/master
|
tests/cobbler_xmlrpc_test.py
|
16
|
import logging
import os
import random
import re
import sys
import time
import unittest
import xmlrpclib
from cobbler.remote import EVENT_COMPLETE
from cobbler.utils import local_get_cobbler_api_url, get_shared_secret
FAKE_INITRD="initrd1.img"
FAKE_INITRD2="initrd2.img"
FAKE_INITRD3="initrd3.img"
FAKE_KERNEL="vmlinuz1"
FAKE_KERNEL2="vmlinuz2"
FAKE_KERNEL3="vmlinuz3"
TEST_POWER_MANAGEMENT = True
TEST_SYSTEM = ""
cleanup_dirs = []
def tprint(call_name):
"""
Print a remote call debug message
@param str call_name remote call name
"""
print("test remote call: %s()" % call_name)
# TODO: test remote.background_aclsetup()
# TODO: test remote.background_buildiso()
# TODO: test remote.background_dlcontent()
# TODO: test remote.background_hardlink()
# TODO: test remote.background_import()
# TODO: test remote.background_replicate()
# TODO: test remote.background_reposync()
# TODO: test remote.background_validateks()
# TODO: test remote.clear_system_logs()
# TODO: test remote.disable_netboot()
# TODO: test remote.extended_version()
# TODO: test remote.find_items_paged()
# TODO: test remote.find_system_by_dns_name()
# TODO: test remote.generatescript()
# TODO: test remote.get_<item>_as_rendered()
# TODO: test remote.get_<item>s_since()
# TODO: test remote.get_authn_module_name()
# TODO: test remote.get_blended_data()
# TODO: test remote.get_config_dataa()
# TODO: test remote.get_repos_compatible_with_profile()
# TODO: test remote.get_status()
# TODO: test remote.get_template_file_for_profile()
# TODO: test remote.get_template_file_for_system()
# TODO: test remote.is_kickstart_in_use()
# TODO: test remote.logout()
# TODO: test remote.modify_setting()
# TODO: test remote.read_or_write_kickstart_template()
# TODO: test remote.read_or_write_snippet()
# TODO: test remote.run_install_triggers()
# TODO: test remote.version()
# TODO: test remote.xapi_object_edit()
class CobblerXmlRpcTest(unittest.TestCase):
def setUp(self):
"""
Setup Cobbler XML-RPC connection and login
"""
# create logger
logging.basicConfig( stream=sys.stderr )
self.logger = logging.getLogger( self.__class__.__name__ )
self.logger.setLevel( logging.DEBUG )
# create XML-RPC client and connect to server
api_url = local_get_cobbler_api_url()
self.remote = xmlrpclib.Server(api_url, allow_none=True)
shared_secret = get_shared_secret()
self.token = self.remote.login("", shared_secret)
if not self.token:
sys.exit(1)
def tearDown(self):
"""
Cleanup here
"""
return
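# A minimal sketch of the same connection flow outside the test harness
# (illustrative URL and credentials; assumes a running cobblerd):
#
#   import xmlrpclib
#   remote = xmlrpclib.Server("http://127.0.0.1/cobbler_api", allow_none=True)
#   token = remote.login("cobbler", "cobbler")
#   for distro in remote.get_distros(token):
#       print(distro["name"])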
class Test_DistroProfileSystem(CobblerXmlRpcTest):
"""
    Test remote calls related to distros, profiles and systems.
    These item types are tested together because they have inter-dependencies.
"""
def setUp(self):
super(Test_DistroProfileSystem, self).setUp()
# Create temp dir
self.topdir = "/tmp/cobbler_test"
        try:
            os.makedirs(self.topdir)
        except OSError:
            # the directory may already exist
            pass
# create temp files
self.fk_initrd = os.path.join(self.topdir, FAKE_INITRD)
self.fk_initrd2 = os.path.join(self.topdir, FAKE_INITRD2)
self.fk_initrd3 = os.path.join(self.topdir, FAKE_INITRD3)
self.fk_kernel = os.path.join(self.topdir, FAKE_KERNEL)
self.fk_kernel2 = os.path.join(self.topdir, FAKE_KERNEL2)
self.fk_kernel3 = os.path.join(self.topdir, FAKE_KERNEL3)
self.redhat_kickstart = os.path.join(self.topdir, "test.ks")
self.suse_autoyast = os.path.join(self.topdir, "test.xml")
self.ubuntu_preseed = os.path.join(self.topdir, "test.seed")
self.files_create = [
self.fk_initrd, self.fk_initrd2, self.fk_initrd3,
self.fk_kernel, self.fk_kernel2, self.fk_kernel3,
self.redhat_kickstart, self.suse_autoyast, self.ubuntu_preseed
]
for fn in self.files_create:
f = open(fn,"w+")
f.close()
self.distro_fields = [
# field format: field_name, good value(s), bad value(s)
# field order is the order in which they will be set
# TODO: include fields with dependencies: fetchable files, boot files, etc.
["arch",["i386","x86_64","ppc","ppc64"],["badarch"]],
# generic must be last breed to be set so os_version test below will work
["breed",["debian","freebsd","redhat","suse","ubuntu","unix","vmware","windows","xen", "generic"],["badbreed"]],
["comment",["test comment",],[]],
["initrd",[self.fk_initrd,],["",]],
["name",["testdistro0"],[]],
["kernel",[self.fk_kernel,],["",]],
["kernel_options",["a=1 b=2 c=3 c=4 c=5 d e",],[]],
["kernel_options_post",["a=1 b=2 c=3 c=4 c=5 d e",],[]],
["ks_meta",["a=1 b=2 c=3 c=4 c=5 d e",],[]],
["mgmt_classes",["one two three",],[]],
["os_version",["generic26",],["bados",]],
["owners",["user1 user2 user3",],[]],
]
self.profile_fields = [
# field format: field_name, good value(s), bad value(s)
# TODO: include fields with dependencies: fetchable files, boot files,
# template files, repos
["comment",["test comment"],[]],
["dhcp_tag",["","foo"],[]],
["distro",["testdistro0"],["baddistro",]],
["enable_gpxe",["yes","YES","1","0","no"],[]],
["enable_menu",["yes","YES","1","0","no"],[]],
["kernel_options",["a=1 b=2 c=3 c=4 c=5 d e"],[]],
["kernel_options_post",["a=1 b=2 c=3 c=4 c=5 d e"],[]],
["kickstart",[self.redhat_kickstart,self.suse_autoyast,self.ubuntu_preseed],["/path/to/bad/kickstart",]],
["ks_meta",["a=1 b=2 c=3 c=4 c=5 d e",],[]],
["mgmt_classes",["one two three",],[]],
["mgmt_parameters",["<<inherit>>"],["badyaml"]], # needs more test cases that are valid yaml
["name",["testprofile0"],[]],
["name_servers",["1.1.1.1 1.1.1.2 1.1.1.3"],[]],
["name_servers_search",["example.com foo.bar.com"],[]],
["owners",["user1 user2 user3"],[]],
["proxy",["testproxy"],[]],
["server",["1.1.1.1"],[]],
["virt_auto_boot",["1","0"],["yes","no"]],
["virt_bridge",["<<inherit>>","br0","virbr0","xenbr0"],[]],
["virt_cpus",["<<inherit>>","1","2"],["a"]],
["virt_disk_driver",["<<inherit>>","raw","qcow2","vmdk"],[]],
["virt_file_size",["<<inherit>>","5","10"],["a"]],
["virt_path",["<<inherit>>","/path/to/test",],[]],
["virt_ram",["<<inherit>>","256","1024"],["a",]],
["virt_type",["<<inherit>>","xenpv","xenfv","qemu","kvm","vmware","openvz"],["bad",]],
]
self.system_fields = [
# field format: field_name, good value(s), bad value(s)
# TODO: include fields with dependencies: fetchable files, boot files,
# template files, images
["comment",["test comment"],[]],
["enable_gpxe",["yes","YES","1","0","no"],[]],
["kernel_options",["a=1 b=2 c=3 c=4 c=5 d e"],[]],
["kernel_options_post",["a=1 b=2 c=3 c=4 c=5 d e"],[]],
["kickstart",[self.redhat_kickstart,self.suse_autoyast,self.ubuntu_preseed],["/path/to/bad/kickstart",]],
["ks_meta",["a=1 b=2 c=3 c=4 c=5 d e",],[]],
["mgmt_classes",["one two three",],[]],
["mgmt_parameters",["<<inherit>>"],["badyaml"]], # needs more test cases that are valid yaml
["name",["testsystem0"],[]],
["netboot_enabled",["yes","YES","1","0","no"],[]],
["owners",["user1 user2 user3"],[]],
["profile",["testprofile0"],["badprofile",]],
["repos_enabled", [], []],
["status",["development","testing","acceptance","production"],[]],
["proxy",["testproxy"],[]],
["server",["1.1.1.1"],[]],
["virt_auto_boot",["1","0"],["yes","no"]],
["virt_cpus",["<<inherit>>","1","2"],["a"]],
["virt_file_size",["<<inherit>>","5","10"],["a"]],
["virt_disk_driver",["<<inherit>>","raw","qcow2","vmdk"],[]],
["virt_ram",["<<inherit>>","256","1024"],["a",]],
["virt_type",["<<inherit>>","xenpv","xenfv","qemu","kvm","vmware","openvz"],["bad",]],
["virt_path",["<<inherit>>","/path/to/test",],[]],
["virt_pxe_boot",["1", "0"],[]],
# network
["gateway", [], []],
["hostname", ["test"], []],
["ipv6_autoconfiguration", [], []],
["ipv6_default_device", [], []],
["name_servers", ["9.1.1.3"], []],
["name_servers_search", [], []],
# network - network interface specific
# TODO: test these fields
["bonding_opts-eth0", [], []],
["bridge_opts-eth0", [], []],
["cnames-eth0", [], []],
["dhcp_tag-eth0", [], []],
["dns_name-eth0", [], []],
["if_gateway-eth0", [], []],
["interface_type-eth0", [], []],
["interface_master-eth0", [], []],
["ip_address-eth0", [], []],
["ipv6_address-eth0", [], []],
["ipv6_secondaries-eth0", [], []],
["ipv6_mtu-eth0", [], []],
["ipv6_static_routes-eth0", [], []],
["ipv6_default_gateway-eth0", [], []],
["mac_address-eth0", [], []],
["mtu-eth0", [], []],
["management-eth0", [], []],
["netmask-eth0", [], []],
["static-eth0", [], []],
["static_routes-eth0", [], []],
["virt_bridge-eth0", [], []],
# power management
["power_type", ["lpar"], ["bla"]],
["power_address", ["127.0.0.1"], []],
["power_id", ["pmachine:lpar1"], []],
["power_pass", ["pass"], []],
["power_user", ["user"], []]
]
def tearDown(self):
super(Test_DistroProfileSystem, self).tearDown()
for fn in self.files_create:
os.remove(fn)
def _get_distros(self):
"""
Test: get distros
"""
tprint("get_distros")
self.remote.get_distros(self.token)
def _get_profiles(self):
"""
Test: get profiles
"""
tprint("get_profiles")
self.remote.get_profiles(self.token)
def _get_systems(self):
"""
Test: get systems
"""
tprint("get_systems")
self.remote.get_systems(self.token)
def _create_distro(self):
"""
Test: create/edit a distro
"""
distros = self.remote.get_distros(self.token)
tprint("new_distro")
distro = self.remote.new_distro(self.token)
tprint("modify_distro")
for field in self.distro_fields:
(fname, fgood, fbad) = field
for fb in fbad:
try:
self.remote.modify_distro(distro, fname, fb, self.token)
                except Exception:
pass
else:
self.fail("bad field (%s=%s) did not raise an exception" % (fname,fb))
for fg in fgood:
try:
result = self.remote.modify_distro(distro, fname, fg, self.token)
self.assertTrue(result)
except Exception as e:
self.fail("good field (%s=%s) raised exception: %s" % (fname,fg, str(e)))
tprint("save_distro")
self.assertTrue(self.remote.save_distro(distro, self.token))
# FIXME: if field in item_<type>.FIELDS defines possible values,
# test all of them. This is valid for all item types
# for field in item_system.FIELDS:
# (fname,def1,def2,display,editable,tooltip,values,type) = field
# if fname not in ["name","distro","parent"] and editable:
# if values and isinstance(values,list):
# fvalue = random.choice(values)
# else:
# fvalue = "testing_" + fname
# self.assertTrue(self.remote.modify_profile(subprofile,fname,fvalue,self.token))
new_distros = self.remote.get_distros(self.token)
self.assertTrue(len(new_distros) == len(distros) + 1)
def _create_profile(self):
"""
Test: create/edit a profile object"""
profiles = self.remote.get_profiles(self.token)
tprint("new_profile")
profile = self.remote.new_profile(self.token)
tprint("modify_profile")
for field in self.profile_fields:
(fname,fgood,fbad) = field
for fb in fbad:
try:
self.remote.modify_profile(profile,fname,fb,self.token)
                except Exception:
pass
else:
self.fail("bad field (%s=%s) did not raise an exception" % (fname,fb))
for fg in fgood:
try:
self.assertTrue(self.remote.modify_profile(profile,fname,fg,self.token))
except Exception as e:
self.fail("good field (%s=%s) raised exception: %s" % (fname,fg, str(e)))
tprint("save_profile")
self.assertTrue(self.remote.save_profile(profile,self.token))
new_profiles = self.remote.get_profiles(self.token)
self.assertTrue(len(new_profiles) == len(profiles) + 1)
def _create_subprofile(self):
"""
Test: create/edit a subprofile object"""
profiles = self.remote.get_profiles(self.token)
tprint("new_subprofile")
subprofile = self.remote.new_subprofile(self.token)
tprint("modify_profile")
self.assertTrue(self.remote.modify_profile(subprofile,"name","testsubprofile0",self.token))
self.assertTrue(self.remote.modify_profile(subprofile,"parent","testprofile0",self.token))
tprint("save_profile")
self.assertTrue(self.remote.save_profile(subprofile,self.token))
new_profiles = self.remote.get_profiles(self.token)
self.assertTrue(len(new_profiles) == len(profiles) + 1)
def _create_system(self):
"""
Test: create/edit a system object
"""
systems = self.remote.get_systems(self.token)
tprint("new_system")
system = self.remote.new_system(self.token)
tprint("modify_system")
self.assertTrue(self.remote.modify_system(system,"name","testsystem0",self.token))
self.assertTrue(self.remote.modify_system(system,"profile","testprofile0",self.token))
for field in self.system_fields:
(fname,fgood,fbad) = field
for fb in fbad:
try:
self.remote.modify_system(system,fname,fb,self.token)
                except Exception:
pass
else:
self.fail("bad field (%s=%s) did not raise an exception" % (fname,fb))
for fg in fgood:
try:
self.assertTrue(self.remote.modify_system(system,fname,fg,self.token))
except Exception as e:
self.fail("good field (%s=%s) raised exception: %s" % (fname,fg, str(e)))
tprint("save_system")
self.assertTrue(self.remote.save_system(system,self.token))
new_systems = self.remote.get_systems(self.token)
self.assertTrue(len(new_systems) == len(systems) + 1)
def _get_distro(self):
"""
Test: get a distro object"""
tprint("get_distro")
distro = self.remote.get_distro("testdistro0")
def _get_profile(self):
"""
Test: get a profile object"""
tprint("get_profile")
profile = self.remote.get_profile("testprofile0")
def _get_system(self):
"""
Test: get a system object"""
tprint("get_system")
system = self.remote.get_system("testsystem0")
def _find_distro(self):
"""
Test: find a distro object
"""
tprint("find_distro")
result = self.remote.find_distro({"name":"testdistro0"}, self.token)
self.assertTrue(result)
def _find_profile(self):
"""
Test: find a profile object
"""
tprint("find_profile")
result = self.remote.find_profile({"name":"testprofile0"}, self.token)
self.assertTrue(result)
def _find_system(self):
"""
Test: find a system object
"""
tprint("find_system")
result = self.remote.find_system({"name":"testsystem0"}, self.token)
self.assertTrue(result)
def _copy_distro(self):
"""
Test: copy a distro object
"""
tprint("copy_distro")
distro = self.remote.get_item_handle("distro","testdistro0",self.token)
self.assertTrue(self.remote.copy_distro(distro,"testdistrocopy",self.token))
def _copy_profile(self):
"""
Test: copy a profile object
"""
tprint("copy_profile")
profile = self.remote.get_item_handle("profile","testprofile0",self.token)
self.assertTrue(self.remote.copy_profile(profile,"testprofilecopy",self.token))
def _copy_system(self):
"""
Test: copy a system object
"""
tprint("copy_system")
system = self.remote.get_item_handle("system","testsystem0",self.token)
self.assertTrue(self.remote.copy_system(system,"testsystemcopy",self.token))
def _rename_distro(self):
"""
Test: rename a distro object
"""
tprint("rename_distro")
distro = self.remote.get_item_handle("distro","testdistrocopy",self.token)
self.assertTrue(self.remote.rename_distro(distro,"testdistro1",self.token))
def _rename_profile(self):
"""
Test: rename a profile object
"""
tprint("rename_profile")
profile = self.remote.get_item_handle("profile","testprofilecopy",self.token)
self.assertTrue(self.remote.rename_profile(profile,"testprofile1",self.token))
def _rename_system(self):
"""
Test: rename a system object
"""
tprint("rename_system")
system = self.remote.get_item_handle("system","testsystemcopy",self.token)
self.assertTrue(self.remote.rename_system(system,"testsystem1",self.token))
def _remove_distro(self):
"""
Test: remove a distro object
"""
tprint("remove_distro")
self.assertTrue(self.remote.remove_distro("testdistro0",self.token))
self.assertTrue(self.remote.remove_distro("testdistro1",self.token))
def _remove_profile(self):
"""
Test: remove a profile object
"""
tprint("remove_profile")
self.assertTrue(self.remote.remove_profile("testsubprofile0",self.token))
self.assertTrue(self.remote.remove_profile("testprofile0",self.token))
self.assertTrue(self.remote.remove_profile("testprofile1",self.token))
def _remove_system(self):
"""
Test: remove a system object
"""
tprint("remove_system")
self.assertTrue(self.remote.remove_system("testsystem0",self.token))
self.assertTrue(self.remote.remove_system("testsystem1",self.token))
def _get_repo_config_for_profile(self):
"""
Test: get repository configuration of a profile
"""
self.remote.get_repo_config_for_profile("testprofile0")
def _get_repo_config_for_system(self):
"""
Test: get repository configuration of a system
"""
        self.remote.get_repo_config_for_system("testsystem0")
def test_distro_profile_system(self):
"""
Test remote calls related to distro, profile and system
"""
self._get_distros()
self._create_distro()
self._get_distro()
self._find_distro()
self._copy_distro()
self._rename_distro()
self._get_profiles()
self._create_profile()
self._create_subprofile()
self._get_profile()
self._find_profile()
self._copy_profile()
self._rename_profile()
self._get_repo_config_for_profile()
self._get_systems()
self._create_system()
self._get_system()
self._find_system()
self._copy_system()
self._rename_system()
self._get_repo_config_for_system()
self._remove_system()
self._remove_profile()
self._remove_distro()
class Test_Repo(CobblerXmlRpcTest):
def _create_repo(self):
"""
Test: create/edit a repo object
"""
repos = self.remote.get_repos(self.token)
tprint("new_repo")
repo = self.remote.new_repo(self.token)
tprint("modify_repo")
self.assertTrue(self.remote.modify_repo(repo, "name", "testrepo0", self.token))
self.assertTrue(self.remote.modify_repo(repo, "mirror", "http://www.sample.com/path/to/some/repo", self.token))
self.assertTrue(self.remote.modify_repo(repo, "mirror_locally", "0", self.token))
tprint("save_repo")
self.assertTrue(self.remote.save_repo(repo, self.token))
new_repos = self.remote.get_repos(self.token)
self.assertTrue(len(new_repos) == len(repos) + 1)
def _get_repos(self):
"""
Test: Get repos
"""
tprint("get_repos")
self.remote.get_repos()
def _get_repo(self):
"""
Test: Get a repo object
"""
tprint("get_repo")
repo = self.remote.get_repo("testrepo0")
def _find_repo(self):
"""
Test: find a repo object
"""
tprint("find_repo")
result = self.remote.find_repo({"name":"testrepo0"}, self.token)
self.assertTrue(result)
def _copy_repo(self):
"""
Test: copy a repo object
"""
tprint("copy_repo")
repo = self.remote.get_item_handle("repo","testrepo0",self.token)
self.assertTrue(self.remote.copy_repo(repo,"testrepocopy",self.token))
def _rename_repo(self):
"""
Test: rename a repo object
"""
tprint("rename_repo")
repo = self.remote.get_item_handle("repo","testrepocopy",self.token)
self.assertTrue(self.remote.rename_repo(repo,"testrepo1",self.token))
def _remove_repo(self):
"""
Test: remove a repo object
"""
tprint("remove_repo")
self.assertTrue(self.remote.remove_repo("testrepo0",self.token))
self.assertTrue(self.remote.remove_repo("testrepo1",self.token))
def test_repo(self):
self._get_repos()
self._create_repo()
self._get_repo()
self._find_repo()
self._copy_repo()
self._rename_repo()
self._remove_repo()
class Test_MgmtClass(CobblerXmlRpcTest):
def _create_mgmtclass(self):
"""
Test: create/edit a mgmtclass object
"""
mgmtclasses = self.remote.get_mgmtclasses(self.token)
tprint("new_mgmtclass")
mgmtclass = self.remote.new_mgmtclass(self.token)
tprint("modify_mgmtclass")
self.assertTrue(self.remote.modify_mgmtclass(mgmtclass,"name","testmgmtclass0",self.token))
tprint("save_mgmtclass")
self.assertTrue(self.remote.save_mgmtclass(mgmtclass,self.token))
new_mgmtclasses = self.remote.get_mgmtclasses(self.token)
self.assertTrue(len(new_mgmtclasses) == len(mgmtclasses) + 1)
def _get_mgmtclasses(self):
"""
Test: Get mgmtclasses objects
"""
tprint("get_mgmtclasses")
self.remote.get_mgmtclasses()
def _get_mgmtclass(self):
"""
Test: get a mgmtclass object
"""
tprint("get_mgmtclass")
mgmtclass = self.remote.get_mgmtclass("testmgmtclass0")
def _find_mgmtclass(self):
"""
Test: find a mgmtclass object
"""
tprint("find_mgmtclass")
result = self.remote.find_mgmtclass({"name":"testmgmtclass0"}, self.token)
self.assertTrue(result)
def _copy_mgmtclass(self):
"""
Test: copy a mgmtclass object
"""
tprint("copy_mgmtclass")
mgmtclass = self.remote.get_item_handle("mgmtclass","testmgmtclass0",self.token)
self.assertTrue(self.remote.copy_mgmtclass(mgmtclass,"testmgmtclasscopy",self.token))
def _rename_mgmtclass(self):
"""
Test: rename a mgmtclass object
"""
tprint("rename_mgmtclass")
mgmtclass = self.remote.get_item_handle("mgmtclass","testmgmtclasscopy",self.token)
self.assertTrue(self.remote.rename_mgmtclass(mgmtclass,"testmgmtclass1",self.token))
def _remove_mgmtclass(self):
"""
Test: remove a mgmtclass object
"""
tprint("remove_mgmtclass")
self.assertTrue(self.remote.remove_mgmtclass("testmgmtclass0",self.token))
self.assertTrue(self.remote.remove_mgmtclass("testmgmtclass1",self.token))
def test_mgmtclass(self):
self._get_mgmtclasses()
self._create_mgmtclass()
self._get_mgmtclass()
self._find_mgmtclass()
self._copy_mgmtclass()
self._rename_mgmtclass()
self._remove_mgmtclass()
class Test_Image(CobblerXmlRpcTest):
def _create_image(self):
"""
Test: create/edit of an image object"""
images = self.remote.get_images(self.token)
tprint("new_image")
image = self.remote.new_image(self.token)
tprint("modify_image")
self.assertTrue(self.remote.modify_image(image,"name","testimage0",self.token))
tprint("save_image")
self.assertTrue(self.remote.save_image(image,self.token))
new_images = self.remote.get_images(self.token)
self.assertTrue(len(new_images) == len(images) + 1)
def _get_images(self):
"""
Test: get images
"""
tprint("get_images")
self.remote.get_images()
def _get_image(self):
"""
Test: Get an image object
"""
tprint("get_image")
image = self.remote.get_image("testimage0")
def _find_image(self):
"""
Test: Find an image object
"""
tprint("find_image")
result = self.remote.find_image({"name":"testimage0"}, self.token)
self.assertTrue(result)
def _copy_image(self):
"""
Test: Copy an image object
"""
tprint("find_image")
image = self.remote.get_item_handle("image","testimage0",self.token)
self.assertTrue(self.remote.copy_image(image,"testimagecopy",self.token))
def _rename_image(self):
"""
Test: Rename an image object
"""
tprint("rename_image")
image = self.remote.get_item_handle("image","testimagecopy",self.token)
self.assertTrue(self.remote.rename_image(image,"testimage1",self.token))
def _remove_image(self):
"""
Test: remove an image object
"""
tprint("remove_image")
self.assertTrue(self.remote.remove_image("testimage0",self.token))
self.assertTrue(self.remote.remove_image("testimage1",self.token))
def test_image(self):
self._get_images()
self._create_image()
self._get_image()
self._find_image()
self._copy_image()
self._rename_image()
self._remove_image()
class Test_Package(CobblerXmlRpcTest):
def _create_package(self):
"""
Test: create/edit a package object
"""
        packages = self.remote.get_packages(self.token)
tprint("new_package")
package = self.remote.new_package(self.token)
tprint("modify_package")
self.assertTrue(self.remote.modify_package(package,"name","testpackage0",self.token))
tprint("save_package")
self.assertTrue(self.remote.save_package(package, self.token))
new_packages = self.remote.get_packages(self.token)
self.assertTrue(len(new_packages) == len(packages) + 1)
def _get_packages(self):
"""
Test: Get packages
"""
tprint("get_package")
package = self.remote.get_packages()
def _get_package(self):
"""
Test: Get a package object
"""
tprint("get_package")
package = self.remote.get_package("testpackage0")
def _find_package(self):
"""
Test: find a package object
"""
tprint("find_package")
result = self.remote.find_package({"name":"testpackage0"}, self.token)
self.assertTrue(result)
def _copy_package(self):
"""
Test: copy a package object
"""
tprint("copy_package")
package = self.remote.get_item_handle("package","testpackage0",self.token)
self.assertTrue(self.remote.copy_package(package,"testpackagecopy",self.token))
def _rename_package(self):
"""
Test: rename a package object
"""
tprint("rename_package")
package = self.remote.get_item_handle("package","testpackagecopy",self.token)
self.assertTrue(self.remote.rename_package(package,"testpackage1",self.token))
def _remove_package(self):
"""
Test: remove a package object
"""
tprint("remove_package")
self.assertTrue(self.remote.remove_package("testpackage0",self.token))
self.assertTrue(self.remote.remove_package("testpackage1",self.token))
def test_package(self):
self._get_packages()
self._create_package()
self._get_package()
self._find_package()
self._copy_package()
self._rename_package()
self._remove_package()
class Test_File(CobblerXmlRpcTest):
"""
Test remote calls related to files
"""
def _create_file(self):
files = self.remote.get_files(self.token)
tprint("new_file")
file_id = self.remote.new_file(self.token)
tprint("modify_file")
self.remote.modify_file(file_id, "name", "testfile0", self.token)
self.remote.modify_file(file_id, "is_directory", "False", self.token)
self.remote.modify_file(file_id, "action", "create", self.token)
self.remote.modify_file(file_id, "group", "root", self.token)
self.remote.modify_file(file_id, "mode", "0644", self.token)
self.remote.modify_file(file_id, "owner", "root", self.token)
self.remote.modify_file(file_id, "path", "/root/testfile0", self.token)
self.remote.modify_file(file_id, "template", "testtemplate0", self.token)
tprint("save_file")
self.remote.save_file(file_id, self.token)
new_files = self.remote.get_files(self.token)
self.assertTrue(len(new_files) == len(files) + 1)
def _get_files(self):
"""
Test: get files
"""
tprint("get_files")
self.remote.get_files(self.token)
def _get_file(self):
"""
Test: Get a file object
"""
tprint("get_file")
file = self.remote.get_file("testfile0")
def _find_file(self):
"""
Test: find a file object
"""
tprint("find_file")
result = self.remote.find_file({"name":"testfile0"}, self.token)
self.assertTrue(result)
def _copy_file(self):
"""
Test: copy a file object
"""
tprint("copy_file")
file = self.remote.get_item_handle("file", "testfile0", self.token)
self.assertTrue(self.remote.copy_file(file, "testfilecopy", self.token))
def _rename_file(self):
"""
Test: rename a file object
"""
tprint("rename_file")
file = self.remote.get_item_handle("file","testfilecopy",self.token)
self.assertTrue(self.remote.rename_file(file,"testfile1",self.token))
def _remove_file(self):
"""
Test: remove a file object
"""
tprint("remove_file")
self.assertTrue(self.remote.remove_file("testfile0",self.token))
self.assertTrue(self.remote.remove_file("testfile1",self.token))
def test_file(self):
self._get_files()
self._create_file()
self._get_file()
self._find_file()
self._copy_file()
self._rename_file()
self._remove_file()
class Test_Item(CobblerXmlRpcTest):
"""
Test item
"""
def _get_item(self, type):
"""
Test: get a generic item
@param str type item type
"""
tprint("get_item")
item = self.remote.get_item(type, "test%s2" % type)
def _find_item(self, type):
"""
Test: find a generic item
@param str type item type
"""
tprint("find_items")
result = self.remote.find_items(type, {"name":"test%s2" % type}, None, False)
self.assertTrue(len(result) > 0)
def _copy_item(self, type):
"""
Test: copy a generic item
@param str type item type
"""
tprint("copy_item")
item_id = self.remote.get_item_handle(type, "test%s2" % type, self.token)
result = self.remote.copy_item(type, item_id, "test%scopy" % type, self.token)
self.assertTrue(result)
def _has_item(self, type):
"""
Test: check if an item is in a item collection
@param str type item type
"""
tprint("has_item")
result = self.remote.has_item(type, "test%s2" % type, self.token)
self.assertTrue(result)
def _rename_item(self, type):
"""
Test: rename a generic item
@param str type item type
"""
tprint("rename_item")
item_id = self.remote.get_item_handle(type, "test%scopy" % type, self.token)
result = self.remote.rename_item(type, item_id, "test%s3" % type, self.token)
self.assertTrue(result)
def _remove_item(self, type):
"""
Test: remove a generic item
@param str type item type
"""
tprint("remove_item")
self.assertTrue(self.remote.remove_item(type, "test%s2" % type, self.token))
self.assertTrue(self.remote.remove_item(type, "test%s3" % type, self.token))
def test_item(self):
type = "mgmtclass"
tprint("get_item_names")
items_names = self.remote.get_item_names(type)
# create an item of the type defined above
item_id = self.remote.new_mgmtclass(self.token)
self.remote.modify_item(type, item_id, "name", "test%s2" % type, self.token)
result = self.remote.save_item(type, item_id, self.token)
self.assertTrue(result)
new_items_names = self.remote.get_item_names(type)
self.assertTrue(len(new_items_names) == len(items_names) + 1)
self._get_item(type)
self._find_item(type)
self._copy_item(type)
self._rename_item(type)
self._remove_item(type)
new_items_names = self.remote.get_item_names(type)
self.assertTrue(len(new_items_names) == len(items_names))
class Test_NonObjectCalls(CobblerXmlRpcTest):
def _wait_task_end(self, tid):
"""
Wait until a task is finished
"""
timeout = 0
while self.remote.get_task_status(tid)[2] != EVENT_COMPLETE:
print("task %s status: %s" % (tid, self.remote.get_task_status(tid)))
time.sleep(5)
timeout += 5
if timeout == 60:
                raise Exception("timed out waiting for task %s to complete" % tid)
def test_token(self):
"""
Test: authentication token validation
"""
        self.assertNotIn(self.token, ("", None))
def test_get_user_from_token(self):
"""
Test: get user data from authentication token
"""
tprint("get_user_from_token")
self.assertTrue(self.remote.get_user_from_token(self.token))
def test_check(self):
"""
Test: check Cobbler status
"""
tprint("check")
self.assertTrue(self.remote.check(self.token))
def test_last_modified_time(self):
"""
Test: get last modification time
"""
tprint("last_modified_time")
        self.assertNotEqual(self.remote.last_modified_time(self.token), 0)
def test_power_system(self):
"""
Test: reboot a system
"""
if TEST_SYSTEM and TEST_POWER_MANAGEMENT:
tprint("background_power_system")
tid = self.remote.background_power_system({"systems": [TEST_SYSTEM],
"power": "reboot"},
self.token)
self._wait_task_end(tid)
def test_sync(self):
"""
Test: synchronize Cobbler internal data with managed services
(dhcp, tftp, dns)
"""
tprint("background_sync")
tid = self.remote.background_sync({}, self.token)
tprint("get_events")
events = self.remote.get_events(self.token)
self.assertTrue(len(events) > 0)
self._wait_task_end(tid)
tprint("get_event_log")
event_log = self.remote.get_event_log(tid)
def test_get_kickstart_templates(self):
"""
Test: get kickstart templates
"""
tprint("get_kickstart_templates")
result = self.remote.get_kickstart_templates()
self.assertTrue(len(result) > 0)
def test_get_snippets(self):
"""
Test: get snippets
"""
tprint("get_snippets")
result = self.remote.get_snippets(self.token)
self.assertTrue(len(result) > 0)
def test_generate_kickstart(self):
"""
Test: generate kickstart content
"""
if TEST_SYSTEM:
tprint("generate_kickstart")
self.remote.generate_kickstart(None, TEST_SYSTEM)
def test_generate_gpxe(self):
"""
Test: generate GPXE file content
"""
if TEST_SYSTEM:
tprint("generate_gpxe")
self.remote.generate_gpxe(None, TEST_SYSTEM)
def test_generate_bootcfg(self):
"""
Test: generate boot loader configuration file content
"""
if TEST_SYSTEM:
tprint("generate_bootcfg")
self.remote.generate_bootcfg(None, TEST_SYSTEM)
def test_get_settings(self):
"""
Test: get settings
"""
tprint("get_settings")
self.remote.get_settings(self.token)
def test_get_signatures(self):
"""
Test: get distro signatures
"""
tprint("get_signatures")
self.remote.get_signatures(self.token)
def test_get_valid_breeds(self):
"""
Test: get valid OS breeds
"""
tprint("get_valid_breeds")
breeds = self.remote.get_valid_breeds(self.token)
self.assertTrue(len(breeds) > 0)
def test_get_valid_os_versions_for_breed(self):
"""
Test: get valid OS versions for a OS breed
"""
tprint("get_valid_os_versions_for_breeds")
versions = self.remote.get_valid_os_versions_for_breed("generic", self.token)
self.assertTrue(len(versions) > 0)
def test_get_valid_os_versions(self):
"""
Test: get valid OS versions
"""
tprint("get_valid_os_versions")
versions = self.remote.get_valid_os_versions(self.token)
self.assertTrue(len(versions) > 0)
def test_get_random_mac(self):
"""
Test: get a random mac for a virtual network interface
"""
tprint("get_random_mac")
mac = self.remote.get_random_mac("xen", self.token)
hexa = "[0-9A-Fa-f]{2}"
match_obj = re.match("%s:%s:%s:%s:%s:%s" % (hexa, hexa, hexa, hexa, hexa, hexa), mac)
self.assertTrue(match_obj)
if __name__ == '__main__':
unittest.main()
|
mrry/tensorflow
|
refs/heads/windows
|
tensorflow/contrib/learn/python/learn/dataframe/__init__.py
|
86
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DataFrames for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe.dataframe import DataFrame
from tensorflow.contrib.learn.python.learn.dataframe.series import PredefinedSeries
from tensorflow.contrib.learn.python.learn.dataframe.series import Series
from tensorflow.contrib.learn.python.learn.dataframe.series import TransformedSeries
from tensorflow.contrib.learn.python.learn.dataframe.tensorflow_dataframe import TensorFlowDataFrame
from tensorflow.contrib.learn.python.learn.dataframe.transform import parameter
from tensorflow.contrib.learn.python.learn.dataframe.transform import TensorFlowTransform
from tensorflow.contrib.learn.python.learn.dataframe.transform import Transform
# Transforms
from tensorflow.contrib.learn.python.learn.dataframe.transforms.boolean_mask import BooleanMask
from tensorflow.contrib.learn.python.learn.dataframe.transforms.difference import Difference
from tensorflow.contrib.learn.python.learn.dataframe.transforms.hashes import HashFast
from tensorflow.contrib.learn.python.learn.dataframe.transforms.in_memory_source import NumpySource
from tensorflow.contrib.learn.python.learn.dataframe.transforms.in_memory_source import PandasSource
from tensorflow.contrib.learn.python.learn.dataframe.transforms.reader_source import ReaderSource
# Coming soon; multichange client hassle due to no DIFFBASE in Cider
# from tensorflow.contrib.learn.python.learn.dataframe \
# .transforms.split_mask import SplitMask
from tensorflow.contrib.learn.python.learn.dataframe.transforms.sum import Sum
# pylint: disable=g-import-not-at-top,g-bad-import-order
# Unary Transform registration
from tensorflow.contrib.learn.python.learn.dataframe.transforms import unary_transforms as _ut
for ut_def in _ut.UNARY_TRANSFORMS:
_ut.register_unary_op(*ut_def)
# Comparison Transform registration
from tensorflow.contrib.learn.python.learn.dataframe.transforms import binary_transforms as _bt
for bt_def in _bt.BINARY_TRANSFORMS:
_bt.register_binary_op(*bt_def)
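# After the two loops above run, each registered (name, op) entry is exposed
# as a method on Series, so elementwise ops can be written fluently, e.g.
# (a sketch; the available names depend on the UNARY_/BINARY_TRANSFORMS tables):
#   result_series = my_series.log()  # hypothetical registered unary op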
__all__ = ['DataFrame', 'Series', 'PredefinedSeries', 'TransformedSeries',
'TensorFlowDataFrame', 'TensorFlowTransform', 'parameter',
'Transform']
|
semiautomaticgit/SemiAutomaticClassificationPlugin
|
refs/heads/master
|
maininterface/rgblistTab.py
|
1
|
# -*- coding: utf-8 -*-
'''
/**************************************************************************************************************************
SemiAutomaticClassificationPlugin
The Semi-Automatic Classification Plugin for QGIS allows for the supervised classification of remote sensing images,
providing tools for the download, the preprocessing and postprocessing of images.
-------------------
begin : 2012-12-29
copyright : (C) 2012-2021 by Luca Congedo
email : ing.congedoluca@gmail.com
**************************************************************************************************************************/
/**************************************************************************************************************************
*
* This file is part of Semi-Automatic Classification Plugin
*
* Semi-Automatic Classification Plugin is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software Foundation,
* version 3 of the License.
*
* Semi-Automatic Classification Plugin is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with
* Semi-Automatic Classification Plugin. If not, see <http://www.gnu.org/licenses/>.
*
**************************************************************************************************************************/
'''
cfg = __import__(str(__name__).split('.')[0] + '.core.config', fromlist=[''])
class RGBListTab:
def __init__(self):
self.tableEdited = 'Yes'
# Create RGB list table
def RGBListTable(self, list):
l = cfg.ui.RGB_tableWidget
self.tableEdited = 'No'
l.blockSignals(True)
cfg.utls.clearTable(l)
x = 0
for i in list:
if i != "-":
cfg.utls.insertTableRow(l, x)
cfg.utls.addTableItem(l, i, x, 0)
x = x + 1
l.blockSignals(False)
self.tableEdited = 'Yes'
# edited table
def editedTable(self, row, column):
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), "")
if self.tableEdited == 'Yes':
tW = cfg.ui.RGB_tableWidget
t = tW.item(row, column).text()
try:
check = cfg.utls.createRGBColorComposite(t)
except Exception as err:
# logger
cfg.utls.logCondition(str(__name__) + '-' + (cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), ' ERROR exception: ' + str(err))
check = 'No'
if check == 'Yes':
listA = self.readRGBListTable()
cfg.RGBList = listA
cfg.utls.writeProjectVariable("SCP_RGBList", str(cfg.RGBList))
cfg.utls.setComboboxItems(cfg.rgb_combo, cfg.RGBList)
id = cfg.rgb_combo.findText(t)
cfg.rgb_combo.setCurrentIndex(id)
else:
cfg.RGBLT.RGBListTable(cfg.RGBList)
# read RGB List table
def readRGBListTable(self):
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), "")
tW = cfg.ui.RGB_tableWidget
c = tW.rowCount()
list = []
list.append("-")
for b in range(0, c):
t = tW.item(b, 0).text()
list.append(t)
return list
# add RGB
def addRGBToTable(self):
tW = cfg.ui.RGB_tableWidget
# add item to table
c = tW.rowCount()
# add list items to table
tW.setRowCount(c + 1)
cfg.utls.addTableItem(tW, "0-0-0", c, 0)
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), "added RGB " + str(c + 1))
# remove RGB
def removeRGBFromTable(self):
cfg.utls.removeRowsFromTable(cfg.ui.RGB_tableWidget)
listA = self.readRGBListTable()
cfg.RGBList = listA
cfg.utls.writeProjectVariable("SCP_RGBList", str(cfg.RGBList))
cfg.utls.setComboboxItems(cfg.rgb_combo, cfg.RGBList)
# sort RGB
def sortRGBName(self):
listA = cfg.RGBList
sortRGB = sorted(listA)
cfg.RGBList = sortRGB
cfg.RGBLT.RGBListTable(cfg.RGBList)
cfg.utls.writeProjectVariable("SCP_RGBList", str(cfg.RGBList))
cfg.utls.setComboboxItems(cfg.rgb_combo, cfg.RGBList)
# clear the band set
def clearTableAction(self):
self.clearTable()
# clear Table
def clearTable(self, question = 'Yes'):
if question == 'Yes':
# ask for confirm
a = cfg.utls.questionBox(cfg.QtWidgetsSCP.QApplication.translate("semiautomaticclassificationplugin", "Reset RGB list"), cfg.QtWidgetsSCP.QApplication.translate("semiautomaticclassificationplugin", "Are you sure you want to clear the RGB list?"))
else:
a = 'Yes'
if a == 'Yes':
tW = cfg.ui.RGB_tableWidget
cfg.utls.clearTable(tW)
listA = self.readRGBListTable()
cfg.RGBList = listA
cfg.utls.writeProjectVariable("SCP_RGBList", str(cfg.RGBList))
cfg.utls.setComboboxItems(cfg.rgb_combo, cfg.RGBList)
# move up selected RGB
def moveUpRGB(self):
tW = cfg.ui.RGB_tableWidget
self.tableEdited = 'No'
tW.blockSignals(True)
c = tW.rowCount()
s = tW.selectedItems()
# create list for new selection after move
ns = []
for i in range (0, len(s)):
ns.append(s[i].row() - 1)
try:
for b in range(0, c):
if tW.item(b, 0).isSelected():
bNU = tW.item(b, 0).text()
bND = tW.item(b - 1, 0).text()
tW.item(b, 0).setText(str(bND))
tW.item(b - 1, 0).setText(str(bNU))
tW.clearSelection()
v = list(set(ns))
for i in range (0, len(v)):
tW.selectRow(v[i])
except Exception as err:
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), " ERROR exception: " + str(err))
tW.clearSelection()
self.tableEdited = 'Yes'
tW.blockSignals(False)
listA = self.readRGBListTable()
cfg.RGBList = listA
cfg.utls.writeProjectVariable("SCP_RGBList", str(cfg.RGBList))
cfg.utls.setComboboxItems(cfg.rgb_combo, cfg.RGBList)
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), " RGB moved")
# move down selected RGB
def moveDownRGB(self):
tW = cfg.ui.RGB_tableWidget
self.tableEdited = 'No'
tW.blockSignals(True)
c = tW.rowCount()
s = tW.selectedItems()
# create list for new selection after move
ns = []
for i in range (0, len(s)):
ns.append(s[i].row() + 1)
try:
for b in reversed(list(range(0, c))):
if tW.item(b, 0).isSelected():
bNU = tW.item(b, 0).text()
bND = tW.item(b + 1, 0).text()
tW.item(b, 0).setText(str(bND))
tW.item(b + 1, 0).setText(str(bNU))
tW.clearSelection()
v = list(set(ns))
for i in range (0, len(v)):
tW.selectRow(v[i])
except Exception as err:
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), " ERROR exception: " + str(err))
tW.clearSelection()
self.tableEdited = 'Yes'
tW.blockSignals(False)
listA = self.readRGBListTable()
cfg.RGBList = listA
cfg.utls.writeProjectVariable("SCP_RGBList", str(cfg.RGBList))
cfg.utls.setComboboxItems(cfg.rgb_combo, cfg.RGBList)
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), " RGB moved")
# all RGB List
def allRGBListAction(self):
self.allRGBList()
# all RGB List
def allRGBList(self, question = 'Yes', bandSetNumber = None):
if question == 'Yes':
# ask for confirm
a = cfg.utls.questionBox(cfg.QtWidgetsSCP.QApplication.translate("semiautomaticclassificationplugin", "RGB list"), cfg.QtWidgetsSCP.QApplication.translate("semiautomaticclassificationplugin", "Calculate all the RGB combinations?"))
else:
a = 'Yes'
if a == 'Yes':
if bandSetNumber is None:
bandSetNumber = cfg.bndSetNumber
if bandSetNumber >= len(cfg.bandSetsList):
cfg.mx.msgWar25(bandSetNumber + 1)
return 'No'
perm = list(cfg.itertoolsSCP.permutations(list(range(1, len(cfg.bandSetsList[bandSetNumber][3])+1)), 3))
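			# e.g. a band set with 3 bands yields the 6 ordered triples
			# 1-2-3, 1-3-2, 2-1-3, 2-3-1, 3-1-2, 3-2-1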
tW = cfg.ui.RGB_tableWidget
self.tableEdited = 'No'
tW.blockSignals(True)
cfg.utls.clearTable(tW)
for x in perm:
c = tW.rowCount()
# add list items to table
tW.setRowCount(c + 1)
cfg.utls.addTableItem(tW, str(x[0]) + "-" + str(x[1]) + "-" + str(x[2]), c, 0)
self.tableEdited = 'Yes'
tW.blockSignals(False)
listA = self.readRGBListTable()
cfg.RGBList = listA
cfg.utls.writeProjectVariable("SCP_RGBList", str(cfg.RGBList))
cfg.utls.setComboboxItems(cfg.rgb_combo, cfg.RGBList)
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), "")
# export RGB list to file
def exportRGBList(self):
file = cfg.utls.getSaveFileName(None , cfg.QtWidgetsSCP.QApplication.translate("semiautomaticclassificationplugin", "Save the RGB list to file"), "", "*.csv", "csv")
if file is not False:
if file.lower().endswith(".csv"):
pass
else:
file = file + ".csv"
try:
f = open(file, 'w')
f.write("")
f.close()
f = open(file, 'a')
for i in cfg.RGBList:
if i != "-":
txt = i + "\n"
f.write(txt)
f.close()
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), " list exported")
except Exception as err:
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), " ERROR exception: " + str(err))
# import RGB from file
def importRGB(self):
file = cfg.utls.getOpenFileName(None , "Select a RGB list file", "", "CSV (*.csv)")
		try:
			if cfg.osSCP.path.isfile(file):
				with open(file) as f:
					lines = f.readlines()
				tW = cfg.ui.RGB_tableWidget
				# RGB list: add one row per line of the imported file
				for b in range(0, len(lines)):
					# add item to table
					c = tW.rowCount()
					# add list items to table
					tW.setRowCount(c + 1)
					cfg.utls.addTableItem(tW, lines[b].strip(), c, 0)
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), " list imported")
except Exception as err:
cfg.mx.msgErr19()
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), " ERROR exception: " + str(err))
|
roselleebarle04/django
|
refs/heads/master
|
django/contrib/gis/maps/google/overlays.py
|
133
|
from __future__ import unicode_literals
from functools import total_ordering
from django.contrib.gis.geos import (
LinearRing, LineString, Point, Polygon, fromstr,
)
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import html_safe
@html_safe
@python_2_unicode_compatible
class GEvent(object):
"""
A Python wrapper for the Google GEvent object.
Events can be attached to any object derived from GOverlayBase with the
add_event() call.
For more information please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GEvent
Example:
from django.shortcuts import render_to_response
from django.contrib.gis.maps.google import GoogleMap, GEvent, GPolyline
def sample_request(request):
polyline = GPolyline('LINESTRING(101 26, 112 26, 102 31)')
event = GEvent('click',
'function() { location.href = "http://www.google.com"}')
polyline.add_event(event)
return render_to_response('mytemplate.html',
{'google' : GoogleMap(polylines=[polyline])})
"""
def __init__(self, event, action):
"""
Initializes a GEvent object.
Parameters:
event:
string for the event, such as 'click'. The event must be a valid
event for the object in the Google Maps API.
There is no validation of the event type within Django.
action:
string containing a Javascript function, such as
'function() { location.href = "newurl";}'
The string must be a valid Javascript function. Again there is no
validation of the function within Django.
"""
self.event = event
self.action = action
def __str__(self):
"Returns the parameter part of a GEvent."
return '"%s", %s' % (self.event, self.action)
@html_safe
@python_2_unicode_compatible
class GOverlayBase(object):
def __init__(self):
self.events = []
def latlng_from_coords(self, coords):
"Generates a JavaScript array of GLatLng objects for the given coordinates."
return '[%s]' % ','.join('new GLatLng(%s,%s)' % (y, x) for x, y in coords)
def add_event(self, event):
"Attaches a GEvent to the overlay object."
self.events.append(event)
def __str__(self):
"The string representation is the JavaScript API call."
return '%s(%s)' % (self.__class__.__name__, self.js_params)
class GPolygon(GOverlayBase):
"""
A Python wrapper for the Google GPolygon object. For more information
please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GPolygon
"""
def __init__(self, poly,
stroke_color='#0000ff', stroke_weight=2, stroke_opacity=1,
fill_color='#0000ff', fill_opacity=0.4):
"""
The GPolygon object initializes on a GEOS Polygon or a parameter that
may be instantiated into a GEOS Polygon. Please note that this will not
depict a Polygon's internal rings.
Keyword Options:
stroke_color:
The color of the polygon outline. Defaults to '#0000ff' (blue).
stroke_weight:
The width of the polygon outline, in pixels. Defaults to 2.
stroke_opacity:
The opacity of the polygon outline, between 0 and 1. Defaults to 1.
fill_color:
The color of the polygon fill. Defaults to '#0000ff' (blue).
fill_opacity:
The opacity of the polygon fill. Defaults to 0.4.
"""
if isinstance(poly, six.string_types):
poly = fromstr(poly)
if isinstance(poly, (tuple, list)):
poly = Polygon(poly)
if not isinstance(poly, Polygon):
raise TypeError('GPolygon may only initialize on GEOS Polygons.')
# Getting the envelope of the input polygon (used for automatically
# determining the zoom level).
self.envelope = poly.envelope
# Translating the coordinates into a JavaScript array of
# Google `GLatLng` objects.
self.points = self.latlng_from_coords(poly.shell.coords)
# Stroke settings.
self.stroke_color, self.stroke_opacity, self.stroke_weight = stroke_color, stroke_opacity, stroke_weight
# Fill settings.
self.fill_color, self.fill_opacity = fill_color, fill_opacity
super(GPolygon, self).__init__()
@property
def js_params(self):
return '%s, "%s", %s, %s, "%s", %s' % (self.points, self.stroke_color, self.stroke_weight, self.stroke_opacity,
self.fill_color, self.fill_opacity)
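# A minimal usage sketch (the WKT string below is illustrative only):
# poly = GPolygon('POLYGON((0 0, 10 0, 10 10, 0 0))', stroke_color='#ff0000')
# str(poly)  # renders the GPolygon(...) JavaScript constructor call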
class GPolyline(GOverlayBase):
"""
A Python wrapper for the Google GPolyline object. For more information
please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GPolyline
"""
def __init__(self, geom, color='#0000ff', weight=2, opacity=1):
"""
The GPolyline object may be initialized on GEOS LineString, LinearRing,
and Polygon objects (internal rings not supported) or a parameter that
may be instantiated into one of the above geometries.
Keyword Options:
color:
The color to use for the polyline. Defaults to '#0000ff' (blue).
weight:
The width of the polyline, in pixels. Defaults to 2.
opacity:
The opacity of the polyline, between 0 and 1. Defaults to 1.
"""
# If a GEOS geometry isn't passed in, try to construct one.
if isinstance(geom, six.string_types):
geom = fromstr(geom)
if isinstance(geom, (tuple, list)):
geom = Polygon(geom)
# Generating the lat/lng coordinate pairs.
if isinstance(geom, (LineString, LinearRing)):
self.latlngs = self.latlng_from_coords(geom.coords)
elif isinstance(geom, Polygon):
self.latlngs = self.latlng_from_coords(geom.shell.coords)
else:
raise TypeError('GPolyline may only initialize on GEOS LineString, LinearRing, and/or Polygon geometries.')
# Getting the envelope for automatic zoom determination.
self.envelope = geom.envelope
self.color, self.weight, self.opacity = color, weight, opacity
super(GPolyline, self).__init__()
@property
def js_params(self):
return '%s, "%s", %s, %s' % (self.latlngs, self.color, self.weight, self.opacity)
@total_ordering
class GIcon(object):
"""
Creates a GIcon object to pass into a Gmarker object.
The keyword arguments map to instance attributes of the same name. These,
in turn, correspond to a subset of the attributes of the official GIcon
javascript object:
http://code.google.com/apis/maps/documentation/reference.html#GIcon
Because a Google map often uses several different icons, a name field has
been added to the required arguments.
Required Arguments:
varname:
A string which will become the basis for the js variable name of
the marker. For this reason, your code should assign a unique
name to each GIcon you instantiate; otherwise there will be
namespace collisions in your javascript.
Keyword Options:
image:
The url of the image to be used as the icon on the map. Defaults
to 'G_DEFAULT_ICON'.
iconsize:
a tuple representing the pixel size of the foreground (not the
shadow) image of the icon, in the format: (width, height) ex.:
GIcon('fast_food',
image="/media/icon/star.png",
iconsize=(15,10))
Would indicate your custom icon is 15px wide and 10px high.
shadow:
the url of the image of the icon's shadow
shadowsize:
a tuple representing the pixel size of the shadow image, format is
the same as ``iconsize``
iconanchor:
a tuple representing the pixel coordinate relative to the top left
corner of the icon image at which this icon is anchored to the map.
In (x, y) format. x increases to the right and y increases
downwards in the Google Maps coordinate system.
infowindowanchor:
The pixel coordinate relative to the top left corner of the icon
image at which the info window is anchored to this icon.
"""
def __init__(self, varname, image=None, iconsize=None,
shadow=None, shadowsize=None, iconanchor=None,
infowindowanchor=None):
self.varname = varname
self.image = image
self.iconsize = iconsize
self.shadow = shadow
self.shadowsize = shadowsize
self.iconanchor = iconanchor
self.infowindowanchor = infowindowanchor
def __eq__(self, other):
return self.varname == other.varname
def __lt__(self, other):
return self.varname < other.varname
def __hash__(self):
# XOR with hash of GIcon type so that hash('varname') won't
# equal hash(GIcon('varname')).
return hash(self.__class__) ^ hash(self.varname)
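# Illustrative sketch (the image path below is hypothetical):
# icon = GIcon('flag', image='/static/img/flag.png', iconsize=(20, 20))
# marker = GMarker('POINT(5 23)', icon=icon)  # GMarker is defined below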
class GMarker(GOverlayBase):
"""
A Python wrapper for the Google GMarker object. For more information
please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GMarker
Example:
from django.shortcuts import render_to_response
from django.contrib.gis.maps.google.overlays import GMarker, GEvent
def sample_request(request):
marker = GMarker('POINT(101 26)')
event = GEvent('click',
'function() { location.href = "http://www.google.com"}')
marker.add_event(event)
return render_to_response('mytemplate.html',
{'google' : GoogleMap(markers=[marker])})
"""
def __init__(self, geom, title=None, draggable=False, icon=None):
"""
The GMarker object may initialize on GEOS Points or a parameter
that may be instantiated into a GEOS Point. Keyword options map to
GMarkerOptions -- so far only the title, draggable, and icon options
are supported.
Keyword Options:
title:
Title option for GMarker, will be displayed as a tooltip.
draggable:
Draggable option for GMarker, disabled by default.
"""
# If a GEOS geometry isn't passed in, try to construct one.
if isinstance(geom, six.string_types):
geom = fromstr(geom)
if isinstance(geom, (tuple, list)):
geom = Point(geom)
if isinstance(geom, Point):
self.latlng = self.latlng_from_coords(geom.coords)
else:
raise TypeError('GMarker may only initialize on GEOS Point geometry.')
# Getting the envelope for automatic zoom determination.
self.envelope = geom.envelope
# TODO: Add support for more GMarkerOptions
self.title = title
self.draggable = draggable
self.icon = icon
super(GMarker, self).__init__()
def latlng_from_coords(self, coords):
return 'new GLatLng(%s,%s)' % (coords[1], coords[0])
def options(self):
result = []
if self.title:
result.append('title: "%s"' % self.title)
if self.icon:
result.append('icon: %s' % self.icon.varname)
if self.draggable:
result.append('draggable: true')
return '{%s}' % ','.join(result)
@property
def js_params(self):
return '%s, %s' % (self.latlng, self.options())
|
Bluetide/Cactus
|
refs/heads/master
|
cactus/utils/network.py
|
9
|
#coding:utf-8
import logging
import time
import urllib2
from cactus.utils.parallel import multiMap
logger = logging.getLogger(__name__)
def retry(exceptions, tries=4, delay=3, backoff=2):
"""
Retry execution in case we fail on one of the exceptions
"""
def deco_retry(f):
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
try_one_last_time = True
while mtries > 1:
try:
return f(*args, **kwargs)
except exceptions as e:
logger.warning("%s, Retrying in %.1f seconds..." % (str(e), mdelay))
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
if try_one_last_time:
return f(*args, **kwargs)
return
return f_retry # true decorator
return deco_retry
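# Usage sketch (illustrative; the URL is a placeholder):
# @retry(urllib2.URLError, tries=3, delay=1, backoff=2)
# def fetch():
#     return urllib2.urlopen('http://example.com').read()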
def internetWorking():
def check(url):
try:
urllib2.urlopen(url, timeout=1)
return True
except urllib2.URLError:
return False
return True in multiMap(check, [
'http://www.google.com',
'http://www.apple.com'])
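# Sketch of intended use (illustrative):
# if not internetWorking():
#     logger.warning("No network connection detected; skipping remote calls.")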
|
zteifel/roboloid
|
refs/heads/master
|
lib/DynamixelSDK/python/protocol2_0/sync_read_write.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2016, ROBOTIS CO., LTD.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of ROBOTIS nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
# Author: Ryu Woon Jung (Leon)
#
# ********* Sync Read and Sync Write Example *********
#
#
# Available Dynamixel models for this example : all models using Protocol 2.0
# This example is designed for two Dynamixel PRO 54-200 units and a USB2DYNAMIXEL.
# To use another Dynamixel model, such as the X series, see its details in the e-Manual (support.robotis.com) and edit the variables below.
# Be sure that the Dynamixel PRO properties are already set as %% ID : 1 / Baudnum : 3 (Baudrate : 1000000 [1M])
#
import os, ctypes
if os.name == 'nt':
import msvcrt
def getch():
return msvcrt.getch().decode()
else:
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
def getch():
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
os.sys.path.append('../dynamixel_functions_py') # Path setting
import dynamixel_functions as dynamixel # Uses Dynamixel SDK library
# Control table address
ADDR_PRO_TORQUE_ENABLE       = 562                          # Control table addresses differ between Dynamixel models
ADDR_PRO_GOAL_POSITION = 596
ADDR_PRO_PRESENT_POSITION = 611
# Data Byte Length
LEN_PRO_GOAL_POSITION = 4
LEN_PRO_PRESENT_POSITION = 4
# Protocol version
PROTOCOL_VERSION = 2 # See which protocol version is used in the Dynamixel
# Default setting
DXL1_ID = 1 # Dynamixel ID: 1
DXL2_ID = 2 # Dynamixel ID: 2
BAUDRATE = 1000000
DEVICENAME = "/dev/ttyUSB0".encode('utf-8')# Check which port is being used on your controller
# ex) Windows: "COM1" Linux: "/dev/ttyUSB0"
TORQUE_ENABLE = 1 # Value for enabling the torque
TORQUE_DISABLE = 0 # Value for disabling the torque
DXL_MINIMUM_POSITION_VALUE = -150000 # Dynamixel will rotate between this value
DXL_MAXIMUM_POSITION_VALUE = 150000 # and this value (note that the Dynamixel would not move when the position value is out of movable range. Check e-manual about the range of the Dynamixel you use.)
DXL_MOVING_STATUS_THRESHOLD = 20 # Dynamixel moving status threshold
ESC_ASCII_VALUE = 0x1b
COMM_SUCCESS = 0 # Communication Success result value
COMM_TX_FAIL = -1001 # Communication Tx Failed
# Initialize PortHandler Structs
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
port_num = dynamixel.portHandler(DEVICENAME)
# Initialize PacketHandler Structs
dynamixel.packetHandler()
# Initialize Groupsyncwrite instance
groupwrite_num = dynamixel.groupSyncWrite(port_num, PROTOCOL_VERSION, ADDR_PRO_GOAL_POSITION, LEN_PRO_GOAL_POSITION)
# Initialize Groupsyncread Structs for Present Position
groupread_num = dynamixel.groupSyncRead(port_num, PROTOCOL_VERSION, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
index = 0
dxl_comm_result = COMM_TX_FAIL # Communication result
dxl_addparam_result = 0 # AddParam result
dxl_getdata_result = 0 # GetParam result
dxl_goal_position = [DXL_MINIMUM_POSITION_VALUE, DXL_MAXIMUM_POSITION_VALUE] # Goal position
dxl_error = 0 # Dynamixel error
dxl1_present_position = 0 # Present position
dxl2_present_position = 0
# Open port
if dynamixel.openPort(port_num):
print("Succeeded to open the port!")
else:
print("Failed to open the port!")
print("Press any key to terminate...")
getch()
quit()
# Set port baudrate
if dynamixel.setBaudRate(port_num, BAUDRATE):
print("Succeeded to change the baudrate!")
else:
print("Failed to change the baudrate!")
print("Press any key to terminate...")
getch()
quit()
# Enable Dynamixel#1 Torque
dynamixel.write1ByteTxRx(port_num, PROTOCOL_VERSION, DXL1_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)
if dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION) != COMM_SUCCESS:
dynamixel.printTxRxResult(PROTOCOL_VERSION, dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION))
elif dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION) != 0:
dynamixel.printRxPacketError(PROTOCOL_VERSION, dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION))
else:
print("Dynamixel#1 has been successfully connected")
# Enable Dynamixel#2 Torque
dynamixel.write1ByteTxRx(port_num, PROTOCOL_VERSION, DXL2_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)
if dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION) != COMM_SUCCESS:
dynamixel.printTxRxResult(PROTOCOL_VERSION, dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION))
elif dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION) != 0:
dynamixel.printRxPacketError(PROTOCOL_VERSION, dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION))
else:
print("Dynamixel#2 has been successfully connected")
# Add parameter storage for Dynamixel#1 present position value
dxl_addparam_result = ctypes.c_ubyte(dynamixel.groupSyncReadAddParam(groupread_num, DXL1_ID)).value
if dxl_addparam_result != 1:
print("[ID:%03d] groupSyncRead addparam failed" % (DXL1_ID))
quit()
# Add parameter storage for Dynamixel#2 present position value
dxl_addparam_result = ctypes.c_ubyte(dynamixel.groupSyncReadAddParam(groupread_num, DXL2_ID)).value
if dxl_addparam_result != 1:
print("[ID:%03d] groupSyncRead addparam failed" % (DXL2_ID))
quit()
while 1:
print("Press any key to continue! (or press ESC to quit!)")
if getch() == chr(ESC_ASCII_VALUE):
break
# Add Dynamixel#1 goal position value to the Syncwrite storage
dxl_addparam_result = ctypes.c_ubyte(dynamixel.groupSyncWriteAddParam(groupwrite_num, DXL1_ID, dxl_goal_position[index], LEN_PRO_GOAL_POSITION)).value
print(dxl_addparam_result)
if dxl_addparam_result != 1:
print("[ID:%03d] groupSyncWrite addparam failed" % (DXL1_ID))
quit()
# Add Dynamixel#2 goal position value to the Syncwrite parameter storage
dxl_addparam_result = ctypes.c_ubyte(dynamixel.groupSyncWriteAddParam(groupwrite_num, DXL2_ID, dxl_goal_position[index], LEN_PRO_GOAL_POSITION)).value
if dxl_addparam_result != 1:
print("[ID:%03d] groupSyncWrite addparam failed" % (DXL2_ID))
quit()
# Syncwrite goal position
dynamixel.groupSyncWriteTxPacket(groupwrite_num)
if dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION) != COMM_SUCCESS:
dynamixel.printTxRxResult(PROTOCOL_VERSION, dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION))
# Clear syncwrite parameter storage
dynamixel.groupSyncWriteClearParam(groupwrite_num)
while 1:
# Syncread present position
dynamixel.groupSyncReadTxRxPacket(groupread_num)
if dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION) != COMM_SUCCESS:
dynamixel.printTxRxResult(PROTOCOL_VERSION, dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION))
# Check if groupsyncread data of Dynamixel#1 is available
dxl_getdata_result = ctypes.c_ubyte(dynamixel.groupSyncReadIsAvailable(groupread_num, DXL1_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)).value
if dxl_getdata_result != 1:
print("[ID:%03d] groupSyncRead getdata failed" % (DXL1_ID))
quit()
# Check if groupsyncread data of Dynamixel#2 is available
dxl_getdata_result = ctypes.c_ubyte(dynamixel.groupSyncReadIsAvailable(groupread_num, DXL2_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)).value
if dxl_getdata_result != 1:
print("[ID:%03d] groupSyncRead getdata failed" % (DXL2_ID))
quit()
# Get Dynamixel#1 present position value
dxl1_present_position = dynamixel.groupSyncReadGetData(groupread_num, DXL1_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
# Get Dynamixel#2 present position value
dxl2_present_position = dynamixel.groupSyncReadGetData(groupread_num, DXL2_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
print("[ID:%03d] GoalPos:%03d PresPos:%03d\t[ID:%03d] GoalPos:%03d PresPos:%03d" % (DXL1_ID, dxl_goal_position[index], dxl1_present_position, DXL2_ID, dxl_goal_position[index], dxl2_present_position))
if not ((abs(dxl_goal_position[index] - dxl1_present_position) > DXL_MOVING_STATUS_THRESHOLD) or (abs(dxl_goal_position[index] - dxl2_present_position) > DXL_MOVING_STATUS_THRESHOLD)):
break
# Change goal position
if index == 0:
index = 1
else:
index = 0
# Disable Dynamixel#1 Torque
dynamixel.write1ByteTxRx(port_num, PROTOCOL_VERSION, DXL1_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_DISABLE)
if dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION) != COMM_SUCCESS:
dynamixel.printTxRxResult(PROTOCOL_VERSION, dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION))
elif dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION) != 0:
dynamixel.printRxPacketError(PROTOCOL_VERSION, dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION))
# Disable Dynamixel#2 Torque
dynamixel.write1ByteTxRx(port_num, PROTOCOL_VERSION, DXL2_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_DISABLE)
if dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION) != COMM_SUCCESS:
dynamixel.printTxRxResult(PROTOCOL_VERSION, dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION))
elif dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION) != 0:
dynamixel.printRxPacketError(PROTOCOL_VERSION, dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION))
# Close port
dynamixel.closePort(port_num)
|
jtremback/FlyingFox
|
refs/heads/master
|
docs/python/pt/bci.py
|
5
|
#!/usr/bin/python
import urllib2, json, re, random, sys
# Makes a request to a given URL (first argument) and optional params (second argument)
def make_request(*args):
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0'+str(random.randrange(1000000)))]
try:
return opener.open(*args).read().strip()
except Exception as e:
try: p = e.read().strip()
except: p = e
raise Exception(p)
# Gets the transaction output history of a given set of addresses,
# including whether or not they have been spent
def history(*args):
# Valid input formats: history([addr1, addr2,addr3])
# history(addr1, addr2, addr3)
if len(args) == 0: return []
elif isinstance(args[0],list): addrs = args[0]
else: addrs = args
txs = []
for addr in addrs:
offset = 0
while 1:
data = make_request('http://blockchain.info/address/%s?format=json&offset=%s' % (addr,offset))
try:
jsonobj = json.loads(data)
except:
raise Exception("Failed to decode data: "+data)
txs.extend(jsonobj["txs"])
if len(jsonobj["txs"]) < 50: break
offset += 50
sys.stderr.write("Fetching more transactions... "+str(offset)+'\n')
outs = {}
for tx in txs:
for o in tx["out"]:
if o['addr'] in addrs:
key = str(tx["tx_index"])+':'+str(o["n"])
outs[key] = {
"address" : o["addr"],
"value" : o["value"],
"output" : tx["hash"]+':'+str(o["n"]),
"block_height" : tx.get("block_height",None)
}
for tx in txs:
for i, inp in enumerate(tx["inputs"]):
if inp["prev_out"]["addr"] in addrs:
key = str(inp["prev_out"]["tx_index"])+':'+str(inp["prev_out"]["n"])
if outs.get(key): outs[key]["spend"] = tx["hash"]+':'+str(i)
return [outs[k] for k in outs]
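# Usage sketch (the address below is a placeholder, not a real wallet):
# outs = history('1ExampleAddressxxxxxxxxxxxxxxxxxx')
# unspent = [o for o in outs if 'spend' not in o]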
# Pushes a transaction to the network using http://blockchain.info/pushtx
def pushtx(tx):
if not re.match('^[0-9a-fA-F]*$',tx): tx = tx.encode('hex')
return make_request('http://blockchain.info/pushtx','tx='+tx)
def eligius_pushtx(tx):
if not re.match('^[0-9a-fA-F]*$',tx): tx = tx.encode('hex')
s = make_request('http://eligius.st/~wizkid057/newstats/pushtxn.php','transaction='+tx+'&send=Push')
strings = re.findall('string[^"]*"[^"]*"',s)
for string in strings:
quote = re.findall('"[^"]*"',string)[0]
if len(quote) >= 5: return quote[1:-1]
def last_block_height():
data = make_request('http://blockchain.info/latestblock')
jsonobj = json.loads(data)
return jsonobj["height"]
# Gets a specific transaction
def fetchtx(txhash):
if not re.match('^[0-9a-fA-F]*$',txhash): txhash = txhash.encode('hex')
data = make_request('http://blockchain.info/rawtx/'+txhash+'?format=hex')
return data
def firstbits(address):
if len(address) >= 25:
return make_request('https://blockchain.info/q/getfirstbits/'+address)
else:
return make_request('https://blockchain.info/q/resolvefirstbits/'+address)
|
plxaye/chromium
|
refs/heads/master
|
src/third_party/protobuf/python/stubout.py
|
670
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
import inspect
class StubOutForTesting:
"""Sample Usage:
You want os.path.exists() to always return true during testing.
stubs = StubOutForTesting()
stubs.Set(os.path, 'exists', lambda x: 1)
...
stubs.UnsetAll()
The above changes os.path.exists into a lambda that returns 1. Once
the ... part of the code finishes, the UnsetAll() looks up the old value
of os.path.exists and restores it.
"""
def __init__(self):
self.cache = []
self.stubs = []
def __del__(self):
self.SmartUnsetAll()
self.UnsetAll()
def SmartSet(self, obj, attr_name, new_attr):
"""Replace obj.attr_name with new_attr. This method is smart and works
at the module, class, and instance level while preserving proper
inheritance. It will not stub out C types however unless that has been
explicitly allowed by the type.
This method supports the case where attr_name is a staticmethod or a
classmethod of obj.
Notes:
- If obj is an instance, then it is its class that will actually be
stubbed. Note that the method Set() does not do that: if obj is
an instance, it (and not its class) will be stubbed.
- The stubbing is using the builtin getattr and setattr. So, the __get__
and __set__ will be called when stubbing (TODO: A better idea would
probably be to manipulate obj.__dict__ instead of getattr() and
setattr()).
Raises AttributeError if the attribute cannot be found.
"""
if (inspect.ismodule(obj) or
(not inspect.isclass(obj) and obj.__dict__.has_key(attr_name))):
orig_obj = obj
orig_attr = getattr(obj, attr_name)
else:
if not inspect.isclass(obj):
mro = list(inspect.getmro(obj.__class__))
else:
mro = list(inspect.getmro(obj))
mro.reverse()
orig_attr = None
for cls in mro:
try:
orig_obj = cls
orig_attr = getattr(obj, attr_name)
except AttributeError:
continue
if orig_attr is None:
raise AttributeError("Attribute not found.")
# Calling getattr() on a staticmethod transforms it to a 'normal' function.
# We need to ensure that we put it back as a staticmethod.
old_attribute = obj.__dict__.get(attr_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
orig_attr = staticmethod(orig_attr)
self.stubs.append((orig_obj, attr_name, orig_attr))
setattr(orig_obj, attr_name, new_attr)
def SmartUnsetAll(self):
"""Reverses all the SmartSet() calls, restoring things to their original
definition. It's okay to call SmartUnsetAll() repeatedly, as later calls
have no effect if no SmartSet() calls have been made.
"""
self.stubs.reverse()
for args in self.stubs:
setattr(*args)
self.stubs = []
def Set(self, parent, child_name, new_child):
"""Replace child_name's old definition with new_child, in the context
of the given parent. The parent could be a module when the child is a
function at module scope. Or the parent could be a class when a class'
method is being replaced. The named child is set to new_child, while
the prior definition is saved away for later, when UnsetAll() is called.
This method supports the case where child_name is a staticmethod or a
classmethod of parent.
"""
old_child = getattr(parent, child_name)
old_attribute = parent.__dict__.get(child_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
old_child = staticmethod(old_child)
self.cache.append((parent, old_child, child_name))
setattr(parent, child_name, new_child)
def UnsetAll(self):
"""Reverses all the Set() calls, restoring things to their original
definition. It's okay to call UnsetAll() repeatedly, as later calls have
no effect if no Set() calls have been made.
"""
# Undo calls to Set() in reverse order, in case Set() was called on the
# same arguments repeatedly (want the original call to be last one undone)
self.cache.reverse()
for (parent, old_child, child_name) in self.cache:
setattr(parent, child_name, old_child)
self.cache = []
|
mortada/numpy
|
refs/heads/master
|
numpy/lib/user_array.py
|
111
|
"""
Standard container-class for easy multiple-inheritance.
Try to inherit from ndarray instead of using this class, as this class is
not complete.
"""
from __future__ import division, absolute_import, print_function
from numpy.core import (
array, asarray, absolute, add, subtract, multiply, divide,
remainder, power, left_shift, right_shift, bitwise_and, bitwise_or,
bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
greater_equal, shape, reshape, arange, sin, sqrt, transpose
)
from numpy.compat import long
class container(object):
def __init__(self, data, dtype=None, copy=True):
self.array = array(data, dtype, copy=copy)
def __repr__(self):
if len(self.shape) > 0:
return self.__class__.__name__ + repr(self.array)[len("array"):]
else:
return self.__class__.__name__ + "(" + repr(self.array) + ")"
def __array__(self, t=None):
if t:
return self.array.astype(t)
return self.array
# Array as sequence
def __len__(self):
return len(self.array)
def __getitem__(self, index):
return self._rc(self.array[index])
def __getslice__(self, i, j):
return self._rc(self.array[i:j])
def __setitem__(self, index, value):
self.array[index] = asarray(value, self.dtype)
def __setslice__(self, i, j, value):
self.array[i:j] = asarray(value, self.dtype)
def __abs__(self):
return self._rc(absolute(self.array))
def __neg__(self):
return self._rc(-self.array)
def __add__(self, other):
return self._rc(self.array + asarray(other))
__radd__ = __add__
def __iadd__(self, other):
add(self.array, other, self.array)
return self
def __sub__(self, other):
return self._rc(self.array - asarray(other))
def __rsub__(self, other):
return self._rc(asarray(other) - self.array)
def __isub__(self, other):
subtract(self.array, other, self.array)
return self
def __mul__(self, other):
return self._rc(multiply(self.array, asarray(other)))
__rmul__ = __mul__
def __imul__(self, other):
multiply(self.array, other, self.array)
return self
def __div__(self, other):
return self._rc(divide(self.array, asarray(other)))
def __rdiv__(self, other):
return self._rc(divide(asarray(other), self.array))
def __idiv__(self, other):
divide(self.array, other, self.array)
return self
def __mod__(self, other):
return self._rc(remainder(self.array, other))
def __rmod__(self, other):
return self._rc(remainder(other, self.array))
def __imod__(self, other):
remainder(self.array, other, self.array)
return self
def __divmod__(self, other):
return (self._rc(divide(self.array, other)),
self._rc(remainder(self.array, other)))
def __rdivmod__(self, other):
return (self._rc(divide(other, self.array)),
self._rc(remainder(other, self.array)))
def __pow__(self, other):
return self._rc(power(self.array, asarray(other)))
def __rpow__(self, other):
return self._rc(power(asarray(other), self.array))
def __ipow__(self, other):
power(self.array, other, self.array)
return self
def __lshift__(self, other):
return self._rc(left_shift(self.array, other))
def __rshift__(self, other):
return self._rc(right_shift(self.array, other))
def __rlshift__(self, other):
return self._rc(left_shift(other, self.array))
def __rrshift__(self, other):
return self._rc(right_shift(other, self.array))
def __ilshift__(self, other):
left_shift(self.array, other, self.array)
return self
def __irshift__(self, other):
right_shift(self.array, other, self.array)
return self
def __and__(self, other):
return self._rc(bitwise_and(self.array, other))
def __rand__(self, other):
return self._rc(bitwise_and(other, self.array))
def __iand__(self, other):
bitwise_and(self.array, other, self.array)
return self
def __xor__(self, other):
return self._rc(bitwise_xor(self.array, other))
def __rxor__(self, other):
return self._rc(bitwise_xor(other, self.array))
def __ixor__(self, other):
bitwise_xor(self.array, other, self.array)
return self
def __or__(self, other):
return self._rc(bitwise_or(self.array, other))
def __ror__(self, other):
return self._rc(bitwise_or(other, self.array))
def __ior__(self, other):
bitwise_or(self.array, other, self.array)
return self
def __pos__(self):
return self._rc(self.array)
def __invert__(self):
return self._rc(invert(self.array))
def _scalarfunc(self, func):
if len(self.shape) == 0:
return func(self[0])
else:
raise TypeError(
"only rank-0 arrays can be converted to Python scalars.")
def __complex__(self):
return self._scalarfunc(complex)
def __float__(self):
return self._scalarfunc(float)
def __int__(self):
return self._scalarfunc(int)
def __long__(self):
return self._scalarfunc(long)
def __hex__(self):
return self._scalarfunc(hex)
def __oct__(self):
return self._scalarfunc(oct)
def __lt__(self, other):
return self._rc(less(self.array, other))
def __le__(self, other):
return self._rc(less_equal(self.array, other))
def __eq__(self, other):
return self._rc(equal(self.array, other))
def __ne__(self, other):
return self._rc(not_equal(self.array, other))
def __gt__(self, other):
return self._rc(greater(self.array, other))
def __ge__(self, other):
return self._rc(greater_equal(self.array, other))
def copy(self):
return self._rc(self.array.copy())
def tostring(self):
return self.array.tostring()
def byteswap(self):
return self._rc(self.array.byteswap())
def astype(self, typecode):
return self._rc(self.array.astype(typecode))
def _rc(self, a):
if len(shape(a)) == 0:
return a
else:
return self.__class__(a)
def __array_wrap__(self, *args):
return self.__class__(args[0])
def __setattr__(self, attr, value):
if attr == 'array':
object.__setattr__(self, attr, value)
return
try:
self.array.__setattr__(attr, value)
except AttributeError:
object.__setattr__(self, attr, value)
# Only called after other approaches fail.
def __getattr__(self, attr):
if (attr == 'array'):
return object.__getattribute__(self, attr)
return self.array.__getattribute__(attr)
#############################################################
# Test of class container
#############################################################
if __name__ == '__main__':
temp = reshape(arange(10000), (100, 100))
ua = container(temp)
# new object created begin test
print(dir(ua))
print(shape(ua), ua.shape) # I have changed Numeric.py
ua_small = ua[:3, :5]
print(ua_small)
# this did not change ua[0,0], which is not normal behavior
ua_small[0, 0] = 10
print(ua_small[0, 0], ua[0, 0])
print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))
print(less(ua_small, 103), type(less(ua_small, 103)))
print(type(ua_small * reshape(arange(15), shape(ua_small))))
print(reshape(ua_small, (5, 3)))
print(transpose(ua_small))
|
Pencroff/ai-hackathon-2017
|
refs/heads/master
|
Backend/venv/lib/python3.6/site-packages/flask/wrappers.py
|
121
|
# -*- coding: utf-8 -*-
"""
flask.wrappers
~~~~~~~~~~~~~~
Implements the WSGI wrappers (request and response).
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase
from werkzeug.exceptions import BadRequest
from . import json
from .globals import _request_ctx_stack
_missing = object()
def _get_data(req, cache):
getter = getattr(req, 'get_data', None)
if getter is not None:
return getter(cache=cache)
return req.data
class Request(RequestBase):
"""The request object used by default in Flask. Remembers the
matched endpoint and view arguments.
It is what ends up as :class:`~flask.request`. If you want to replace
the request object used you can subclass this and set
:attr:`~flask.Flask.request_class` to your subclass.
The request object is a :class:`~werkzeug.wrappers.Request` subclass and
provides all of the attributes Werkzeug defines plus a few Flask
specific ones.
"""
#: The internal URL rule that matched the request. This can be
#: useful to inspect which methods are allowed for the URL from
#: a before/after handler (``request.url_rule.methods``) etc.
#:
#: .. versionadded:: 0.6
url_rule = None
#: A dict of view arguments that matched the request. If an exception
#: happened when matching, this will be ``None``.
view_args = None
#: If matching the URL failed, this is the exception that will be
#: raised / was raised as part of the request handling. This is
#: usually a :exc:`~werkzeug.exceptions.NotFound` exception or
#: something similar.
routing_exception = None
# Switched by the request context until 1.0 to opt in deprecated
# module functionality.
_is_old_module = False
@property
def max_content_length(self):
"""Read-only view of the ``MAX_CONTENT_LENGTH`` config key."""
ctx = _request_ctx_stack.top
if ctx is not None:
return ctx.app.config['MAX_CONTENT_LENGTH']
@property
def endpoint(self):
"""The endpoint that matched the request. This in combination with
:attr:`view_args` can be used to reconstruct the same or a
modified URL. If an exception happened when matching, this will
be ``None``.
"""
if self.url_rule is not None:
return self.url_rule.endpoint
@property
def module(self):
"""The name of the current module if the request was dispatched
to an actual module. This is deprecated functionality, use blueprints
instead.
"""
from warnings import warn
warn(DeprecationWarning('modules were deprecated in favor of '
'blueprints. Use request.blueprint '
'instead.'), stacklevel=2)
if self._is_old_module:
return self.blueprint
@property
def blueprint(self):
"""The name of the current blueprint"""
if self.url_rule and '.' in self.url_rule.endpoint:
return self.url_rule.endpoint.rsplit('.', 1)[0]
@property
def json(self):
"""If the mimetype is :mimetype:`application/json` this will contain the
parsed JSON data. Otherwise this will be ``None``.
The :meth:`get_json` method should be used instead.
"""
from warnings import warn
warn(DeprecationWarning('json is deprecated. '
'Use get_json() instead.'), stacklevel=2)
return self.get_json()
@property
def is_json(self):
"""Indicates if this request is JSON or not. By default a request
is considered to include JSON data if the mimetype is
:mimetype:`application/json` or :mimetype:`application/*+json`.
.. versionadded:: 0.11
"""
mt = self.mimetype
if mt == 'application/json':
return True
if mt.startswith('application/') and mt.endswith('+json'):
return True
return False
def get_json(self, force=False, silent=False, cache=True):
"""Parses the incoming JSON request data and returns it. By default
this function will return ``None`` if the mimetype is not
:mimetype:`application/json` but this can be overridden by the
``force`` parameter. If parsing fails the
:meth:`on_json_loading_failed` method on the request object will be
invoked.
:param force: if set to ``True`` the mimetype is ignored.
:param silent: if set to ``True`` this method will fail silently
and return ``None``.
:param cache: if set to ``True`` the parsed JSON data is remembered
on the request.
"""
rv = getattr(self, '_cached_json', _missing)
# We return cached JSON only when the cache is enabled.
if cache and rv is not _missing:
return rv
if not (force or self.is_json):
return None
# We accept a request charset against the specification as
# certain clients have been using this in the past. This
# fits our general approach of being nice in what we accept
# and strict in what we send out.
request_charset = self.mimetype_params.get('charset')
try:
data = _get_data(self, cache)
if request_charset is not None:
rv = json.loads(data, encoding=request_charset)
else:
rv = json.loads(data)
except ValueError as e:
if silent:
rv = None
else:
rv = self.on_json_loading_failed(e)
if cache:
self._cached_json = rv
return rv
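# Usage sketch inside a view function (route and keys are illustrative):
# @app.route('/api/echo', methods=['POST'])
# def echo():
#     payload = request.get_json(silent=True) or {}
#     return json.dumps(payload)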
def on_json_loading_failed(self, e):
"""Called if decoding of the JSON data failed. The return value of
this method is used by :meth:`get_json` when an error occurred. The
default implementation just raises a :class:`BadRequest` exception.
.. versionchanged:: 0.10
Removed buggy previous behavior of generating a random JSON
response. If you want that behavior back you can trivially
add it by subclassing.
.. versionadded:: 0.8
"""
ctx = _request_ctx_stack.top
if ctx is not None and ctx.app.config.get('DEBUG', False):
raise BadRequest('Failed to decode JSON object: {0}'.format(e))
raise BadRequest()
def _load_form_data(self):
RequestBase._load_form_data(self)
# In debug mode we're replacing the files multidict with an ad-hoc
# subclass that raises a different error for key errors.
ctx = _request_ctx_stack.top
if ctx is not None and ctx.app.debug and \
self.mimetype != 'multipart/form-data' and not self.files:
from .debughelpers import attach_enctype_error_multidict
attach_enctype_error_multidict(self)
class Response(ResponseBase):
"""The response object that is used by default in Flask. Works like the
response object from Werkzeug but is set to have an HTML mimetype by
default. Quite often you don't have to create this object yourself because
:meth:`~flask.Flask.make_response` will take care of that for you.
If you want to replace the response object used you can subclass this and
set :attr:`~flask.Flask.response_class` to your subclass.
"""
default_mimetype = 'text/html'
|
Mhynlo/SickRage
|
refs/heads/master
|
lib/sqlalchemy/sql/type_api.py
|
78
|
# sql/types_api.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base types API.
"""
from .. import exc, util
from . import operators
from .visitors import Visitable
# these are back-assigned by sqltypes.
BOOLEANTYPE = None
INTEGERTYPE = None
NULLTYPE = None
STRINGTYPE = None
class TypeEngine(Visitable):
"""The ultimate base class for all SQL datatypes.
Common subclasses of :class:`.TypeEngine` include
:class:`.String`, :class:`.Integer`, and :class:`.Boolean`.
For an overview of the SQLAlchemy typing system, see :ref:`types_toplevel`.
.. seealso::
:ref:`types_toplevel`
"""
_sqla_type = True
_isnull = False
class Comparator(operators.ColumnOperators):
"""Base class for custom comparison operations defined at the
type level. See :attr:`.TypeEngine.comparator_factory`.
"""
def __init__(self, expr):
self.expr = expr
def __reduce__(self):
return _reconstitute_comparator, (self.expr, )
hashable = True
"""Flag, if False, means values from this type aren't hashable.
Used by the ORM when uniquing result lists.
"""
comparator_factory = Comparator
"""A :class:`.TypeEngine.Comparator` class which will apply
to operations performed by owning :class:`.ColumnElement` objects.
The :attr:`.comparator_factory` attribute is a hook consulted by
the core expression system when column and SQL expression operations
are performed. When a :class:`.TypeEngine.Comparator` class is
associated with this attribute, it allows custom re-definition of
all existing operators, as well as definition of new operators.
Existing operators include those provided by Python operator overloading
such as :meth:`.operators.ColumnOperators.__add__` and
:meth:`.operators.ColumnOperators.__eq__`,
as well as those provided as standard
attributes of :class:`.operators.ColumnOperators` such as
:meth:`.operators.ColumnOperators.like`
and :meth:`.operators.ColumnOperators.in_`.
Rudimentary usage of this hook is allowed through simple subclassing
of existing types, or alternatively by using :class:`.TypeDecorator`.
See the documentation section :ref:`types_operators` for examples.
.. versionadded:: 0.8 The expression system was enhanced to support
customization of operators on a per-type level.
"""
def copy_value(self, value):
return value
def literal_processor(self, dialect):
"""Return a conversion function for processing literal values that are
to be rendered directly without using binds.
This function is used when the compiler makes use of the
"literal_binds" flag, typically used in DDL generation as well
as in certain scenarios where backends don't accept bound parameters.
.. versionadded:: 0.9.0
"""
return None
def bind_processor(self, dialect):
"""Return a conversion function for processing bind values.
Returns a callable which will receive a bind parameter value
as the sole positional argument and will return a value to
send to the DB-API.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
"""
return None
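# Illustrative override sketch (an assumption, not part of the base class):
# def bind_processor(self, dialect):
#     def process(value):
#         return value.upper() if value is not None else None
#     return process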
def result_processor(self, dialect, coltype):
"""Return a conversion function for processing result row values.
Returns a callable which will receive a result row column
value as the sole positional argument and will return a value
to return to the user.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
:param coltype: DBAPI coltype argument received in cursor.description.
"""
return None
def column_expression(self, colexpr):
"""Given a SELECT column expression, return a wrapping SQL expression.
This is typically a SQL function that wraps a column expression
as rendered in the columns clause of a SELECT statement.
It is used for special data types that require
columns to be wrapped in some special database function in order
to coerce the value before being sent back to the application.
It is the SQL analogue of the :meth:`.TypeEngine.result_processor`
method.
The method is evaluated at statement compile time, as opposed
to statement construction time.
See also:
:ref:`types_sql_value_processing`
"""
return None
@util.memoized_property
def _has_column_expression(self):
"""memoized boolean, check if column_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return self.__class__.column_expression.__code__ \
is not TypeEngine.column_expression.__code__
def bind_expression(self, bindvalue):
""""Given a bind value (i.e. a :class:`.BindParameter` instance),
return a SQL expression in its place.
This is typically a SQL function that wraps the existing bound
parameter within the statement. It is used for special data types
that require literals being wrapped in some special database function
in order to coerce an application-level value into a database-specific
format. It is the SQL analogue of the
:meth:`.TypeEngine.bind_processor` method.
The method is evaluated at statement compile time, as opposed
to statement construction time.
Note that this method, when implemented, should always return
the exact same structure, without any conditional logic, as it
may be used in an executemany() call against an arbitrary number
of bound parameter sets.
See also:
:ref:`types_sql_value_processing`
"""
return None
@util.memoized_property
def _has_bind_expression(self):
"""memoized boolean, check if bind_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return self.__class__.bind_expression.__code__ \
is not TypeEngine.bind_expression.__code__
def compare_values(self, x, y):
"""Compare two values for equality."""
return x == y
def get_dbapi_type(self, dbapi):
"""Return the corresponding type object from the underlying DB-API, if
any.
This can be useful for calling ``setinputsizes()``, for example.
"""
return None
@property
def python_type(self):
"""Return the Python type object expected to be returned
by instances of this type, if known.
Basically, types which enforce a return type, or which are known
across the board to do so for all common DBAPIs (like ``int``,
for example), will return that type.
If a return type is not defined, raises
``NotImplementedError``.
Note that any type also accommodates NULL in SQL which
means you can also get back ``None`` from any type
in practice.
"""
raise NotImplementedError()
def with_variant(self, type_, dialect_name):
"""Produce a new type object that will utilize the given
type when applied to the dialect of the given name.
e.g.::
from sqlalchemy.types import String
from sqlalchemy.dialects import mysql
s = String()
s = s.with_variant(mysql.VARCHAR(collation='foo'), 'mysql')
The construction of :meth:`.TypeEngine.with_variant` is always
from the "fallback" type to that which is dialect specific.
The returned type is an instance of :class:`.Variant`, which
itself provides a :meth:`~sqlalchemy.types.Variant.with_variant`
that can be called repeatedly.
:param type_: a :class:`.TypeEngine` that will be selected
as a variant from the originating type, when a dialect
of the given name is in use.
:param dialect_name: base name of the dialect which uses
this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)
.. versionadded:: 0.7.2
"""
return Variant(self, {dialect_name: type_})
@util.memoized_property
def _type_affinity(self):
"""Return a rudimental 'affinity' value expressing the general class
of type."""
typ = None
for t in self.__class__.__mro__:
if t in (TypeEngine, UserDefinedType):
return typ
elif issubclass(t, (TypeEngine, UserDefinedType)):
typ = t
else:
return self.__class__
def dialect_impl(self, dialect):
"""Return a dialect-specific implementation for this
:class:`.TypeEngine`.
"""
try:
return dialect._type_memos[self]['impl']
except KeyError:
return self._dialect_info(dialect)['impl']
def _cached_literal_processor(self, dialect):
"""Return a dialect-specific literal processor for this type."""
try:
return dialect._type_memos[self]['literal']
except KeyError:
d = self._dialect_info(dialect)
d['literal'] = lp = d['impl'].literal_processor(dialect)
return lp
def _cached_bind_processor(self, dialect):
"""Return a dialect-specific bind processor for this type."""
try:
return dialect._type_memos[self]['bind']
except KeyError:
d = self._dialect_info(dialect)
d['bind'] = bp = d['impl'].bind_processor(dialect)
return bp
def _cached_result_processor(self, dialect, coltype):
"""Return a dialect-specific result processor for this type."""
try:
return dialect._type_memos[self][coltype]
except KeyError:
d = self._dialect_info(dialect)
# key assumption: DBAPI type codes are
# constants. Else this dictionary would
# grow unbounded.
d[coltype] = rp = d['impl'].result_processor(dialect, coltype)
return rp
def _dialect_info(self, dialect):
"""Return a dialect-specific registry which
caches a dialect-specific implementation, bind processing
function, and one or more result processing functions."""
if self in dialect._type_memos:
return dialect._type_memos[self]
else:
impl = self._gen_dialect_impl(dialect)
if impl is self:
impl = self.adapt(type(self))
# this can't be self, else we create a cycle
assert impl is not self
dialect._type_memos[self] = d = {'impl': impl}
return d
def _gen_dialect_impl(self, dialect):
return dialect.type_descriptor(self)
def adapt(self, cls, **kw):
"""Produce an "adapted" form of this type, given an "impl" class
to work with.
This method is used internally to associate generic
types with "implementation" types that are specific to a particular
dialect.
"""
return util.constructor_copy(self, cls, **kw)
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
Given an operator and value, gives the type a chance
to return a type which the value should be coerced into.
The default behavior here is conservative; if the right-hand
side is already coerced into a SQL type based on its
Python type, it is usually left alone.
End-user functionality extension here should generally be via
:class:`.TypeDecorator`, which provides more liberal behavior in that
it defaults to coercing the other side of the expression into this
type, thus applying special Python conversions above and beyond those
needed by the DBAPI to both sides. It also provides the public method
:meth:`.TypeDecorator.coerce_compared_value` which is intended for
end-user customization of this behavior.
"""
_coerced_type = _type_map.get(type(value), NULLTYPE)
if _coerced_type is NULLTYPE or _coerced_type._type_affinity \
is self._type_affinity:
return self
else:
return _coerced_type
def _compare_type_affinity(self, other):
return self._type_affinity is other._type_affinity
def compile(self, dialect=None):
"""Produce a string-compiled form of this :class:`.TypeEngine`.
When called with no arguments, uses a "default" dialect
to produce a string result.
:param dialect: a :class:`.Dialect` instance.
"""
# arg, return value is inconsistent with
# ClauseElement.compile()....this is a mistake.
if not dialect:
dialect = self._default_dialect()
return dialect.type_compiler.process(self)
@util.dependencies("sqlalchemy.engine.default")
def _default_dialect(self, default):
if self.__class__.__module__.startswith("sqlalchemy.dialects"):
tokens = self.__class__.__module__.split(".")[0:3]
mod = ".".join(tokens)
return getattr(__import__(mod).dialects, tokens[-1]).dialect()
else:
return default.DefaultDialect()
def __str__(self):
if util.py2k:
return unicode(self.compile()).\
encode('ascii', 'backslashreplace')
else:
return str(self.compile())
def __repr__(self):
return util.generic_repr(self)
class UserDefinedType(TypeEngine):
"""Base for user defined types.
This should be the base of new types. Note that
for most cases, :class:`.TypeDecorator` is probably
more appropriate::
import sqlalchemy.types as types
class MyType(types.UserDefinedType):
def __init__(self, precision = 8):
self.precision = precision
def get_col_spec(self):
return "MYTYPE(%s)" % self.precision
def bind_processor(self, dialect):
def process(value):
return value
return process
def result_processor(self, dialect, coltype):
def process(value):
return value
return process
Once the type is made, it's immediately usable::
table = Table('foo', meta,
Column('id', Integer, primary_key=True),
Column('data', MyType(16))
)
"""
__visit_name__ = "user_defined"
class Comparator(TypeEngine.Comparator):
def _adapt_expression(self, op, other_comparator):
if hasattr(self.type, 'adapt_operator'):
util.warn_deprecated(
"UserDefinedType.adapt_operator is deprecated. Create "
"a UserDefinedType.Comparator subclass instead which "
"generates the desired expression constructs, given a "
"particular operator."
)
return self.type.adapt_operator(op), self.type
else:
return op, self.type
comparator_factory = Comparator
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
Default behavior for :class:`.UserDefinedType` is the
same as that of :class:`.TypeDecorator`; by default it returns
``self``, assuming the compared value should be coerced into
the same type as this one. See
:meth:`.TypeDecorator.coerce_compared_value` for more detail.
.. versionchanged:: 0.8 :meth:`.UserDefinedType.coerce_compared_value`
now returns ``self`` by default, rather than falling onto the
more fundamental behavior of
:meth:`.TypeEngine.coerce_compared_value`.
"""
return self
class TypeDecorator(TypeEngine):
"""Allows the creation of types which add additional functionality
to an existing type.
This method is preferred to direct subclassing of SQLAlchemy's
built-in types as it ensures that all required functionality of
the underlying type is kept in place.
Typical usage::
import sqlalchemy.types as types
class MyType(types.TypeDecorator):
'''Prefixes Unicode values with "PREFIX:" on the way in and
strips it off on the way out.
'''
impl = types.Unicode
def process_bind_param(self, value, dialect):
return "PREFIX:" + value
def process_result_value(self, value, dialect):
return value[7:]
def copy(self):
return MyType(self.impl.length)
The class-level "impl" attribute is required, and can reference any
TypeEngine class. Alternatively, the load_dialect_impl() method
can be used to provide different type classes based on the dialect
given; in this case, the "impl" variable can reference
``TypeEngine`` as a placeholder.
Types that receive a Python type that isn't similar to the ultimate type
used may want to define the :meth:`TypeDecorator.coerce_compared_value`
method. This is used to give the expression system a hint when coercing
Python objects into bind parameters within expressions. Consider this
expression::
mytable.c.somecol + datetime.date(2009, 5, 15)
Above, if "somecol" is an ``Integer`` variant, it makes sense that
we're doing date arithmetic, where above is usually interpreted
by databases as adding a number of days to the given date.
The expression system does the right thing by not attempting to
coerce the "date()" value into an integer-oriented bind parameter.
However, in the case of ``TypeDecorator``, we are usually changing an
incoming Python type to something new - ``TypeDecorator`` by default will
"coerce" the non-typed side to be the same type as itself. Such as below,
we define an "epoch" type that stores a date value as an integer::
class MyEpochType(types.TypeDecorator):
impl = types.Integer
epoch = datetime.date(1970, 1, 1)
def process_bind_param(self, value, dialect):
return (value - self.epoch).days
def process_result_value(self, value, dialect):
return self.epoch + timedelta(days=value)
Our expression of ``somecol + date`` with the above type will coerce the
"date" on the right side to also be treated as ``MyEpochType``.
This behavior can be overridden via the
:meth:`~TypeDecorator.coerce_compared_value` method, which returns a type
that should be used for the value of the expression. Below we set it such
that an integer value will be treated as an ``Integer``, and any other
value is assumed to be a date and will be treated as a ``MyEpochType``::
def coerce_compared_value(self, op, value):
if isinstance(value, int):
return Integer()
else:
return self
"""
__visit_name__ = "type_decorator"
def __init__(self, *args, **kwargs):
"""Construct a :class:`.TypeDecorator`.
Arguments sent here are passed to the constructor
of the class assigned to the ``impl`` class level attribute,
assuming the ``impl`` is a callable, and the resulting
object is assigned to the ``self.impl`` instance attribute
(thus overriding the class attribute of the same name).
If the class level ``impl`` is not a callable (the unusual case),
it will be assigned to the same instance attribute 'as-is',
ignoring those arguments passed to the constructor.
Subclasses can override this to customize the generation
of ``self.impl`` entirely.
"""
if not hasattr(self.__class__, 'impl'):
raise AssertionError("TypeDecorator implementations "
"require a class-level variable "
"'impl' which refers to the class of "
"type being decorated")
self.impl = to_instance(self.__class__.impl, *args, **kwargs)
coerce_to_is_types = (util.NoneType, )
"""Specify those Python types which should be coerced at the expression
level to "IS <constant>" when compared using ``==`` (and same for
``IS NOT`` in conjunction with ``!=``.
For most SQLAlchemy types, this includes ``NoneType``, as well as ``bool``.
:class:`.TypeDecorator` modifies this list to only include ``NoneType``,
as typedecorator implementations that deal with boolean types are common.
Custom :class:`.TypeDecorator` classes can override this attribute to
return an empty tuple, in which case no values will be coerced to
constants.
.. versionadded:: 0.8.2
Added :attr:`.TypeDecorator.coerce_to_is_types` to allow for easier
control of ``__eq__()`` ``__ne__()`` operations.
"""
class Comparator(TypeEngine.Comparator):
def operate(self, op, *other, **kwargs):
kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types
return super(TypeDecorator.Comparator, self).operate(
op, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types
return super(TypeDecorator.Comparator, self).reverse_operate(
op, other, **kwargs)
@property
def comparator_factory(self):
return type("TDComparator",
(TypeDecorator.Comparator, self.impl.comparator_factory),
{})
def _gen_dialect_impl(self, dialect):
"""
#todo
"""
adapted = dialect.type_descriptor(self)
if adapted is not self:
return adapted
# otherwise adapt the impl type, link
# to a copy of this TypeDecorator and return
# that.
typedesc = self.load_dialect_impl(dialect).dialect_impl(dialect)
tt = self.copy()
if not isinstance(tt, self.__class__):
raise AssertionError('Type object %s does not properly '
'implement the copy() method, it must '
'return an object of type %s' % (self,
self.__class__))
tt.impl = typedesc
return tt
@property
def _type_affinity(self):
"""
#todo
"""
return self.impl._type_affinity
def type_engine(self, dialect):
"""Return a dialect-specific :class:`.TypeEngine` instance
for this :class:`.TypeDecorator`.
In most cases this returns a dialect-adapted form of
the :class:`.TypeEngine` type represented by ``self.impl``.
Makes use of :meth:`dialect_impl` but also traverses
into wrapped :class:`.TypeDecorator` instances.
Behavior can be customized here by overriding
:meth:`load_dialect_impl`.
"""
adapted = dialect.type_descriptor(self)
if type(adapted) is not type(self):
return adapted
elif isinstance(self.impl, TypeDecorator):
return self.impl.type_engine(dialect)
else:
return self.load_dialect_impl(dialect)
def load_dialect_impl(self, dialect):
"""Return a :class:`.TypeEngine` object corresponding to a dialect.
This is an end-user override hook that can be used to provide
differing types depending on the given dialect. It is used
by the :class:`.TypeDecorator` implementation of :meth:`type_engine`
to help determine what type should ultimately be returned
for a given :class:`.TypeDecorator`.
By default returns ``self.impl``.
"""
return self.impl
def __getattr__(self, key):
"""Proxy all other undefined accessors to the underlying
implementation."""
return getattr(self.impl, key)
def process_literal_param(self, value, dialect):
"""Receive a literal parameter value to be rendered inline within
a statement.
This method is used when the compiler renders a
literal value without using binds, typically within DDL
such as in the "server default" of a column or an expression
within a CHECK constraint.
The returned string will be rendered into the output string.
.. versionadded:: 0.9.0
"""
raise NotImplementedError()
def process_bind_param(self, value, dialect):
"""Receive a bound parameter value to be converted.
Subclasses override this method to return the
value that should be passed along to the underlying
:class:`.TypeEngine` object, and from there to the
DBAPI ``execute()`` method.
The operation could be anything desired to perform custom
behavior, such as transforming or serializing data.
This could also be used as a hook for validating logic.
This operation should be designed with the reverse operation
in mind, which would be the process_result_value method of
this class.
:param value: Data to operate upon, of any type expected by
this method in the subclass. Can be ``None``.
:param dialect: the :class:`.Dialect` in use.
"""
raise NotImplementedError()
def process_result_value(self, value, dialect):
"""Receive a result-row column value to be converted.
Subclasses should implement this method to operate on data
fetched from the database.
Subclasses override this method to return the
value that should be passed back to the application,
given a value that is already processed by
the underlying :class:`.TypeEngine` object, originally
from the DBAPI cursor method ``fetchone()`` or similar.
The operation could be anything desired to perform custom
behavior, such as transforming or serializing data.
This could also be used as a hook for validating logic.
:param value: Data to operate upon, of any type expected by
this method in the subclass. Can be ``None``.
:param dialect: the :class:`.Dialect` in use.
This operation should be designed to be reversible by
the "process_bind_param" method of this class.
"""
raise NotImplementedError()
@util.memoized_property
def _has_bind_processor(self):
"""memoized boolean, check if process_bind_param is implemented.
Allows the base process_bind_param to raise
NotImplementedError without needing to test an expensive
exception throw.
"""
return self.__class__.process_bind_param.__code__ \
is not TypeDecorator.process_bind_param.__code__
@util.memoized_property
def _has_literal_processor(self):
"""memoized boolean, check if process_literal_param is implemented.
"""
return self.__class__.process_literal_param.__code__ \
is not TypeDecorator.process_literal_param.__code__
def literal_processor(self, dialect):
"""Provide a literal processing function for the given
:class:`.Dialect`.
Subclasses here will typically override :meth:`.TypeDecorator.process_literal_param`
instead of this method directly.
By default, this method makes use of :meth:`.TypeDecorator.process_bind_param`
if that method is implemented, where :meth:`.TypeDecorator.process_literal_param`
is not. The rationale here is that :class:`.TypeDecorator` typically deals
with Python conversions of data that are above the layer of database
presentation. With the value converted by :meth:`.TypeDecorator.process_bind_param`,
the underlying type will then handle whether it needs to be presented to the
DBAPI as a bound parameter or to the database as an inline SQL value.
.. versionadded:: 0.9.0
"""
if self._has_literal_processor:
process_param = self.process_literal_param
elif self._has_bind_processor:
# the bind processor should normally be OK
# for TypeDecorator; since it isn't doing DB-level
# handling, the handling here won't differ between bound
# and literal values.
process_param = self.process_bind_param
else:
process_param = None
if process_param:
impl_processor = self.impl.literal_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
else:
def process(value):
return process_param(value, dialect)
return process
else:
return self.impl.literal_processor(dialect)
def bind_processor(self, dialect):
"""Provide a bound value processing function for the
given :class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for bound value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_bind_param` here.
User-defined code can override this method directly,
though it's likely best to use :meth:`process_bind_param` so that
the processing provided by ``self.impl`` is maintained.
:param dialect: Dialect instance in use.
This method is the reverse counterpart to the
:meth:`result_processor` method of this class.
"""
if self._has_bind_processor:
process_param = self.process_bind_param
impl_processor = self.impl.bind_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
else:
def process(value):
return process_param(value, dialect)
return process
else:
return self.impl.bind_processor(dialect)
@util.memoized_property
def _has_result_processor(self):
"""memoized boolean, check if process_result_value is implemented.
Allows the base process_result_value to raise
NotImplementedError without needing to test an expensive
exception throw.
"""
return self.__class__.process_result_value.__code__ \
is not TypeDecorator.process_result_value.__code__
def result_processor(self, dialect, coltype):
"""Provide a result value processing function for the given
:class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for result value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_result_value` here.
User-defined code can override this method directly,
though it's likely best to use :meth:`process_result_value` so that
the processing provided by ``self.impl`` is maintained.
:param dialect: Dialect instance in use.
:param coltype: An SQLAlchemy data type
This method is the reverse counterpart to the
:meth:`bind_processor` method of this class.
"""
if self._has_result_processor:
process_value = self.process_result_value
impl_processor = self.impl.result_processor(dialect,
coltype)
if impl_processor:
def process(value):
return process_value(impl_processor(value), dialect)
else:
def process(value):
return process_value(value, dialect)
return process
else:
return self.impl.result_processor(dialect, coltype)
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
By default, returns self. This method is called by
the expression system when an object using this type is
on the left or right side of an expression against a plain Python
object which does not yet have a SQLAlchemy type assigned::
expr = table.c.somecolumn + 35
Where above, if ``somecolumn`` uses this type, this method will
be called with the value ``operator.add``
and ``35``. The return value is whatever SQLAlchemy type should
be used for ``35`` for this particular operation.
"""
return self
def copy(self):
"""Produce a copy of this :class:`.TypeDecorator` instance.
This is a shallow copy and is provided to fulfill part of
the :class:`.TypeEngine` contract. It usually does not
need to be overridden unless the user-defined :class:`.TypeDecorator`
has local state that should be deep-copied.
"""
instance = self.__class__.__new__(self.__class__)
instance.__dict__.update(self.__dict__)
return instance
def get_dbapi_type(self, dbapi):
"""Return the DBAPI type object represented by this
:class:`.TypeDecorator`.
By default this calls upon :meth:`.TypeEngine.get_dbapi_type` of the
underlying "impl".
"""
return self.impl.get_dbapi_type(dbapi)
def compare_values(self, x, y):
"""Given two values, compare them for equality.
By default this calls upon :meth:`.TypeEngine.compare_values`
of the underlying "impl", which in turn usually
uses the Python equals operator ``==``.
This function is used by the ORM to compare
an original-loaded value with an intercepted
"changed" value, to determine if a net change
has occurred.
"""
return self.impl.compare_values(x, y)
def __repr__(self):
return util.generic_repr(self, to_inspect=self.impl)
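# Illustrative sketch (hypothetical type): a TypeDecorator that persists
# Python dicts as JSON text, exercising process_bind_param() and
# process_result_value(). Wrapped in a factory so the VARCHAR import
# resolves lazily; concrete types may live in a different module.
def _example_json_encoded_dict():
    import json
    from sqlalchemy.types import VARCHAR

    class JSONEncodedDict(TypeDecorator):
        """Represents a dict as a JSON-encoded VARCHAR."""

        impl = VARCHAR

        def process_bind_param(self, value, dialect):
            # serialize on the way in
            return json.dumps(value) if value is not None else None

        def process_result_value(self, value, dialect):
            # deserialize on the way out
            return json.loads(value) if value is not None else None

    return JSONEncodedDict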
class Variant(TypeDecorator):
"""A wrapping type that selects among a variety of
implementations based on dialect in use.
The :class:`.Variant` type is typically constructed
using the :meth:`.TypeEngine.with_variant` method.
.. versionadded:: 0.7.2
.. seealso:: :meth:`.TypeEngine.with_variant` for an example of use.
"""
def __init__(self, base, mapping):
"""Construct a new :class:`.Variant`.
:param base: the base 'fallback' type
:param mapping: dictionary of string dialect names to
:class:`.TypeEngine` instances.
"""
self.impl = base
self.mapping = mapping
def load_dialect_impl(self, dialect):
if dialect.name in self.mapping:
return self.mapping[dialect.name]
else:
return self.impl
def with_variant(self, type_, dialect_name):
"""Return a new :class:`.Variant` which adds the given
type + dialect name to the mapping, in addition to the
mapping present in this :class:`.Variant`.
:param type_: a :class:`.TypeEngine` that will be selected
as a variant from the originating type, when a dialect
of the given name is in use.
:param dialect_name: base name of the dialect which uses
this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)
"""
if dialect_name in self.mapping:
raise exc.ArgumentError(
"Dialect '%s' is already present in "
"the mapping for this Variant" % dialect_name)
mapping = self.mapping.copy()
mapping[dialect_name] = type_
return Variant(self.impl, mapping)
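# Usage sketch for Variant (assumes the public String type and the MySQL
# LONGTEXT dialect type; imports are deferred to avoid import cycles):
# String(255) applies on every backend except MySQL, which gets LONGTEXT.
def _example_variant():
    from sqlalchemy import String
    from sqlalchemy.dialects.mysql import LONGTEXT
    return String(255).with_variant(LONGTEXT, 'mysql')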
def _reconstitute_comparator(expression):
return expression.comparator
def to_instance(typeobj, *arg, **kw):
if typeobj is None:
return NULLTYPE
if util.callable(typeobj):
return typeobj(*arg, **kw)
else:
return typeobj
def adapt_type(typeobj, colspecs):
if isinstance(typeobj, type):
typeobj = typeobj()
for t in typeobj.__class__.__mro__[0:-1]:
try:
impltype = colspecs[t]
break
except KeyError:
pass
else:
# couldn't adapt - so just return the type itself
# (it may be a user-defined type)
return typeobj
# if we adapted the given generic type to a database-specific type,
# but it turns out the originally given "generic" type
# is actually a subclass of our resulting type, then we were already
# given a more specific type than that required; so use that.
if (issubclass(typeobj.__class__, impltype)):
return typeobj
return typeobj.adapt(impltype)
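# Illustrative sketch (hypothetical names): adapt_type() upgrades a generic
# type instance to a dialect-specific one via the dialect's ``colspecs``
# mapping, falling back to the original type when no entry matches:
#
#     dialect_specific = adapt_type(Integer(), some_dialect.colspecs)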
|
nzavagli/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/encodings/cp850.py
|
593
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP850.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp850',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00b8: 0x00a9, # COPYRIGHT SIGN
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x00a2, # CENT SIGN
0x00be: 0x00a5, # YEN SIGN
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x00f0, # LATIN SMALL LETTER ETH
0x00d1: 0x00d0, # LATIN CAPITAL LETTER ETH
0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00d5: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x00a6, # BROKEN BAR
0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x00fe, # LATIN SMALL LETTER THORN
0x00e8: 0x00de, # LATIN CAPITAL LETTER THORN
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x00af, # MACRON
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2017, # DOUBLE LOW LINE
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd7' # 0x009e -> MULTIPLICATION SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\xae' # 0x00a9 -> REGISTERED SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xa9' # 0x00b8 -> COPYRIGHT SIGN
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\xa2' # 0x00bd -> CENT SIGN
u'\xa5' # 0x00be -> YEN SIGN
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
u'\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\xf0' # 0x00d0 -> LATIN SMALL LETTER ETH
u'\xd0' # 0x00d1 -> LATIN CAPITAL LETTER ETH
u'\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\u0131' # 0x00d5 -> LATIN SMALL LETTER DOTLESS I
u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\xa6' # 0x00dd -> BROKEN BAR
u'\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\xfe' # 0x00e7 -> LATIN SMALL LETTER THORN
u'\xde' # 0x00e8 -> LATIN CAPITAL LETTER THORN
u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
u'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xaf' # 0x00ee -> MACRON
u'\xb4' # 0x00ef -> ACUTE ACCENT
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2017' # 0x00f2 -> DOUBLE LOW LINE
u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0x00f4 -> PILCROW SIGN
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\xb8' # 0x00f7 -> CEDILLA
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\xa8' # 0x00f9 -> DIAERESIS
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x00bd, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a5: 0x00be, # YEN SIGN
0x00a6: 0x00dd, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00a9: 0x00b8, # COPYRIGHT SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00af: 0x00ee, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00f7, # CEDILLA
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d0: 0x00d1, # LATIN CAPITAL LETTER ETH
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00de: 0x00e8, # LATIN CAPITAL LETTER THORN
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f0: 0x00d0, # LATIN SMALL LETTER ETH
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x00fe: 0x00e7, # LATIN SMALL LETTER THORN
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0131: 0x00d5, # LATIN SMALL LETTER DOTLESS I
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x2017: 0x00f2, # DOUBLE LOW LINE
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
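### Usage sketch
# The encodings package registers this module under the name 'cp850', so
# ordinary str/unicode codec calls round-trip through the tables above
# (values here follow from the CP850 mapping: 0x80 <-> U+00C7):
if __name__ == '__main__':
    assert u'\xc7'.encode('cp850') == '\x80'
    assert '\x80'.decode('cp850') == u'\xc7'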
|
ddzialak/boto
|
refs/heads/develop
|
boto/s3/connection.py
|
1
|
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import base64
from boto.compat import six, urllib
import time
from boto.auth import detect_potential_s3sigv4
import boto.utils
from boto.connection import AWSAuthConnection
from boto import handler
from boto.s3.bucket import Bucket
from boto.s3.key import Key
from boto.resultset import ResultSet
from boto.exception import BotoClientError, S3ResponseError
def check_lowercase_bucketname(n):
"""
Bucket names must not contain uppercase characters. We check for
this by appending a lowercase character and testing with islower().
Note this also covers cases like numeric bucket names with dashes.
>>> check_lowercase_bucketname("Aaaa")
Traceback (most recent call last):
...
BotoClientError: S3Error: Bucket names cannot contain upper-case
characters when using either the sub-domain or virtual hosting calling
format.
>>> check_lowercase_bucketname("1234-5678-9123")
True
>>> check_lowercase_bucketname("abcdefg1234")
True
"""
if not (n + 'a').islower():
raise BotoClientError("Bucket names cannot contain upper-case " \
"characters when using either the sub-domain or virtual " \
"hosting calling format.")
return True
def assert_case_insensitive(f):
def wrapper(*args, **kwargs):
if len(args) == 3 and check_lowercase_bucketname(args[2]):
pass
return f(*args, **kwargs)
return wrapper
class _CallingFormat(object):
def get_bucket_server(self, server, bucket):
return ''
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '%s://' % protocol
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
def build_host(self, server, bucket):
if bucket == '':
return server
else:
return self.get_bucket_server(server, bucket)
def build_auth_path(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
path = ''
if bucket != '':
path = '/' + bucket
return path + '/%s' % urllib.parse.quote(key)
def build_path_base(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
return '/%s' % urllib.parse.quote(key)
class SubdomainCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return '%s.%s' % (bucket, server)
class VHostCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return bucket
class OrdinaryCallingFormat(_CallingFormat):
def get_bucket_server(self, server, bucket):
return server
def build_path_base(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
path_base = '/'
if bucket:
path_base += "%s/" % bucket
return path_base + urllib.parse.quote(key)
class ProtocolIndependentOrdinaryCallingFormat(OrdinaryCallingFormat):
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '//'
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
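# Illustrative sketch: how the calling formats above differ when building
# hosts and paths (values assume the default S3 endpoint):
def _calling_format_examples():
    server = 's3.amazonaws.com'
    subdomain_host = SubdomainCallingFormat().build_host(server, 'mybucket')
    # -> 'mybucket.s3.amazonaws.com'
    ordinary_path = OrdinaryCallingFormat().build_path_base('mybucket', 'my key')
    # -> '/mybucket/my%20key'
    return subdomain_host, ordinary_path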
class Location(object):
DEFAULT = '' # US Classic Region
EU = 'EU'
USWest = 'us-west-1'
USWest2 = 'us-west-2'
SAEast = 'sa-east-1'
APNortheast = 'ap-northeast-1'
APSoutheast = 'ap-southeast-1'
APSoutheast2 = 'ap-southeast-2'
CNNorth1 = 'cn-north-1'
class NoHostProvided(object):
# An identifying object to help determine whether the user provided a
# ``host`` or not. Never instantiated.
pass
class HostRequiredError(BotoClientError):
pass
class S3Connection(AWSAuthConnection):
DefaultHost = boto.config.get('s3', 'host', 's3.amazonaws.com')
DefaultCallingFormat = boto.config.get('s3', 'calling_format', 'boto.s3.connection.SubdomainCallingFormat')
QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=NoHostProvided, debug=0, https_connection_factory=None,
calling_format=DefaultCallingFormat, path='/',
provider='aws', bucket_class=Bucket, security_token=None,
suppress_consec_slashes=True, anon=False,
validate_certs=None, profile_name=None):
no_host_provided = False
if host is NoHostProvided:
no_host_provided = True
host = self.DefaultHost
if isinstance(calling_format, six.string_types):
calling_format = boto.utils.find_class(calling_format)()
self.calling_format = calling_format
self.bucket_class = bucket_class
self.anon = anon
super(S3Connection, self).__init__(host,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
debug=debug, https_connection_factory=https_connection_factory,
path=path, provider=provider, security_token=security_token,
suppress_consec_slashes=suppress_consec_slashes,
validate_certs=validate_certs, profile_name=profile_name)
# We need to delay until after the call to ``super`` before checking
# to see if SigV4 is in use.
if no_host_provided:
if 'hmac-v4-s3' in self._required_auth_capability():
raise HostRequiredError(
"When using SigV4, you must specify a 'host' parameter."
)
@detect_potential_s3sigv4
def _required_auth_capability(self):
if self.anon:
return ['anon']
else:
return ['s3']
def __iter__(self):
for bucket in self.get_all_buckets():
yield bucket
def __contains__(self, bucket_name):
return self.lookup(bucket_name) is not None
def set_bucket_class(self, bucket_class):
"""
Set the Bucket class associated with this connection. By default, this
is the boto.s3.bucket.Bucket class, but if you want to subclass that
for some reason, this allows you to associate your new class instead.
:type bucket_class: class
:param bucket_class: A subclass of Bucket that can be more specific
"""
self.bucket_class = bucket_class
def build_post_policy(self, expiration_time, conditions):
"""
Taken from the AWS book Python examples and modified for use with boto
"""
assert isinstance(expiration_time, time.struct_time), \
'Policy document must include a valid expiration Time object'
# Convert conditions object mappings to condition statements
return '{"expiration": "%s",\n"conditions": [%s]}' % \
(time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
def build_post_form_args(self, bucket_name, key, expires_in=6000,
acl=None, success_action_redirect=None,
max_content_length=None,
http_method='http', fields=None,
conditions=None, storage_class='STANDARD',
server_side_encryption=None):
"""
Taken from the AWS book Python examples and modified for use with boto
This only returns the arguments required for the post form, not the
actual form. It does not return the file input field, which also
needs to be added by the caller.
:type bucket_name: string
:param bucket_name: Bucket to submit to
:type key: string
:param key: Key name, optionally add ${filename} to the end to
attach the submitted filename
:type expires_in: integer
:param expires_in: Time (in seconds) before this expires, defaults
to 6000
:type acl: string
:param acl: A canned ACL. One of:
* private
* public-read
* public-read-write
* authenticated-read
* bucket-owner-read
* bucket-owner-full-control
:type success_action_redirect: string
:param success_action_redirect: URL to redirect to on success
:type max_content_length: integer
:param max_content_length: Maximum size for this file
:type http_method: string
:param http_method: HTTP Method to use, "http" or "https"
:type storage_class: string
:param storage_class: Storage class to use for storing the object.
Valid values: STANDARD | REDUCED_REDUNDANCY
:type server_side_encryption: string
:param server_side_encryption: Specifies server-side encryption
algorithm to use when Amazon S3 creates an object.
Valid values: None | AES256
:rtype: dict
:return: A dictionary containing field names/values as well as
a url to POST to
"""
if fields is None:
fields = []
if conditions is None:
conditions = []
expiration = time.gmtime(int(time.time() + expires_in))
# Generate policy document
conditions.append('{"bucket": "%s"}' % bucket_name)
if key.endswith("${filename}"):
conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")])
else:
conditions.append('{"key": "%s"}' % key)
if acl:
conditions.append('{"acl": "%s"}' % acl)
fields.append({"name": "acl", "value": acl})
if success_action_redirect:
conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect)
fields.append({"name": "success_action_redirect", "value": success_action_redirect})
if max_content_length:
conditions.append('["content-length-range", 0, %i]' % max_content_length)
if self.provider.security_token:
fields.append({'name': 'x-amz-security-token',
'value': self.provider.security_token})
conditions.append('{"x-amz-security-token": "%s"}' % self.provider.security_token)
if storage_class:
fields.append({'name': 'x-amz-storage-class',
'value': storage_class})
conditions.append('{"x-amz-storage-class": "%s"}' % storage_class)
if server_side_encryption:
fields.append({'name': 'x-amz-server-side-encryption',
'value': server_side_encryption})
conditions.append('{"x-amz-server-side-encryption": "%s"}' % server_side_encryption)
policy = self.build_post_policy(expiration, conditions)
# Add the base64-encoded policy document as the 'policy' field
policy_b64 = base64.b64encode(policy)
fields.append({"name": "policy", "value": policy_b64})
# Add the AWS access key as the 'AWSAccessKeyId' field
fields.append({"name": "AWSAccessKeyId",
"value": self.aws_access_key_id})
# Add signature for encoded policy document as the
# 'signature' field
signature = self._auth_handler.sign_string(policy_b64)
fields.append({"name": "signature", "value": signature})
fields.append({"name": "key", "value": key})
# HTTPS protocol will be used if the secure HTTP option is enabled.
url = '%s://%s/' % (http_method,
self.calling_format.build_host(self.server_name(),
bucket_name))
return {"action": url, "fields": fields}
def generate_url_sigv4(self, expires_in, method, bucket='', key='',
headers=None, force_http=False,
response_headers=None, version_id=None,
iso_date=None):
path = self.calling_format.build_path_base(bucket, key)
auth_path = self.calling_format.build_auth_path(bucket, key)
host = self.calling_format.build_host(self.server_name(), bucket)
# For presigned URLs we should ignore the port if it's HTTPS
if host.endswith(':443'):
host = host[:-4]
params = {}
if version_id is not None:
params['VersionId'] = version_id
http_request = self.build_base_http_request(method, path, auth_path,
headers=headers, host=host,
params=params)
return self._auth_handler.presign(http_request, expires_in,
iso_date=iso_date)
def generate_url(self, expires_in, method, bucket='', key='', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None):
if self._auth_handler.capability[0] == 'hmac-v4-s3':
# Handle the special sigv4 case
return self.generate_url_sigv4(expires_in, method, bucket=bucket,
key=key, headers=headers, force_http=force_http,
response_headers=response_headers, version_id=version_id)
headers = headers or {}
if expires_in_absolute:
expires = int(expires_in)
else:
expires = int(time.time() + expires_in)
auth_path = self.calling_format.build_auth_path(bucket, key)
auth_path = self.get_path(auth_path)
# optional version_id and response_headers need to be added to
# the query param list.
extra_qp = []
if version_id is not None:
extra_qp.append("versionId=%s" % version_id)
if response_headers:
for k, v in response_headers.items():
extra_qp.append("%s=%s" % (k, urllib.parse.quote(v)))
if self.provider.security_token:
headers['x-amz-security-token'] = self.provider.security_token
if extra_qp:
delimiter = '?' if '?' not in auth_path else '&'
auth_path += delimiter + '&'.join(extra_qp)
c_string = boto.utils.canonical_string(method, auth_path, headers,
expires, self.provider)
b64_hmac = self._auth_handler.sign_string(c_string)
encoded_canonical = urllib.parse.quote(b64_hmac, safe='')
self.calling_format.build_path_base(bucket, key)
if query_auth:
query_part = '?' + self.QueryString % (encoded_canonical, expires,
self.aws_access_key_id)
else:
query_part = ''
if headers:
hdr_prefix = self.provider.header_prefix
for k, v in headers.items():
if k.startswith(hdr_prefix):
# headers used for sig generation must be
# included in the url also.
extra_qp.append("%s=%s" % (k, urllib.parse.quote(v)))
if extra_qp:
delimiter = '?' if not query_part else '&'
query_part += delimiter + '&'.join(extra_qp)
if force_http:
protocol = 'http'
port = 80
else:
protocol = self.protocol
port = self.port
return self.calling_format.build_url_base(self, protocol,
self.server_name(port),
bucket, key) + query_part
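# A hedged usage sketch (bucket and key names are illustrative): a
# query-string-authenticated GET URL valid for one hour looks like
#
#   url = conn.generate_url(3600, 'GET', bucket='mybucket', key='mykey')
#   # e.g. https://mybucket.s3.amazonaws.com/mykey?Signature=...&Expires=...&AWSAccessKeyId=...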
def get_all_buckets(self, headers=None):
response = self.make_request('GET', headers=headers)
body = response.read()
if response.status > 300:
raise self.provider.storage_response_error(
response.status, response.reason, body)
rs = ResultSet([('Bucket', self.bucket_class)])
h = handler.XmlHandler(rs, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return rs
def get_canonical_user_id(self, headers=None):
"""
Convenience method that returns the "CanonicalUserID" of the
user whose credentials are associated with the connection.
The only way to get this value is to do a GET request on the
service which returns all buckets associated with the account.
As part of that response, the canonical userid is returned.
This method simply does all of that and then returns just the
user id.
:rtype: string
:return: A string containing the canonical user id.
"""
rs = self.get_all_buckets(headers=headers)
return rs.owner.id
def get_bucket(self, bucket_name, validate=True, headers=None):
"""
Retrieves a bucket by name.
If the bucket does not exist, an ``S3ResponseError`` will be raised. If
you are unsure if the bucket exists or not, you can use the
``S3Connection.lookup`` method, which will either return a valid bucket
or ``None``.
If ``validate=False`` is passed, no request is made to the service (no
charge/communication delay). This is only safe to do if you are **sure**
the bucket exists.
If the default ``validate=True`` is passed, a request is made to the
service to ensure the bucket exists. Prior to Boto v2.25.0, this fetched
a list of keys (but with a max limit set to ``0``, always returning an empty
list) in the bucket (& included better error messages), at an
increased expense. As of Boto v2.25.0, this now performs a HEAD request
(less expensive but worse error messages).
If you were relying on parsing the error message before, you should call
something like::
bucket = conn.get_bucket('<bucket_name>', validate=False)
bucket.get_all_keys(maxkeys=0)
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
:type validate: boolean
:param validate: If ``True``, it will try to verify the bucket exists
on the service-side. (Default: ``True``)
"""
if validate:
return self.head_bucket(bucket_name, headers=headers)
else:
return self.bucket_class(self, bucket_name)
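# Usage sketch for the validate flag documented above (names illustrative):
#
#   bucket = conn.get_bucket('mybucket')                  # HEAD request; raises if missing
#   bucket = conn.get_bucket('mybucket', validate=False)  # no request; trusts the caller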
def head_bucket(self, bucket_name, headers=None):
"""
Determines if a bucket exists by name.
If the bucket does not exist, an ``S3ResponseError`` will be raised.
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
:returns: A <Bucket> object
"""
response = self.make_request('HEAD', bucket_name, headers=headers)
body = response.read()
if response.status == 200:
return self.bucket_class(self, bucket_name)
elif response.status == 403:
# For backward-compatibility, we'll populate part of the exception
# with the most-common default.
err = self.provider.storage_response_error(
response.status,
response.reason,
body
)
err.error_code = 'AccessDenied'
err.error_message = 'Access Denied'
raise err
elif response.status == 404:
# For backward-compatibility, we'll populate part of the exception
# with the most-common default.
err = self.provider.storage_response_error(
response.status,
response.reason,
body
)
err.error_code = 'NoSuchBucket'
err.error_message = 'The specified bucket does not exist'
raise err
else:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def lookup(self, bucket_name, validate=True, headers=None):
"""
Attempts to get a bucket from S3.
Works identically to ``S3Connection.get_bucket``, save for that it
will return ``None`` if the bucket does not exist instead of throwing
an exception.
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
:type validate: boolean
:param validate: If ``True``, it will try to fetch all keys within the
given bucket. (Default: ``True``)
"""
try:
bucket = self.get_bucket(bucket_name, validate, headers=headers)
except:
bucket = None
return bucket
def create_bucket(self, bucket_name, headers=None,
location=Location.DEFAULT, policy=None):
"""
Creates a new bucket in the given location. By default the bucket is
created in the US Standard region. You can pass Location.EU to create a
European bucket (S3) or European Union bucket (GCS).
:type bucket_name: string
:param bucket_name: The name of the new bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to AWS.
:type location: str
:param location: The location of the new bucket. You can use one of the
constants in :class:`boto.s3.connection.Location` (e.g. Location.EU,
Location.USWest, etc.).
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
"""
if (location and not location.startswith("us-")):
check_lowercase_bucketname(bucket_name)
if policy:
if headers:
headers[self.provider.acl_header] = policy
else:
headers = {self.provider.acl_header: policy}
if location == Location.DEFAULT:
data = ''
else:
data = '<CreateBucketConfiguration><LocationConstraint>' + \
location + '</LocationConstraint></CreateBucketConfiguration>'
response = self.make_request('PUT', bucket_name, headers=headers,
data=data)
body = response.read()
if response.status == 409:
raise self.provider.storage_create_error(
response.status, response.reason, body)
if response.status == 200:
return self.bucket_class(self, bucket_name)
else:
raise self.provider.storage_response_error(
response.status, response.reason, body)
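# Usage sketch (names illustrative): a non-default location sends the
# CreateBucketConfiguration body assembled above.
#
#   from boto.s3.connection import Location
#   bucket = conn.create_bucket('my-eu-bucket', location=Location.EU)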
def delete_bucket(self, bucket, headers=None):
"""
Removes an S3 bucket.
In order to remove the bucket, it must first be empty. If the bucket is
not empty, an ``S3ResponseError`` will be raised.
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
"""
response = self.make_request('DELETE', bucket, headers=headers)
body = response.read()
if response.status != 204:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def make_request(self, method, bucket='', key='', headers=None, data='',
query_args=None, sender=None, override_num_retries=None,
retry_handler=None):
if isinstance(bucket, self.bucket_class):
bucket = bucket.name
if isinstance(key, Key):
key = key.name
path = self.calling_format.build_path_base(bucket, key)
#boto.log.debug('path=%s' % path)
auth_path = self.calling_format.build_auth_path(bucket, key)
#boto.log.debug('auth_path=%s' % auth_path)
host = self.calling_format.build_host(self.server_name(), bucket)
if query_args:
path += '?' + query_args
#boto.log.debug('path=%s' % path)
auth_path += '?' + query_args
#boto.log.debug('auth_path=%s' % auth_path)
return super(S3Connection, self).make_request(
method, path, headers,
data, host, auth_path, sender,
override_num_retries=override_num_retries,
retry_handler=retry_handler
)
|
Brocade-OpenSource/OpenStack-DNRM-Nova
|
refs/heads/master
|
nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py
|
3
|
# Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the aggregates admin api."""
from webob import exc
from nova.api.openstack.compute.plugins.v3 import aggregates
from nova import context
from nova import exception
from nova import test
from nova.tests import matchers
AGGREGATE_LIST = [
{"name": "aggregate1", "id": "1", "availability_zone": "nova1"},
{"name": "aggregate2", "id": "2", "availability_zone": "nova1"},
{"name": "aggregate3", "id": "3", "availability_zone": "nova2"},
{"name": "aggregate1", "id": "4", "availability_zone": "nova1"}]
AGGREGATE = {"name": "aggregate1",
"id": "1",
"availability_zone": "nova1",
"metadata": {"foo": "bar"},
"hosts": ["host1, host2"]}
class FakeRequest(object):
environ = {"nova.context": context.get_admin_context()}
class AggregateTestCase(test.TestCase):
"""Test Case for aggregates admin api."""
def setUp(self):
super(AggregateTestCase, self).setUp()
self.controller = aggregates.AggregateController()
self.req = FakeRequest()
self.context = self.req.environ['nova.context']
def test_index(self):
def stub_list_aggregates(context):
if context is None:
raise Exception()
return AGGREGATE_LIST
self.stubs.Set(self.controller.api, 'get_aggregate_list',
stub_list_aggregates)
result = self.controller.index(self.req)
self.assertEqual(AGGREGATE_LIST, result["aggregates"])
def test_create(self):
def stub_create_aggregate(context, name, availability_zone):
self.assertEqual(context, self.context, "context")
self.assertEqual("test", name, "name")
self.assertEqual("nova1", availability_zone, "availability_zone")
return AGGREGATE
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
result = self.controller.create(self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
self.assertEqual(AGGREGATE, result["aggregate"])
def test_create_with_duplicate_aggregate_name(self):
def stub_create_aggregate(context, name, availability_zone):
raise exception.AggregateNameExists(aggregate_name=name)
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
self.assertRaises(exc.HTTPConflict, self.controller.create,
self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_incorrect_availability_zone(self):
def stub_create_aggregate(context, name, availability_zone):
raise exception.InvalidAggregateAction(action='create_aggregate',
aggregate_id="'N/A'",
reason='invalid zone')
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
self.assertRaises(exc.HTTPBadRequest,
self.controller.create,
self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova_bad"}})
def test_create_with_no_aggregate(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"foo":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_no_name(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"foo": "test",
"availability_zone": "nova1"}})
def test_create_with_no_availability_zone(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"name": "test",
"foo": "nova1"}})
def test_create_with_extra_invalid_arg(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, dict(name="test",
availability_zone="nova1",
foo='bar'))
def test_show(self):
def stub_get_aggregate(context, id):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", id, "id")
return AGGREGATE
self.stubs.Set(self.controller.api, 'get_aggregate',
stub_get_aggregate)
aggregate = self.controller.show(self.req, "1")
self.assertEqual(AGGREGATE, aggregate["aggregate"])
def test_show_with_invalid_id(self):
def stub_get_aggregate(context, id):
raise exception.AggregateNotFound(aggregate_id=2)
self.stubs.Set(self.controller.api, 'get_aggregate',
stub_get_aggregate)
self.assertRaises(exc.HTTPNotFound,
self.controller.show, self.req, "2")
def test_update(self):
body = {"aggregate": {"name": "new_name",
"availability_zone": "nova1"}}
def stub_update_aggregate(context, aggregate, values):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual(body["aggregate"], values, "values")
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_with_only_name(self):
body = {"aggregate": {"name": "new_name"}}
def stub_update_aggregate(context, aggregate, values):
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_with_only_availability_zone(self):
body = {"aggregate": {"availability_zone": "nova1"}}
def stub_update_aggregate(context, aggregate, values):
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_with_no_updates(self):
test_metadata = {"aggregate": {}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_no_update_key(self):
test_metadata = {"asdf": {}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_wrong_updates(self):
test_metadata = {"aggregate": {"status": "disable",
"foo": "bar"}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_bad_aggregate(self):
test_metadata = {"aggregate": {"name": "test_name"}}
def stub_update_aggregate(context, aggregate, metadata):
raise exception.AggregateNotFound(aggregate_id=2)
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.update,
self.req, "2", body=test_metadata)
def test_add_host(self):
def stub_add_host_to_aggregate(context, aggregate, host):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual("host1", host, "host")
return AGGREGATE
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
aggregate = self.controller._add_host(self.req, "1",
body={"add_host": {"host":
"host1"}})
self.assertEqual(aggregate["aggregate"], AGGREGATE)
def test_add_host_with_already_added_host(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise exception.AggregateHostExists(aggregate_id=aggregate,
host=host)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPConflict, self.controller._add_host,
self.req, "1",
body={"add_host": {"host": "host1"}})
def test_add_host_with_bad_aggregate(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller._add_host,
self.req, "bogus_aggregate",
body={"add_host": {"host": "host1"}})
def test_add_host_with_bad_host(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise exception.ComputeHostNotFound(host=host)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller._add_host,
self.req, "1",
body={"add_host": {"host": "bogus_host"}})
def test_add_host_with_missing_host(self):
self.assertRaises(exc.HTTPBadRequest, self.controller._add_host,
self.req, "1", body={"add_host": {"asdf": "asdf"}})
def test_remove_host(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual("host1", host, "host")
stub_remove_host_from_aggregate.called = True
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.controller._remove_host(self.req, "1",
body={"remove_host": {"host": "host1"}})
self.assertTrue(stub_remove_host_from_aggregate.called)
def test_remove_host_with_bad_aggregate(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller._remove_host,
self.req, "bogus_aggregate",
body={"remove_host": {"host": "host1"}})
def test_remove_host_with_host_not_in_aggregate(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.AggregateHostNotFound(aggregate_id=aggregate,
host=host)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller._remove_host,
self.req, "1",
body={"remove_host": {"host": "host1"}})
def test_remove_host_with_bad_host(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.ComputeHostNotFound(host=host)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller._remove_host,
self.req, "1", body={"remove_host": {"host": "bogushost"}})
def test_remove_host_with_extra_param(self):
self.assertRaises(exc.HTTPBadRequest, self.controller._remove_host,
self.req, "1", body={"remove_host": {"asdf": "asdf",
"host": "asdf"}})
def test_set_metadata(self):
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
def stub_update_aggregate(context, aggregate, values):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertThat(body["set_metadata"]['metadata'],
matchers.DictMatches(values))
return AGGREGATE
self.stubs.Set(self.controller.api,
"update_aggregate_metadata",
stub_update_aggregate)
result = self.controller._set_metadata(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_set_metadata_with_bad_aggregate(self):
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
def stub_update_aggregate(context, aggregate, metadata):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api,
"update_aggregate_metadata",
stub_update_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller._set_metadata,
self.req, "bad_aggregate", body=body)
def test_set_metadata_with_missing_metadata(self):
body = {"asdf": {"foo": "bar"}}
self.assertRaises(exc.HTTPBadRequest, self.controller._set_metadata,
self.req, "1", body=body)
def test_set_metadata_with_extra_params(self):
body = {"metadata": {"foo": "bar"}, "asdf": {"foo": "bar"}}
self.assertRaises(exc.HTTPBadRequest, self.controller._set_metadata,
self.req, "1", body=body)
def test_delete_aggregate(self):
def stub_delete_aggregate(context, aggregate):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
stub_delete_aggregate.called = True
self.stubs.Set(self.controller.api, "delete_aggregate",
stub_delete_aggregate)
self.controller.delete(self.req, "1")
self.assertTrue(stub_delete_aggregate.called)
def test_delete_aggregate_with_bad_aggregate(self):
def stub_delete_aggregate(context, aggregate):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api, "delete_aggregate",
stub_delete_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.delete,
self.req, "bogus_aggregate")
|
dhwang99/statistics_introduction
|
refs/heads/master
|
learning/linear_all_subset.py
|
1
|
#encoding: utf8
import numpy as np
import pdb
from scipy.stats import f as f_stats
import matplotlib.pyplot as plt
from data_loader import load_data, normalize_data
from linear_regression import leasq
from cv_common import gen_CV_samples_by_K_folds, one_std_error_rule
'''
Best-subset selection.
Exhaustive search for the best model.
Note: the input data must already contain a bias column so the intercept can
be estimated; Y does not need to be standardized (this differs from the textbook).
'''
import itertools
def all_subset_regression(X, Y, X_t, Y_t, comp_testmse=1):
beta_count = X.shape[1]
beta_id_lst = range(1, beta_count)
subids = range(0, beta_count)
#All computed results, 2**beta_count of them. Structure: [sid][sub, train_mse, test_mse]
subs = []
#Best subset at each feature count
best_subs = []
for sid in subids:
subs.append([])
#Take every combination of sid feature ids from the given list beta_id_lst
sub_list = list(itertools.combinations(beta_id_lst, sid))
train_mse = 1e10
test_mse = 1e10
test_sub = []
for sub in sub_list:
#Prepend the intercept term
sub = np.hstack((np.array([0]), np.array(sub, dtype='int')))
_X= X[:, sub]
_X_t = X_t[:, sub]
beta, z_scores, _tre, _testmse, sig2_hat, se_beta_hat = leasq(_X, Y, _X_t, Y_t, sub)
subs[sid].append((sub, _tre, _testmse))
#Keep the minimum-test-error (or minimum-train-error) result at the current subset size
if (comp_testmse == 1 and (test_mse > _testmse)) or ((comp_testmse == 0) and (train_mse > _tre)):
train_mse =_tre
test_mse = _testmse
test_sub = sub
best_subs.append((test_sub, train_mse, test_mse))
return subs, best_subs
'''
Select the best features by AIC:
for each subset size p, pick the feature set with the smallest AIC and keep its AIC value.
This is equivalent to computing AIC for every subset and taking the overall minimum.
After picking the minimum, refit and predict with the corresponding features.
AIC criterion:
AIC(M) = -2*L(M) + 2p, proportional to RSS(M) + 2*sigma_hat^2*p,
i.e. to RSS(M)/sigma_hat^2 + 2p after dividing by sigma_hat^2.
Emphasis: RSS here is the RSS on the *training* data!
'''
from linear_regression import leasq
def all_subset_regression_by_AIC():
#Load the data and standardize it
X, Y, X_t, Y_t, dt_conv_fun = load_data()
#Estimate sigma from the fit on the full training set
beta, z_scores, train_mse, test_mse, sigma2_hat, se_beta_hat = leasq(X, Y, X_t, Y_t)
#Compute results for every subset. Note: the comparison here is on train_mse
subs, best_subs = all_subset_regression(X, Y, X_t, Y_t, comp_testmse=0)
unzip_best_subs = zip(*best_subs)
subids = range(0, len(best_subs))
#AIC for each best subset. With p fixed, AIC depends only on RSS, and the best subset necessarily has the smallest RSS
best_sub_AICs = map(lambda p:(unzip_best_subs[1][p]*len(Y)/sigma2_hat+2*p), subids)
for bid in subids:
print "by AIC rule:sub_id: %d, train mse: %.4f, test mse: %.4f, AIC: %.4f, subids: %s" % \
(bid, unzip_best_subs[1][bid], unzip_best_subs[2][bid], best_sub_AICs[bid], unzip_best_subs[0][bid])
#Find the best subset id under the AIC rule
print ""
bid = np.argmin(best_sub_AICs)
print "Best sub by AIC rule:best_sub_id: %d, train mse: %.4f, test mse: %.4f, AIC: %.4f, subids: %s" % \
(bid, unzip_best_subs[1][bid], unzip_best_subs[2][bid], best_sub_AICs[bid], unzip_best_subs[0][bid])
#Plot AIC against the number of features in the subset
plt.clf()
l2 = plt.plot(subids, best_sub_AICs, color='g', label='AIC')
plt.legend(loc='best')
plt.savefig('images/all_subset_regression_AIC.png', format='png')
#Plot MSE against subset size. Same as for plain subset selection
plt.clf()
l1 = plt.plot(subids, unzip_best_subs[1], color='r', label='mse train')
l2 = plt.plot(subids, unzip_best_subs[2], color='g', label='mse test')
plt.legend(loc='best')
plt.savefig('images/all_subset_regression_AIC_mse.png', format='png')
subs = np.array(unzip_best_subs[0][bid])
beta, z_scores, _tre, _testmse, sig2_hat, se_beta_hat = \
leasq(X[:,subs], Y, X_t[:, subs], Y_t)
print "Best_rst: train_mse: %.4f test_mse: %.4f" % (_tre, _testmse)
return
'''
Choose the best subset by cross-validation.
Note: CV must be applied to one fixed set of features across folds, so the
textbook's example looks problematic!
'''
def all_subset_regression_by_CV(same_to_book=False):
#Load the data without standardizing; standardization happens inside each CV fold
train_X, train_Y, test_X, test_Y, dt_conv_fun = load_data(type=0, need_bias=0, y_standard=0)
K = 10
CV_samples = gen_CV_samples_by_K_folds(train_X, train_Y, K)
Kfold_subs = []
for ki in range(K):
cv_s = CV_samples[ki]
train_X_CV, train_Y_CV, test_X_CV, test_Y_CV, nf = normalize_data(cv_s[0], cv_s[1], cv_s[2], cv_s[3])
subs, best_subs = all_subset_regression(train_X_CV, train_Y_CV, test_X_CV, test_Y_CV)
lin_subs = []
if same_to_book == False:
#Flatten the per-size subset results to simplify the computation
map(lambda x:lin_subs.extend(x), subs)
else:
#Same algorithm as the textbook
lin_subs = best_subs
Kfold_subs.append(lin_subs)
subset_size = len(Kfold_subs[0])
subset_testcv = np.zeros(subset_size)
subset_testse = np.zeros(subset_size)
subset_sids = []
for i in range(subset_size):
sub_i = zip(*map(lambda k:Kfold_subs[k][i], range(K)))
sub_ids = sub_i[0][0]
train_mse_cv = np.array(sub_i[1])
test_mse_cv = np.array(sub_i[2])
subset_sids.append(sub_ids)
subset_testcv[i] = test_mse_cv.mean()
subset_testse[i] = test_mse_cv.std()/np.sqrt(K)
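#Hedged summary of the rule applied below: among all candidate subsets,
#prefer the most parsimonious one whose mean CV test error is within one
#standard error (std/sqrt(K), computed above) of the minimum CV error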
best_subid, min_id = one_std_error_rule(subset_testcv, subset_testse, reverse=True)
#pdb.set_trace()
print "best_subid: %d, best_subs: %s" % (best_subid, subset_sids[best_subid])
X, Y, X_t, Y_t, dt_conv_fun = load_data()
subs = np.array(subset_sids[best_subid])
beta, z_scores, _tre, _testmse, sig2_hat, se_beta_hat = \
leasq(X[:,subs], Y, X_t[:, subs], Y_t)
print "Best_rst: train_mse: %.4f test_mse: %.4f" % (_tre, _testmse)
return
if __name__ == '__main__':
#Load the data and standardize it
X, Y, X_t, Y_t, dt_cf = load_data(need_bias=0, y_standard=1)
subs, best_subs = all_subset_regression(X, Y, X_t, Y_t)
unzip_best_subs = zip(*best_subs)
subids = range(0, len(best_subs))
"all subset regression:"
#输出不同特征数下的最优子集
#根据 test_mse 结果进行子集选择(选最优的即可,和k折CV不同:CV有一个一倍标准差准则)
best_subid = 0
test_mse = 1e10
for i in subids:
if best_subs[i][2] < test_mse:
test_mse = best_subs[i][2]
best_subid = i
print "sid:%d train_mse:%.4f mse_tst:%.4f subset:%s" % (i, best_subs[i][1], best_subs[i][2], best_subs[i][0])
#Print the overall best subset
print "best subid is: %d, features are: %s, train mse: %.4f test mse: %.4f" % \
(best_subid, best_subs[best_subid][0], best_subs[best_subid][1], best_subs[best_subid][2])
l1 = plt.plot(subids, unzip_best_subs[1], color='r', label='mse train')
l2 = plt.plot(subids, unzip_best_subs[2], color='g', label='mse test')
plt.legend(loc='best')
plt.savefig('images/all_subset_regression.png', format='png')
plt.clf()
print ""
#Subset selection: AIC criterion
print "all_subset_regression_AIC:"
all_subset_regression_by_AIC()
'''
Repeated 10 times; the results show the textbook's algorithm is quite unstable
'''
for i in range(3):
plt.clf()
print ""
#Subset selection: CV criterion
print "cross validation, same_to_book=False"
all_subset_regression_by_CV()
#Subset selection: CV criterion, matching the textbook
print "cross validation, same_to_book=True"
all_subset_regression_by_CV(same_to_book=True)
|
noslenfa/ubiwhere_challenge
|
refs/heads/master
|
clientum/clientum/urls.py
|
1
|
"""clientum URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from musics import views
from django.conf import settings
urlpatterns = [
url(r'^$', views.index),
url(r'^admin/', include(admin.site.urls)),
url(r'^users/(?P<id>.*)$', views.list_users),
url(r'^musics/(?P<id>.*)$', views.list_musics),
url(r'^add_user/', views.add_user),
url(r'^add_music/', views.add_music),
url(r'^delete_music/(?P<id>.*)$', views.delete_music),
url(r'^delete_user/(?P<id>.*)$', views.delete_user),
url(r'^favorites_delete/(?P<id>.*)/(?P<title>.*)$', views.favorites_delete),
url(r'^favorites_add/(?P<id>.*)/(?P<title>.*)$', views.favorites_add),
url(r'^tracks/', views.tracks),
url(r'^add_tracks/(?P<title>.*)/(?P<artist>.*)/(?P<album>.*)$', views.add_tracks),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
]
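# Hedged example of the named capture groups above: a GET request to
# /favorites_add/3/Imagine is routed to
# views.favorites_add(request, id='3', title='Imagine').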
|
ZuluPro/libcloud
|
refs/heads/trunk
|
docs/examples/compute/openstack/tenant_name.py
|
60
|
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
OpenStack = get_driver(Provider.OPENSTACK)
driver = OpenStack('your_auth_username', 'your_auth_password',
ex_tenant_name='mytenant',
ex_force_auth_url='http://192.168.1.101:5000',
ex_force_auth_version='2.0_password')
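# With the driver built above, a typical next step (illustrative) would be:
#   nodes = driver.list_nodes()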
|
nikolas/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/tests/regressiontests/dispatch/__init__.py
|
277
|
"""Unit-tests for the dispatch project
"""
|
Star2Billing/newfies-dialer
|
refs/heads/develop
|
newfies/survey/constants.py
|
4
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from django.utils.translation import ugettext_lazy as _
from django_lets_go.utils import Choice
class SECTION_TYPE(Choice):
PLAY_MESSAGE = 1, _('PLAY MESSAGE')
MULTI_CHOICE = 2, _('MULTI-CHOICE')
RATING_SECTION = 3, _('RATING QUESTION')
CAPTURE_DIGITS = 4, _('CAPTURE DIGITS')
RECORD_MSG = 5, _('RECORD MESSAGE')
CALL_TRANSFER = 6, _('CALL TRANSFER')
HANGUP_SECTION = 7, _('HANGUP')
CONFERENCE = 8, _('CONFERENCE')
DNC = 9, _('DNC')
SMS = 10, _('SMS')
SURVEY_COLUMN_NAME = {
'name': _('name'),
'description': _('description'),
'date': _('date')
}
SURVEY_CALL_RESULT_NAME = {
'date': _('call-date'),
'destination': _('destination'),
'duration': _('duration'),
'disposition': _('disposition'),
'result': _('survey result')
}
SEALED_SURVEY_COLUMN_NAME = {
'name': _('name'),
'description': _('description'),
'campaign': _('campaign'),
'date': _('date')
}
|
Chris35Wills/Chris35Wills.github.io
|
refs/heads/master
|
_drafts/CONVOLUTION/moving_window_example.py
|
1
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 20 12:09:54 2018
@author: chrwil
"""
import numpy as np
import matplotlib.pyplot as plt
# Make some toy data
arr_in = np.round(np.random.uniform(low=0, high=20, size=(11,11)), 2)
arr_out=np.ones(arr_in.shape)
# Set a kernel size (e.g. value of 3 for a 3x3 window)
kernel_size=3
#stepsize=1
#~~~~~~~~~~~~~~
# Pad image to a width of (kernel size -1) /2
# Essential as the moving window needs to operate on your array corners
arr_proc=np.pad(arr_in, int((kernel_size-1)/2), mode='constant', constant_values=-9999)
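# Worked example of the padding arithmetic above: with kernel_size=3 the pad
# width is (3-1)/2 = 1, so the 11x11 input becomes a 13x13 padded array and
# every original cell can sit at the centre of a full 3x3 window.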
#~~~~~~~~~~~~~~
# Create some function to work on values within a kernel
def calc_slope(array, y, x, kernel_size):
"""
"""
if array[(y,x)] != -9999.0:
half_kernel = (kernel_size-1)//2
subimg = array[(y - half_kernel):(y + half_kernel + 1), (x - half_kernel):(x + half_kernel + 1)]
assert subimg.shape == (kernel_size, kernel_size), "Subimage dimensions not equal to kernel - you're probably at an edge - add padding to your array"
# Do something with the subimg (the extracted kernel); a constant stands
# in here for a real slope calculation
slp_subimg=101
return(slp_subimg)
else:
print("Move to next point")
# # Run through (and plot if True)
# for out_i, ii in enumerate(range(0, arr_in.shape[0], stepsize)): # y
# for out_j, jj in enumerate(range(0, arr_in.shape[1], stepsize)): # x
# arr_out[out_i, out_j] = arr_in[out_i, out_j]
# arr_out[out_i, out_j] = calc_slope(arr_in, ii, jj, kernel_size)
#
#
stepsize=1
half_kernel = (kernel_size-1)//2
# Walk the interior of the padded array so every kernel fits, writing the
# results back into the unpadded output array
for out_i, ii in enumerate(range(half_kernel, arr_proc.shape[0]-half_kernel, stepsize)): # y
    for out_j, jj in enumerate(range(half_kernel, arr_proc.shape[1]-half_kernel, stepsize)): # x
        arr_out[out_i, out_j] = calc_slope(arr_proc, ii, jj, kernel_size)
# Plot output
f, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
ax1.imshow(arr_in)
ax1.set_title("Input")
ax2.imshow(arr_out)
ax2.set_title("Output")
plt.show()
##~~~~~ OTHER CONVOLUTION IDEAS
##~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~
##import time
##import scipy.signal
##import astropy
## Moving window using convolve from astropy...
#print("See here: http://docs.astropy.org/en/stable/api/astropy.convolution.convolve.html#astropy.convolution.convolve")
##~~~~~~~~~~~~~~~~~~~
## More moving window links here:
#print("https://community.esri.com/blogs/dan_patterson/2017/11/15/rolling-statistics-for-grid-data-an-alternative")
#print("https://community.esri.com/blogs/dan_patterson/2017/11/26/filter-convolve-rolling-sliding")
##~~~~~~~~~~~~~~~~~~~
## Moving window using convolve from scipy...
## See here: https://stackoverflow.com/questions/8174467/vectorized-moving-window-on-2d-array-in-numpy
#A =np.round(np.random.random_sample((1111,1111)),2)
#B = np.ones((3,3))/4
#tic = time.clock()
#C_no_pad = scipy.signal.convolve2d(A,B,mode='same')#,'fft')
#C_pad = scipy.signal.convolve2d(A,B,mode='valid', boundary='fill', fillvalue=0)#,'fft')
#toc = time.clock()
#print("That took ", toc-tic, "seconds")
#plot_comparison(C_no_pad, C_pad, title1="Unpadded", title2="Padded")
#print("WARNING")
#print("For convolution in scipy, you have to reverse the kernel - look at convolution using the astropy package for a more intuitve implementation")
#print("Also read this: https://stackoverflow.com/questions/40247760/scipy-convolve2d-outputs-wrong-values")
|
Intel-tensorflow/tensorflow
|
refs/heads/master
|
tensorflow/python/keras/activations.py
|
5
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in activation functions."""
from tensorflow.python.keras import backend
from tensorflow.python.keras.layers import advanced_activations
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import keras_export
# b/123041942
# In TF 2.x, if the `tf.nn.softmax` is used as an activation function in Keras
# layers, it gets serialized as 'softmax_v2' instead of 'softmax' as the
# internal method name is returned in serialization. This results in errors in
# model exporting and loading as Keras can't find any activation function with
# the name of `softmax_v2`.
# This dict maps the activation function name from its v2 version to its
# canonical name.
_TF_ACTIVATIONS_V2 = {
'softmax_v2': 'softmax',
}
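# For example, under TF 2.x serializing `tf.nn.softmax` yields the internal
# name 'softmax_v2'; the table above maps it back to the canonical 'softmax'
# so that saved models can be deserialized by name.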
@keras_export('keras.activations.softmax')
@dispatch.add_dispatch_support
def softmax(x, axis=-1):
"""Softmax converts a vector of values to a probability distribution.
The elements of the output vector are in range (0, 1) and sum to 1.
Each vector is handled independently. The `axis` argument sets which axis
of the input the function is applied along.
Softmax is often used as the activation for the last
layer of a classification network because the result could be interpreted as
a probability distribution.
The softmax of each vector x is computed as
`exp(x) / tf.reduce_sum(exp(x))`.
The input values are the log-odds of the resulting probabilities.
Args:
x : Input tensor.
axis: Integer, axis along which the softmax normalization is applied.
Returns:
Tensor, output of softmax transformation (all values are non-negative
and sum to 1).
Examples:
**Example 1: standalone usage**
>>> inputs = tf.random.normal(shape=(32, 10))
>>> outputs = tf.keras.activations.softmax(inputs)
>>> tf.reduce_sum(outputs[0, :]) # Each sample in the batch now sums to 1
<tf.Tensor: shape=(), dtype=float32, numpy=1.0000001>
**Example 2: usage in a `Dense` layer**
>>> layer = tf.keras.layers.Dense(32, activation=tf.keras.activations.softmax)
"""
if x.shape.rank > 1:
if isinstance(axis, int):
output = nn.softmax(x, axis=axis)
else:
# nn.softmax does not support tuple axis.
e = math_ops.exp(x - math_ops.reduce_max(x, axis=axis, keepdims=True))
s = math_ops.reduce_sum(e, axis=axis, keepdims=True)
output = e / s
else:
raise ValueError('Cannot apply softmax to a tensor that is 1D. '
'Received input: %s' % (x,))
# Cache the logits to use for crossentropy loss.
output._keras_logits = x # pylint: disable=protected-access
return output
@keras_export('keras.activations.elu')
@dispatch.add_dispatch_support
def elu(x, alpha=1.0):
"""Exponential Linear Unit.
The exponential linear unit (ELU) with `alpha > 0` is:
`x` if `x > 0` and
`alpha * (exp(x) - 1)` if `x < 0`
The ELU hyperparameter `alpha` controls the value to which an
ELU saturates for negative net inputs. ELUs diminish the
vanishing gradient effect.
ELUs have negative values which pushes the mean of the activations
closer to zero.
Mean activations that are closer to zero enable faster learning as they
bring the gradient closer to the natural gradient.
ELUs saturate to a negative value when the argument gets smaller.
Saturation means a small derivative which decreases the variation
and the information that is propagated to the next layer.
Example Usage:
>>> import tensorflow as tf
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='elu',
... input_shape=(28, 28, 1)))
>>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))
>>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))
>>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))
>>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))
<tensorflow.python.keras.engine.sequential.Sequential object ...>
Args:
x: Input tensor.
alpha: A scalar, slope of negative section. `alpha` controls the value to
which an ELU saturates for negative net inputs.
Returns:
The exponential linear unit (ELU) activation function: `x` if `x > 0` and
`alpha * (exp(x) - 1)` if `x < 0`.
Reference:
[Fast and Accurate Deep Network Learning by Exponential Linear Units
(ELUs) (Clevert et al, 2016)](https://arxiv.org/abs/1511.07289)
"""
return backend.elu(x, alpha)
@keras_export('keras.activations.selu')
@dispatch.add_dispatch_support
def selu(x):
"""Scaled Exponential Linear Unit (SELU).
The Scaled Exponential Linear Unit (SELU) activation function is defined as:
- `if x > 0: return scale * x`
- `if x < 0: return scale * alpha * (exp(x) - 1)`
where `alpha` and `scale` are pre-defined constants
(`alpha=1.67326324` and `scale=1.05070098`).
Basically, the SELU activation function multiplies `scale` (> 1) with the
output of the `tf.keras.activations.elu` function to ensure a slope larger
than one for positive inputs.
The values of `alpha` and `scale` are
chosen so that the mean and variance of the inputs are preserved
between two consecutive layers as long as the weights are initialized
correctly (see `tf.keras.initializers.LecunNormal` initializer)
and the number of input units is "large enough"
(see reference paper for more information).
Example Usage:
>>> num_classes = 10 # 10-class problem
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal',
... activation='selu'))
>>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal',
... activation='selu'))
>>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal',
... activation='selu'))
>>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))
Args:
x: A tensor or variable to compute the activation function for.
Returns:
The scaled exponential unit activation: `scale * elu(x, alpha)`.
Notes:
- To be used together with the
`tf.keras.initializers.LecunNormal` initializer.
- To be used together with the dropout variant
`tf.keras.layers.AlphaDropout` (not regular dropout).
References:
- [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
"""
return nn.selu(x)
@keras_export('keras.activations.softplus')
@dispatch.add_dispatch_support
def softplus(x):
"""Softplus activation function, `softplus(x) = log(exp(x) + 1)`.
Example Usage:
>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.softplus(a)
>>> b.numpy()
array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00,
2.0000000e+01], dtype=float32)
Args:
x: Input tensor.
Returns:
The softplus activation: `log(exp(x) + 1)`.
"""
return math_ops.softplus(x)
@keras_export('keras.activations.softsign')
@dispatch.add_dispatch_support
def softsign(x):
"""Softsign activation function, `softsign(x) = x / (abs(x) + 1)`.
Example Usage:
>>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32)
>>> b = tf.keras.activations.softsign(a)
>>> b.numpy()
array([-0.5, 0. , 0.5], dtype=float32)
Args:
x: Input tensor.
Returns:
The softsign activation: `x / (abs(x) + 1)`.
"""
return nn.softsign(x)
@keras_export('keras.activations.swish')
@dispatch.add_dispatch_support
def swish(x):
"""Swish activation function, `swish(x) = x * sigmoid(x)`.
Swish activation function which returns `x*sigmoid(x)`.
It is a smooth, non-monotonic function that consistently matches
or outperforms ReLU on deep networks, it is unbounded above and
bounded below.
Example Usage:
>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.swish(a)
>>> b.numpy()
array([-4.1223075e-08, -2.6894143e-01, 0.0000000e+00, 7.3105860e-01,
2.0000000e+01], dtype=float32)
Args:
x: Input tensor.
Returns:
The swish activation applied to `x` (see reference paper for details).
Reference:
- [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
"""
return nn.swish(x)
@keras_export('keras.activations.relu')
@dispatch.add_dispatch_support
def relu(x, alpha=0., max_value=None, threshold=0):
"""Applies the rectified linear unit activation function.
With default values, this returns the standard ReLU activation:
`max(x, 0)`, the element-wise maximum of 0 and the input tensor.
Modifying default parameters allows you to use non-zero thresholds,
change the max value of the activation,
and to use a non-zero multiple of the input for values below the threshold.
For example:
>>> foo = tf.constant([-10, -5, 0.0, 5, 10], dtype = tf.float32)
>>> tf.keras.activations.relu(foo).numpy()
array([ 0., 0., 0., 5., 10.], dtype=float32)
>>> tf.keras.activations.relu(foo, alpha=0.5).numpy()
array([-5. , -2.5, 0. , 5. , 10. ], dtype=float32)
>>> tf.keras.activations.relu(foo, max_value=5).numpy()
array([0., 0., 0., 5., 5.], dtype=float32)
>>> tf.keras.activations.relu(foo, threshold=5).numpy()
array([-0., -0., 0., 0., 10.], dtype=float32)
Args:
x: Input `tensor` or `variable`.
alpha: A `float` that governs the slope for values lower than the
threshold.
max_value: A `float` that sets the saturation threshold (the largest value
the function will return).
threshold: A `float` giving the threshold value of the activation function
below which values will be damped or set to zero.
Returns:
A `Tensor` representing the input tensor,
transformed by the relu activation function.
Tensor will be of the same shape and dtype of input `x`.
"""
return backend.relu(x, alpha=alpha, max_value=max_value, threshold=threshold)
@keras_export('keras.activations.gelu', v1=[])
@dispatch.add_dispatch_support
def gelu(x, approximate=False):
"""Applies the Gaussian error linear unit (GELU) activation function.
Gaussian error linear unit (GELU) computes
`x * P(X <= x)`, where `P(X) ~ N(0, 1)`.
The (GELU) nonlinearity weights inputs by their value, rather than gates
inputs by their sign as in ReLU.
For example:
>>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)
>>> y = tf.keras.activations.gelu(x)
>>> y.numpy()
array([-0.00404951, -0.15865529, 0. , 0.8413447 , 2.9959507 ],
dtype=float32)
>>> y = tf.keras.activations.gelu(x, approximate=True)
>>> y.numpy()
array([-0.00363752, -0.15880796, 0. , 0.841192 , 2.9963627 ],
dtype=float32)
Args:
x: Input tensor.
approximate: A `bool`, whether to enable approximation.
Returns:
The gaussian error linear activation:
`0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))`
if `approximate` is `True` or
`x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`,
where `P(X) ~ N(0, 1)`,
if `approximate` is `False`.
Reference:
- [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
"""
return nn.gelu(x, approximate)
@keras_export('keras.activations.tanh')
@dispatch.add_dispatch_support
def tanh(x):
"""Hyperbolic tangent activation function.
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.tanh(a)
>>> b.numpy()
array([-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547], dtype=float32)
Args:
x: Input tensor.
Returns:
Tensor of same shape and dtype of input `x`, with tanh activation:
`tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`.
"""
return nn.tanh(x)
@keras_export('keras.activations.sigmoid')
@dispatch.add_dispatch_support
def sigmoid(x):
"""Sigmoid activation function, `sigmoid(x) = 1 / (1 + exp(-x))`.
Applies the sigmoid activation function. For small values (<-5),
`sigmoid` returns a value close to zero, and for large values (>5)
the result of the function gets close to 1.
Sigmoid is equivalent to a 2-element Softmax, where the second element is
assumed to be zero. The sigmoid function always returns a value between
0 and 1.
For example:
>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.sigmoid(a)
>>> b.numpy()
array([2.0611537e-09, 2.6894143e-01, 5.0000000e-01, 7.3105860e-01,
1.0000000e+00], dtype=float32)
Args:
x: Input tensor.
Returns:
Tensor with the sigmoid activation: `1 / (1 + exp(-x))`.
"""
output = nn.sigmoid(x)
# Cache the logits to use for crossentropy loss.
output._keras_logits = x # pylint: disable=protected-access
return output
@keras_export('keras.activations.exponential')
@dispatch.add_dispatch_support
def exponential(x):
"""Exponential activation function.
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.exponential(a)
>>> b.numpy()
array([0.04978707, 0.36787945, 1., 2.7182817 , 20.085537], dtype=float32)
Args:
x: Input tensor.
Returns:
Tensor with exponential activation: `exp(x)`.
"""
return math_ops.exp(x)
@keras_export('keras.activations.hard_sigmoid')
@dispatch.add_dispatch_support
def hard_sigmoid(x):
"""Hard sigmoid activation function.
A faster approximation of the sigmoid activation.
Piecewise linear approximation of the sigmoid function.
Ref: 'https://en.wikipedia.org/wiki/Hard_sigmoid'
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.hard_sigmoid(a)
>>> b.numpy()
array([0. , 0.3, 0.5, 0.7, 1. ], dtype=float32)
Args:
x: Input tensor.
Returns:
The hard sigmoid activation, defined as:
- `if x < -2.5: return 0`
- `if x > 2.5: return 1`
- `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5`
"""
return backend.hard_sigmoid(x)
@keras_export('keras.activations.linear')
@dispatch.add_dispatch_support
def linear(x):
"""Linear activation function (pass-through).
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.linear(a)
>>> b.numpy()
array([-3., -1., 0., 1., 3.], dtype=float32)
Args:
x: Input tensor.
Returns:
The input, unmodified.
"""
return x
@keras_export('keras.activations.serialize')
@dispatch.add_dispatch_support
def serialize(activation):
"""Returns the string identifier of an activation function.
Args:
activation : Function object.
Returns:
String denoting the name attribute of the input function
For example:
>>> tf.keras.activations.serialize(tf.keras.activations.tanh)
'tanh'
>>> tf.keras.activations.serialize(tf.keras.activations.sigmoid)
'sigmoid'
>>> tf.keras.activations.serialize('abcd')
Traceback (most recent call last):
...
ValueError: ('Cannot serialize', 'abcd')
Raises:
ValueError: The input function is not a valid one.
"""
if (hasattr(activation, '__name__') and
activation.__name__ in _TF_ACTIVATIONS_V2):
return _TF_ACTIVATIONS_V2[activation.__name__]
return serialize_keras_object(activation)
# Add additional globals so that deserialize can find these common activation
# functions
leaky_relu = nn.leaky_relu
log_softmax = nn.log_softmax
relu6 = nn.relu6
silu = nn.swish
@keras_export('keras.activations.deserialize')
@dispatch.add_dispatch_support
def deserialize(name, custom_objects=None):
"""Returns activation function given a string identifier.
Args:
name: The name of the activation function.
custom_objects: Optional `{function_name: function_obj}`
dictionary listing user-provided activation functions.
Returns:
Corresponding activation function.
For example:
>>> tf.keras.activations.deserialize('linear')
<function linear at 0x1239596a8>
>>> tf.keras.activations.deserialize('sigmoid')
<function sigmoid at 0x123959510>
>>> tf.keras.activations.deserialize('abcd')
Traceback (most recent call last):
...
ValueError: Unknown activation function:abcd
Raises:
ValueError: `Unknown activation function` if the input string does not
denote any defined Tensorflow activation function.
"""
globs = globals()
# only replace missing activations
advanced_activations_globs = advanced_activations.get_globals()
for key, val in advanced_activations_globs.items():
if key not in globs:
globs[key] = val
return deserialize_keras_object(
name,
module_objects=globs,
custom_objects=custom_objects,
printable_module_name='activation function')
@keras_export('keras.activations.get')
@dispatch.add_dispatch_support
def get(identifier):
"""Returns function.
Args:
identifier: Function or string
Returns:
Function corresponding to the input string or input function.
For example:
>>> tf.keras.activations.get('softmax')
<function softmax at 0x1222a3d90>
>>> tf.keras.activations.get(tf.keras.activations.softmax)
<function softmax at 0x1222a3d90>
>>> tf.keras.activations.get(None)
<function linear at 0x1239596a8>
>>> tf.keras.activations.get(abs)
<built-in function abs>
>>> tf.keras.activations.get('abcd')
Traceback (most recent call last):
...
ValueError: Unknown activation function:abcd
Raises:
ValueError: Input is an unknown function or string, i.e., the input does
not denote any defined function.
"""
if identifier is None:
return linear
if isinstance(identifier, str):
identifier = str(identifier)
return deserialize(identifier)
elif isinstance(identifier, dict):
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise TypeError(
'Could not interpret activation function identifier: {}'.format(
identifier))
|
eleonrk/SickRage
|
refs/heads/master
|
sickbeard/providers/ncore.py
|
2
|
# coding=utf-8
# Author: Dustyn Gibson <miigotu@gmail.com>
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Fre$
# Foundation, either version 3 of the License, or (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or$
# PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import json
import re
from sickbeard import logger, tvcache
from sickchill.helper.common import convert_size, try_int
from sickchill.providers.torrent.TorrentProvider import TorrentProvider
class NcoreProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "ncore.cc")
self.username = None
self.password = None
self.minseed = None
self.minleech = None
categories = [
'xvidser_hun', 'xvidser',
'dvdser_hun', 'dvdser',
'hdser_hun', 'hdser'
]
categories = '&kivalasztott_tipus=' + ','.join([x for x in categories])
self.url = 'https://ncore.cc/'
self.urls = {
'login': 'https://ncore.cc/login.php',
'search': ('https://ncore.cc/torrents.php?nyit_sorozat_resz=true&{cats}&mire=%s&miben=name'
'&tipus=kivalasztottak_kozott&submit.x=0&submit.y=0&submit=Ok'
'&tags=&searchedfrompotato=true&jsons=true').format(cats=categories),
}
self.cache = tvcache.TVCache(self)
def login(self):
login_params = {
'nev': self.username,
'pass': self.password,
'submitted': '1',
}
response = self.get_url(self.urls["login"], post_data=login_params, returns="text")
if not response:
logger.log("Unable to connect to provider", logger.WARNING)
return False
if re.search('images/warning.png', response):
logger.log("Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-branches, too-many-locals, too-many-statements
results = []
if not self.login():
return results
for mode in search_strings:
items = []
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
if mode != "RSS":
logger.log("Search string: {0}".format(search_string.decode("utf-8")), logger.DEBUG)
url = self.urls['search'] % (search_string)
data = self.get_url(url, returns="text")
try:
parsed_json = json.loads(data)
except ValueError:
    # The provider returned malformed JSON; skip this search string
    continue
if not isinstance(parsed_json, dict):
logger.log("No data returned from provider", logger.DEBUG)
continue
torrent_results = parsed_json['total_results']
if not torrent_results:
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
logger.log('Number of torrents found on nCore = ' + str(torrent_results), logger.INFO)
for item in parsed_json['results']:
try:
title = item.pop("release_name")
download_url = item.pop("download_url")
if not all([title, download_url]):
continue
seeders = item.pop("seeders")
leechers = item.pop("leechers")
if seeders < self.minseed or leechers < self.minleech:
if mode != "RSS":
logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
torrent_size = item.pop("size", -1)
size = convert_size(torrent_size) or -1
if mode != "RSS":
logger.log("Found result: {0} with {1} seeders and {2} leechers with a file size {3}".format(title, seeders, leechers, size), logger.DEBUG)
result = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
items.append(result)
except StandardError:
continue
# For each search mode sort all the items by seeders
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
provider = NcoreProvider()
|
Stanford-Online/edx-platform
|
refs/heads/master
|
common/lib/capa/setup.py
|
25
|
from setuptools import find_packages, setup
setup(
name="capa",
version="0.1",
packages=find_packages(exclude=["tests"]),
install_requires=[
"setuptools",
"lxml",
"pytz"
],
)
|
AriZuu/micropython
|
refs/heads/master
|
tests/extmod/machine_pinbase.py
|
24
|
try:
import umachine as machine
except ImportError:
import machine
try:
machine.PinBase
except AttributeError:
print("SKIP")
raise SystemExit
class MyPin(machine.PinBase):
def __init__(self):
print("__init__")
self.v = False
def value(self, v=None):
print("value:", v)
if v is None:
self.v = not self.v
return int(self.v)
p = MyPin()
print(p.value())
print(p.value())
print(p.value())
p.value(1)
p.value(0)
|
Bryukh/ajenti
|
refs/heads/master
|
plugins/squid/__init__.py
|
12
|
MODULES = ['api', 'config', 'backend', 'main', 'ui_acls', 'ui_bindings', 'ui_http_access', 'ui_refresh_patterns']
DEPS = [
(['any'],
[
('plugin', 'services'),
('app', 'Squid', 'squid')
])
]
NAME = 'Squid'
PLATFORMS = ['any']
DESCRIPTION = 'Control Squid caching proxy server'
VERSION = '1'
GENERATION = 1
AUTHOR = 'Ajenti team'
HOMEPAGE = 'http://ajenti.org'
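# Hedged reading of the DEPS structure above: for the listed platforms
# (['any']), this plugin depends on the 'services' plugin being present and
# on a 'squid' binary registered as the app 'Squid'.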
|
ThQ/qd
|
refs/heads/master
|
translaters/sir_pp.py
|
1
|
import sys
class CommandHandler:
def __init__(self, preprocessor):
self.output = ""
self.pp = preprocessor
def get_output(self):
return self.output
def initialize(self, at, block_opcode_pos, args):
return False
def put_command(self, at, command, args):
return -1
def put_string(self, at, string):
return ""
class AtHandler (CommandHandler):
def initialize(self, at, block_opcode_pos, args):
# Why -1 exactly ??
self.pp.define_variable(at, ("@@" + args.strip()), str(block_opcode_pos - 1))
return False
class DefineHandler (CommandHandler):
def initialize(self, at, block_opcode_pos, args):
space_pos = args.find(" ")
name = ""
value = ""
if space_pos != -1:
name = args[0:space_pos].strip()
value = args[space_pos + 1:].strip()
else:
name = args.strip()
value = ""
if name != "":
self.pp.define_variable(at, name, value)
else:
self.pp.warn(at, "Define does not actually define anything.")
return False
class UndefineHandler (CommandHandler):
    def initialize(self, at, block_opcode_pos, args):
        # Only the variable name matters; anything after the first space is ignored.
        space_pos = args.find(" ")
        if space_pos != -1:
            name = args[0:space_pos].strip()
        else:
            name = args.strip()
        if name != "":
            self.pp.undefine_variable(at, name)
        else:
            self.pp.warn(at, "Undefine must be passed a variable name.")
        return False
class IfDefHandler (CommandHandler):
def __init__(self, pp):
CommandHandler.__init__(self, pp)
self.log = False
self.variable_name = ""
self.ifs = 0
def initialize(self, at, block_opcode_pos, args):
self.variable_name = args
if self.variable_name in self.pp.variables:
self.log = True
return True
def put_command(self, at, command, args):
result = -1
if command == "else":
if self.ifs == 0:
self.log = not self.log
result = 1
elif command == "ifdef":
if self.log:
result = 0
else:
result = 1
self.ifs += 1
elif command == "endif":
if self.log:
result = -1
else:
result = 1
self.ifs -= 1
else:
if self.log:
result = 0
else:
result = 1
##print self, "put_command", command, "| result=", result, "| log=", self.log
return result
def put_string(self, at, string):
if self.log:
return string + "\n"
else:
return ""
class SirPreprocessor:
def __init__(self):
self.handler = None
self.handlers = []
self.variables = {}
def define_variable(self, at, name, value):
if not name in self.variables:
self.variables[name] = value
else:
self.warn(at, "Redefining variable [" + name + "].")
def get_handler(self, name):
h = None
if name == "define":
h = DefineHandler(self)
elif name == "ifdef":
h = IfDefHandler(self)
elif name == "undefine":
h = UndefineHandler(self)
elif name == "at":
h = AtHandler(self)
return h
def preprocess(self, file_path):
file = open(file_path, "r")
        if file is not None:
content = file.read()
file.close()
return self.preprocess_string(content)
else:
return ""
def preprocess_string(self, content):
output = ""
lines = content.splitlines()
lineno = 1
block_opcode_pos = 0
for line in lines:
line = line.strip()
            if line != "" and line[0] != "#":
if line.startswith("block:new"):
block_opcode_pos = -1
line = line.strip()
if line.startswith("@@"):
line = "%% at " + line[2:]
if line.startswith("%% "):
space_pos = line.find(" ", 4)
command = ""
args = ""
if space_pos != -1:
command = line[3:space_pos]
args = line[space_pos + 1:]
else:
command = line[3:]
if self.handler != None:
todo = self.handler.put_command(lineno, command, args)
if todo == -1 :
if len(self.handlers) > 0:
self.handlers.pop()
if len(self.handlers) > 0:
self.handler = self.handlers[-1]
else:
self.handler = None
elif todo == 0:
self.handler = self.get_handler(command)
if self.handler != None:
if self.handler.initialize(lineno, block_opcode_pos, args):
self.handlers.append(self.handler)
else:
self.handler = None
else:
self.warn(lineno, "Unknown command [" + command + "].")
else:
self.handler = self.get_handler(command)
if self.handler != None:
if self.handler.initialize(lineno, block_opcode_pos, args):
self.handlers.append(self.handler)
else:
self.handler = None
else:
self.warn(lineno, "Unknown command [" + command + "].")
else:
new_line = ""
if self.handler != None:
new_line += self.handler.put_string(lineno, line)
else:
new_line += line + "\n"
block_opcode_pos += 1
output += new_line
lineno += 1
output = self.replace_variables(output)
return output
    def replace_variables(self, content):
        # Expand every {{ name }} occurrence, left to right; an unknown
        # name is a fatal error.
        result = ""
        last_at = 0
        tag_start = "{{"
        tag_start_len = len(tag_start)
        tag_end = "}}"
        tag_end_len = len(tag_end)
        while True:
            var_start = content.find(tag_start, last_at)
            if var_start == -1:
                break
            var_end = content.find(tag_end, var_start)
            if var_end == -1:
                break
            result += content[last_at:var_start]
            var_name = content[var_start + tag_start_len:var_end].strip()
            if var_name in self.variables:
                result += self.variables[var_name]
            else:
                print self.variables
                self.error("?", "Unknown variable [" + var_name + "].")
            last_at = var_end + tag_end_len
        result += content[last_at:]
        return result
def undefine_variable(self, at, name):
if name in self.variables:
self.variables.pop(name)
def warn(self, lineno, msg):
sys.stderr.write("[SirPP] Warning @" + str(lineno) + ": " + msg + "\n")
def error(self, lineno, msg):
sys.stderr.write("[SirPP] Error @" + str(lineno) + ": " + msg + "\n")
        sys.exit(1)
if __name__ == "__main__":
if len(sys.argv) >= 2:
pp = SirPreprocessor()
sir = pp.preprocess(sys.argv[1])
print sir,
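# Hedged usage sketch (illustrative input, not from the original sources):
#   pp = SirPreprocessor()
#   print pp.preprocess_string("%% define GREETING Hello\n{{ GREETING }}, world\n"),
# defines GREETING through DefineHandler and prints "Hello, world".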
|
nycholas/ask-undrgz
|
refs/heads/master
|
src/ask-undrgz/django/contrib/sitemaps/views.py
|
55
|
from django.http import HttpResponse, Http404
from django.template import loader
from django.contrib.sites.models import Site
from django.core import urlresolvers
from django.utils.encoding import smart_str
from django.core.paginator import EmptyPage, PageNotAnInteger
def index(request, sitemaps):
current_site = Site.objects.get_current()
sites = []
protocol = request.is_secure() and 'https' or 'http'
for section, site in sitemaps.items():
if callable(site):
pages = site().paginator.num_pages
else:
pages = site.paginator.num_pages
sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.sitemap', kwargs={'section': section})
sites.append('%s://%s%s' % (protocol, current_site.domain, sitemap_url))
if pages > 1:
for page in range(2, pages+1):
sites.append('%s://%s%s?p=%s' % (protocol, current_site.domain, sitemap_url, page))
xml = loader.render_to_string('sitemap_index.xml', {'sitemaps': sites})
return HttpResponse(xml, mimetype='application/xml')
def sitemap(request, sitemaps, section=None):
maps, urls = [], []
if section is not None:
if section not in sitemaps:
raise Http404("No sitemap available for section: %r" % section)
maps.append(sitemaps[section])
else:
maps = sitemaps.values()
page = request.GET.get("p", 1)
for site in maps:
try:
if callable(site):
urls.extend(site().get_urls(page))
else:
urls.extend(site.get_urls(page))
except EmptyPage:
raise Http404("Page %s empty" % page)
except PageNotAnInteger:
raise Http404("No page '%s'" % page)
xml = smart_str(loader.render_to_string('sitemap.xml', {'urlset': urls}))
return HttpResponse(xml, mimetype='application/xml')
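# Hedged wiring sketch (assumption; typical Django 1.x usage, not part of this
# file):
#   from django.contrib.sitemaps import views as sitemaps_views
#   (r'^sitemap\.xml$', sitemaps_views.sitemap, {'sitemaps': sitemaps}),
# The "p" GET parameter then selects a page when a section spans several pages.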
|
FokkeZB/titanium_mobile
|
refs/heads/master
|
node_modules/ioslib/node_modules/node-ios-device/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py
|
395
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
def testValidateMSVSSettings_tool_names(self):
"""Tests that only MSVS tool names are allowed."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {},
'VCLinkerTool': {},
'VCMIDLTool': {},
'foo': {},
'VCResourceCompilerTool': {},
'VCLibrarianTool': {},
'VCManifestTool': {},
'ClCompile': {}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized tool foo',
'Warning: unrecognized tool ClCompile'])
def testValidateMSVSSettings_settings(self):
"""Tests that for invalid MSVS settings."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '5',
'BrowseInformation': 'fdkslj',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '-1',
'CompileAs': '1',
'DebugInformationFormat': '2',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': '1',
'ExceptionHandling': '1',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '1',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '1',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'string1;string2',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '1',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '1',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalDependencies_excluded': 'file3',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '2',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'CLRImageType': '2',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '2',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': '2',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'ErrorReporting': '2',
'FixedBaseAddress': '2',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '2',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '2',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '2',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '2',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '2',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'true',
'Version': 'a string1'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'CPreprocessOptions': 'a string1',
'DefaultCharType': '1',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '1',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'notgood': 'bogus',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'VCResourceCompilerTool': {
'AdditionalOptions': 'a string1',
'AdditionalIncludeDirectories': 'folder1;folder2',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'notgood2': 'bogus',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a string1',
'ManifestResourceFile': 'a_file_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'truel',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}},
self.stderr)
self._ExpectedWarnings([
'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
'index value (5) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/BrowseInformation, '
"invalid literal for int() with base 10: 'fdkslj'",
'Warning: for VCCLCompilerTool/CallingConvention, '
'index value (-1) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/DebugInformationFormat, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
'Warning: for VCLinkerTool/TargetMachine, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCMIDLTool/notgood',
'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
'Warning: for VCManifestTool/UpdateFileHashes, '
"expected bool; got 'truel'"
        ])
def testValidateMSBuildSettings_settings(self):
"""Tests that for invalid MSBuild settings."""
MSVSSettings.ValidateMSBuildSettings(
{'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'false',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'BuildingInIDE': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'CompileAsManaged': 'Pure',
'CreateHotpatchableImage': 'true',
'DebugInformationFormat': 'ProgramDatabase',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'SyncCThrow',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Precise',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'FunctionLevelLinking': 'false',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'false',
'MinimalRebuild': 'true',
'MultiProcessorCompilation': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Disabled',
'PrecompiledHeader': 'NotUsing',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'string1;string2',
'PreprocessOutputPath': 'a string1',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'false',
'ProcessorNumber': '33',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TreatSpecificWarningsAsErrors': 'string1;string2',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UseUnicodeForAssemblerListing': 'true',
'WarningLevel': 'TurnOffAllWarnings',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'Link': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'BuildingInIDE': 'true',
'CLRImageType': 'ForceIJWImage',
'CLRSupportLastError': 'Enabled',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'CreateHotPatchableImage': 'X86Image',
'DataExecutionPrevention': 'false',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': 'NotSet',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'FixedBaseAddress': 'false',
'ForceFileOutput': 'Enabled',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'a_file_list',
'ImageHasSafeExceptionHandlers': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'false',
'LinkDLL': 'true',
'LinkErrorReporting': 'SendErrorReport',
'LinkStatus': 'true',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'MSDOSStubFileName': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': 'false',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'PreventDllBinding': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SectionAlignment': '33',
'SetChecksum': 'true',
'ShowProgress': 'LinkVerboseREF',
'SpecifySectionAttributes': 'a string1',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Console',
'SupportNobindOfDelayLoadedDLL': 'true',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TrackerLogDirectory': 'a_folder',
'TreatLinkerWarningAsErrors': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'AsInvoker',
'UACUIAccess': 'true',
'Version': 'a string1'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'Culture': '0x236',
'IgnoreStandardIncludePath': 'true',
'NullTerminateStrings': 'true',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ApplicationConfigurationMode': 'true',
'ClientStubFile': 'a_file_name',
'CPreprocessOptions': 'a string1',
'DefaultCharType': 'Signed',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'EnableCustom',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateClientFiles': 'Stub',
'GenerateServerFiles': 'None',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'LocaleID': '33',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'ServerStubFile': 'a_file_name',
'StructMemberAlignment': 'NotSet',
'SuppressCompilerWarnings': 'true',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Itanium',
'TrackerLogDirectory': 'a_folder',
'TypeLibFormat': 'NewFormat',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'Lib': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'DisplayLibrary': 'a string1',
'ErrorReporting': 'PromptImmediately',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkTimeCodeGeneration': 'true',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'Name': 'a_file_name',
'OutputFile': 'a_file_name',
'RemoveObjects': 'file1;file2',
'SubSystem': 'Console',
'SuppressStartupBanner': 'true',
'TargetMachine': 'MachineX86i',
'TrackerLogDirectory': 'a_folder',
'TreatLibWarningAsErrors': 'true',
'UseUnicodeResponseFiles': 'true',
'Verbose': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'EnableDPIAwareness': 'fal',
'GenerateCatalogFiles': 'truel',
'GenerateCategoryTags': 'true',
'InputResourceManifests': 'a string1',
'ManifestFromManagedAssembly': 'a_file_name',
'notgood3': 'bogus',
'OutputManifestFile': 'a_file_name',
'OutputResourceManifests': 'a string1',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressDependencyElement': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'a_file_name'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized setting ClCompile/Enableprefast',
'Warning: unrecognized setting ClCompile/ZZXYZ',
'Warning: unrecognized setting Manifest/notgood3',
'Warning: for Manifest/GenerateCatalogFiles, '
"expected bool; got 'truel'",
'Warning: for Lib/TargetMachine, unrecognized enumerated value '
'MachineX86i',
"Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
# Custom
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
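  # Hedged reading aid (assumption): "[0, N)" in the messages above denotes a
  # half-open range, i.e. the accepted index values are 0 through N - 1.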
def testConvertToMSBuildSettings_full_synthetic(self):
"""Tests conversion of all the MSBuild settings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '1',
'BrowseInformation': '2',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '0',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': '0',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '1',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '0',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '2',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '0',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '0',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': '1',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': '1',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '0',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'ErrorReporting': '0',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2;file3',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '1',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '0',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '0',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '3',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '1',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'false',
'UseUnicodeResponseFiles': 'true',
'Version': 'a_string'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': '0',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '2',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'EmbedManifest': 'true',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'ManifestResourceFile': 'my_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string /J',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'true',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': 'NotSet',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'AnySuitable',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'Create',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'WarningLevel': 'Level2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'Link': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': 'ForceIJWImage',
'CLRThreadAttribute': 'STAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': 'Driver',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'NoErrorReport',
'LinkTimeCodeGeneration': 'PGInstrument',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': '',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'true',
'ShowProgress': 'NotSet',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Windows',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineARM',
'TerminalServerAware': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'HighestAvailable',
'UACUIAccess': 'true',
'Version': 'a_string'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '0x03eb',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': 'Unsigned',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'All',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '4',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Win32',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'Lib': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'my_name'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'false'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_actual(self):
"""Tests the conversion of an actual project.
A VS2008 project with most of the options defined was created through the
VS2008 IDE. It was then converted to VS2010. The tool settings found in
the .vcproj and .vcxproj files were converted to the two dictionaries
msvs_settings and expected_msbuild_settings.
Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure that inherited values are
included. Since the Gyp projects we generate do not use inheritance,
we removed these macros. They were:
ClCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
AdditionalOptions: ' %(AdditionalOptions)'
AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
ForcedUsingFiles: ';%(ForcedUsingFiles)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
UndefinePreprocessorDefinitions:
';%(UndefinePreprocessorDefinitions)',
Link:
AdditionalDependencies: ';%(AdditionalDependencies)',
AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
AdditionalManifestDependencies:
';%(AdditionalManifestDependencies)',
AdditionalOptions: ' %(AdditionalOptions)',
AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
AssemblyLinkResource: ';%(AssemblyLinkResource)',
DelayLoadDLLs: ';%(DelayLoadDLLs)',
EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
ForceSymbolReferences: ';%(ForceSymbolReferences)',
IgnoreSpecificDefaultLibraries:
';%(IgnoreSpecificDefaultLibraries)',
ResourceCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
AdditionalOptions: ' %(AdditionalOptions)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
Manifest:
AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
AdditionalOptions: ' %(AdditionalOptions)',
InputResourceManifests: ';%(InputResourceManifests)',
"""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)\\a',
'AssemblerOutput': '1',
'BasicRuntimeChecks': '3',
'BrowseInformation': '1',
'BrowseInformationFile': '$(IntDir)\\e',
'BufferSecurityCheck': 'false',
'CallingConvention': '1',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '2',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '2',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'GeneratePreprocessedFile': '2',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': '$(IntDir)\\b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
'PrecompiledHeaderThrough': 'StdAfx.hd',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
'RuntimeLibrary': '3',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'false',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '0',
'UseUnicodeResponseFiles': 'false',
'WarnAsError': 'true',
'WarningLevel': '3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)\\c'},
'VCLinkerTool': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': '1',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': '3',
'CLRThreadAttribute': '1',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': '1',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'ErrorReporting': '2',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'false',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'flob;flok',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': '2',
'LinkIncremental': '0',
'LinkLibraryDependencies': 'false',
'LinkTimeCodeGeneration': '1',
'ManifestFile':
'$(IntDir)\\$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'OptimizeForWindows98': '2',
'OptimizeReferences': '2',
'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'false',
'ShowProgress': '1',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': '1',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '1',
'TerminalServerAware': '1',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'false',
'Version': '333'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '3084',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
'ShowProgress': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
'EmbedManifest': 'false',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'ManifestResourceFile':
'$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'false',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more /J',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)a',
'AssemblerOutput': 'AssemblyCode',
'BasicRuntimeChecks': 'EnableFastChecks',
'BrowseInformation': 'true',
'BrowseInformationFile': '$(IntDir)e',
'BufferSecurityCheck': 'false',
'CallingConvention': 'FastCall',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Queue',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Size',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': '$(IntDir)b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
'PrecompiledHeaderFile': 'StdAfx.hd',
'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'PreprocessSuppressLineNumbers': 'true',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
'RuntimeLibrary': 'MultiThreadedDebugDLL',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '4Bytes',
'SuppressStartupBanner': 'false',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'WarningLevel': 'Level3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)c'},
'Link': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': 'true',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': 'ForceSafeILImage',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': 'UpOnly',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'flob;flok',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'QueueForNextLogin',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'NoEntryPoint': 'true',
'OptimizeReferences': 'true',
'OutputFile': '$(OutDir)$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'false',
'ShowProgress': 'LinkVerbose',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': 'Console',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': 'RequireAdministrator',
'UACUIAccess': 'true',
'Version': '333'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '0x0c0c',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
'ShowProgress': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'false',
'UseLibraryDependencyInputs': 'true'},
'': {
'EmbedManifest': 'false',
'GenerateManifest': 'false',
'IgnoreImportLibrary': 'true',
'LinkIncremental': ''
},
'ManifestResourceCompile': {
'ResourceOutputFileName':
'$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
if __name__ == '__main__':
unittest.main()
|
mikeumus/django-admin-views
|
refs/heads/master
|
example_project/example_project/urls.py
|
1
|
from django.conf.urls import patterns, include, url
# The next two lines enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'example_project.views.home', name='home'),
# url(r'^example_project/', include('example_project.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # The next line enables the admin:
url(r'^admin/', include(admin.site.urls)),
)
|
jeffzheng1/tensorflow
|
refs/heads/master
|
tensorflow/python/util/future_api_test.py
|
173
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for future_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# pylint: disable=unused-import
from tensorflow.python.util import future_api
# pylint: enable=unused-import
class ExampleParserConfigurationTest(tf.test.TestCase):
def testBasic(self):
self.assertFalse(hasattr(tf, 'arg_max'))
self.assertTrue(hasattr(tf, 'argmax'))
if __name__ == '__main__':
tf.test.main()
|
Kayra/Thunder
|
refs/heads/master
|
server/routines/migrations/0002_auto_20160216_2236.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('routines', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='exercise',
options={'ordering': ['position']},
),
migrations.AlterModelOptions(
name='routine',
options={'ordering': ['name']},
),
migrations.AddField(
model_name='routine',
name='user',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL, related_name='routines'),
preserve_default=False,
),
]
|
WhySoGeeky/DroidPot
|
refs/heads/master
|
venv/lib/python2.7/site-packages/jinja2/loaders.py
|
333
|
# -*- coding: utf-8 -*-
"""
jinja2.loaders
~~~~~~~~~~~~~~
Jinja loader classes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
from hashlib import sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import open_if_exists, internalcode
from jinja2._compat import string_types, iteritems
def split_template_path(template):
"""Split a path into segments and perform a sanity check. If it detects
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
for piece in template.split('/'):
if path.sep in piece \
or (path.altsep and path.altsep in piece) or \
piece == path.pardir:
raise TemplateNotFound(template)
elif piece and piece != '.':
pieces.append(piece)
return pieces
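# Editor's illustrative note (not part of the original module): under these
# rules, split_template_path('users/./index.html') returns ['users', 'index.html'],
# while split_template_path('users/../secret') raises TemplateNotFound, since
# '..' segments are rejected to keep lookups inside the search path.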
class BaseLoader(object):
"""Baseclass for all loaders. Subclass this and override `get_source` to
implement a custom loading mechanism. The environment provides a
`get_template` method that calls the loader's `load` method to get the
:class:`Template` object.
A very basic example for a loader that looks up templates on the file
system could look like this::
from jinja2 import BaseLoader, TemplateNotFound
from os.path import join, exists, getmtime
class MyLoader(BaseLoader):
def __init__(self, path):
self.path = path
def get_source(self, environment, template):
path = join(self.path, template)
if not exists(path):
raise TemplateNotFound(template)
mtime = getmtime(path)
with open(path, 'rb') as f:
source = f.read().decode('utf-8')
return source, path, lambda: mtime == getmtime(path)
"""
#: if set to `False` it indicates that the loader cannot provide access
#: to the source of templates.
#:
#: .. versionadded:: 2.4
has_source_access = True
def get_source(self, environment, template):
"""Get the template source, filename and reload helper for a template.
It's passed the environment and template name and has to return a
tuple in the form ``(source, filename, uptodate)`` or raise a
`TemplateNotFound` error if it can't locate the template.
The source part of the returned tuple must be the source of the
template as a unicode string or an ASCII bytestring. The filename should
be the name of the file on the filesystem if it was loaded from there,
otherwise `None`. The filename is used by python for the tracebacks
if no loader extension is used.
The last item in the tuple is the `uptodate` function. If auto
reloading is enabled it's always called to check if the template
changed. No arguments are passed so the function must store the
old state somewhere (for example in a closure). If it returns `False`
the template will be reloaded.
"""
if not self.has_source_access:
raise RuntimeError('%s cannot provide access to the source' %
self.__class__.__name__)
raise TemplateNotFound(template)
def list_templates(self):
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
raise TypeError('this loader cannot iterate over all templates')
@internalcode
def load(self, environment, name, globals=None):
"""Loads a template. This method looks up the template in the cache
or loads one by calling :meth:`get_source`. Subclasses should not
override this method as loaders working on collections of other
loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
will not call this method but `get_source` directly.
"""
code = None
if globals is None:
globals = {}
# first we try to get the source for this template together
# with the filename and the uptodate function.
source, filename, uptodate = self.get_source(environment, name)
# try to load the code from the bytecode cache if there is a
# bytecode cache configured.
bcc = environment.bytecode_cache
if bcc is not None:
bucket = bcc.get_bucket(environment, name, filename, source)
code = bucket.code
# if we don't have code so far (not cached, no longer up to
# date) etc. we compile the template
if code is None:
code = environment.compile(source, name, filename)
# if the bytecode cache is available and the bucket doesn't
# have a code so far, we give the bucket the new code and put
# it back to the bytecode cache.
if bcc is not None and bucket.code is None:
bucket.code = code
bcc.set_bucket(bucket)
return environment.template_class.from_code(environment, code,
globals, uptodate)
class FileSystemLoader(BaseLoader):
"""Loads templates from the file system. This loader can find templates
in folders on the file system and is the preferred way to load them.
The loader takes the path to the templates as a string, or, if multiple
locations are wanted, a list of them which is then looked up in the
given order::
>>> loader = FileSystemLoader('/path/to/templates')
>>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
By default the template encoding is ``'utf-8'``, which can be changed
by setting the `encoding` parameter to something else.
To follow symbolic links, set the *followlinks* parameter to ``True``::
>>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
.. versionchanged:: 2.8+
The *followlinks* parameter was added.
"""
def __init__(self, searchpath, encoding='utf-8', followlinks=False):
if isinstance(searchpath, string_types):
searchpath = [searchpath]
self.searchpath = list(searchpath)
self.encoding = encoding
self.followlinks = followlinks
def get_source(self, environment, template):
pieces = split_template_path(template)
for searchpath in self.searchpath:
filename = path.join(searchpath, *pieces)
f = open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
return contents, filename, uptodate
raise TemplateNotFound(template)
def list_templates(self):
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
for dirpath, dirnames, filenames in walk_dir:
for filename in filenames:
template = os.path.join(dirpath, filename) \
[len(searchpath):].strip(os.path.sep) \
.replace(os.path.sep, '/')
if template[:2] == './':
template = template[2:]
if template not in found:
found.add(template)
return sorted(found)
class PackageLoader(BaseLoader):
"""Load templates from python eggs or packages. It is constructed with
the name of the python package and the path to the templates in that
package::
loader = PackageLoader('mypackage', 'views')
If the package path is not given, ``'templates'`` is assumed.
By default the template encoding is ``'utf-8'``, which can be changed
by setting the `encoding` parameter to something else. Due to the nature
of eggs it's only possible to reload templates if the package was loaded
from the file system and not a zip file.
"""
def __init__(self, package_name, package_path='templates',
encoding='utf-8'):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
provider = get_provider(package_name)
self.encoding = encoding
self.manager = ResourceManager()
self.filesystem_bound = isinstance(provider, DefaultProvider)
self.provider = provider
self.package_path = package_path
def get_source(self, environment, template):
pieces = split_template_path(template)
p = '/'.join((self.package_path,) + tuple(pieces))
if not self.provider.has_resource(p):
raise TemplateNotFound(template)
filename = uptodate = None
if self.filesystem_bound:
filename = self.provider.get_resource_filename(self.manager, p)
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
source = self.provider.get_resource_string(self.manager, p)
return source.decode(self.encoding), filename, uptodate
def list_templates(self):
path = self.package_path
if path[:2] == './':
path = path[2:]
elif path == '.':
path = ''
offset = len(path)
results = []
def _walk(path):
for filename in self.provider.resource_listdir(path):
fullname = path + '/' + filename
if self.provider.resource_isdir(fullname):
_walk(fullname)
else:
results.append(fullname[offset:].lstrip('/'))
_walk(path)
results.sort()
return results
class DictLoader(BaseLoader):
"""Loads a template from a python dict. It's passed a dict of unicode
strings bound to template names. This loader is useful for unittesting:
>>> loader = DictLoader({'index.html': 'source here'})
Because auto reloading is rarely useful, it is disabled by default.
"""
def __init__(self, mapping):
self.mapping = mapping
def get_source(self, environment, template):
if template in self.mapping:
source = self.mapping[template]
return source, None, lambda: source == self.mapping.get(template)
raise TemplateNotFound(template)
def list_templates(self):
return sorted(self.mapping)
class FunctionLoader(BaseLoader):
"""A loader that is passed a function which does the loading. The
function receives the name of the template and has to return either
a unicode string with the template source, a tuple in the form ``(source,
filename, uptodatefunc)`` or `None` if the template does not exist.
>>> def load_template(name):
... if name == 'index.html':
... return '...'
...
>>> loader = FunctionLoader(load_template)
The `uptodatefunc` is a function that is called if autoreload is enabled
and has to return `True` if the template is still up to date. For more
details have a look at :meth:`BaseLoader.get_source` which has the same
return value.
"""
def __init__(self, load_func):
self.load_func = load_func
def get_source(self, environment, template):
rv = self.load_func(template)
if rv is None:
raise TemplateNotFound(template)
elif isinstance(rv, string_types):
return rv, None, None
return rv
class PrefixLoader(BaseLoader):
"""A loader that is passed a dict of loaders where each loader is bound
to a prefix. The prefix is delimited from the template by a slash by
default, which can be changed by setting the `delimiter` argument to
something else::
loader = PrefixLoader({
'app1': PackageLoader('mypackage.app1'),
'app2': PackageLoader('mypackage.app2')
})
By loading ``'app1/index.html'`` the file from the app1 package is loaded,
and by loading ``'app2/index.html'`` the file from the second.
"""
def __init__(self, mapping, delimiter='/'):
self.mapping = mapping
self.delimiter = delimiter
def get_loader(self, template):
try:
prefix, name = template.split(self.delimiter, 1)
loader = self.mapping[prefix]
except (ValueError, KeyError):
raise TemplateNotFound(template)
return loader, name
def get_source(self, environment, template):
loader, name = self.get_loader(template)
try:
return loader.get_source(environment, name)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
loader, local_name = self.get_loader(name)
try:
return loader.load(environment, local_name, globals)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(name)
def list_templates(self):
result = []
for prefix, loader in iteritems(self.mapping):
for template in loader.list_templates():
result.append(prefix + self.delimiter + template)
return result
class ChoiceLoader(BaseLoader):
"""This loader works like the `PrefixLoader` just that no prefix is
specified. If a template could not be found by one loader the next one
is tried.
>>> loader = ChoiceLoader([
... FileSystemLoader('/path/to/user/templates'),
... FileSystemLoader('/path/to/system/templates')
... ])
This is useful if you want to allow users to override builtin templates
from a different location.
"""
def __init__(self, loaders):
self.loaders = loaders
def get_source(self, environment, template):
for loader in self.loaders:
try:
return loader.get_source(environment, template)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
for loader in self.loaders:
try:
return loader.load(environment, name, globals)
except TemplateNotFound:
pass
raise TemplateNotFound(name)
def list_templates(self):
found = set()
for loader in self.loaders:
found.update(loader.list_templates())
return sorted(found)
class _TemplateModule(ModuleType):
"""Like a normal module but with support for weak references"""
class ModuleLoader(BaseLoader):
"""This loader loads templates from precompiled templates.
Example usage:
>>> loader = ChoiceLoader([
... ModuleLoader('/path/to/compiled/templates'),
... FileSystemLoader('/path/to/templates')
... ])
Templates can be precompiled with :meth:`Environment.compile_templates`.
"""
has_source_access = False
def __init__(self, path):
package_name = '_jinja2_module_templates_%x' % id(self)
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
if isinstance(path, string_types):
path = [path]
else:
path = list(path)
mod.__path__ = path
sys.modules[package_name] = weakref.proxy(mod,
lambda x: sys.modules.pop(package_name, None))
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
# loader that created it goes out of business.
self.module = mod
self.package_name = package_name
@staticmethod
def get_template_key(name):
return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
@staticmethod
def get_module_filename(name):
return ModuleLoader.get_template_key(name) + '.py'
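# Editor's note (an inference from the two helpers above): template names are
# hashed, so a template called 'index.html' is looked up as a module file
# named 'tmpl_<40-hex-char sha1>.py' inside the compiled-templates directory.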
@internalcode
def load(self, environment, name, globals=None):
key = self.get_template_key(name)
module = '%s.%s' % (self.package_name, key)
mod = getattr(self.module, module, None)
if mod is None:
try:
mod = __import__(module, None, None, ['root'])
except ImportError:
raise TemplateNotFound(name)
# remove the entry from sys.modules, we only want the attribute
# on the module object we have stored on the loader.
sys.modules.pop(module, None)
return environment.template_class.from_module_dict(
environment, mod.__dict__, globals)
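# --- Editor's usage sketch (not part of the original module) -----------------
# A minimal demonstration of composing the loaders defined above; the template
# names and contents are invented for illustration, and jinja2's Environment
# is assumed to be importable.
if __name__ == '__main__':
    from jinja2 import Environment
    demo_loader = ChoiceLoader([
        DictLoader({'index.html': 'Hello {{ name }}!'}),
        PrefixLoader({'app': DictLoader({'page.html': 'app page'})}),
    ])
    env = Environment(loader=demo_loader)
    # served by the DictLoader
    print(env.get_template('index.html').render(name='world'))
    # falls through to the PrefixLoader, which strips the 'app/' prefix
    print(env.get_template('app/page.html').render())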
|
PandaWei/tp-qemu
|
refs/heads/master
|
generic/tests/yum_update.py
|
5
|
import logging
import time
def internal_yum_update(session, command, prompt, timeout):
"""
Helper function to perform the yum update test.
:param session: shell session established to the host
:param command: Command to be sent to the shell session
:param prompt: Machine prompt
:param timeout: How long to wait until we get an appropriate output from
the shell session.
"""
session.sendline(command)
end_time = time.time() + timeout
while time.time() < end_time:
match = session.read_until_last_line_matches(
["[Ii]s this [Oo][Kk]", prompt],
timeout=timeout)[0]
if match == 0:
logging.info("Got 'Is this ok'; sending 'y'")
session.sendline("y")
elif match == 1:
logging.info("Got shell prompt")
return True
else:
logging.info("Timeout or process exited")
return False
def run(test, params, env):
"""
Runs yum update and yum update kernel on the remote host (yum enabled
hosts only).
:param test: kvm test object.
:param params: Dictionary with test parameters.
:param env: Dictionary with the test environment.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
internal_yum_update(session, "yum update --nogpgcheck",
params.get("shell_prompt"), 600)
internal_yum_update(session, "yum update kernel --nogpgcheck",
params.get("shell_prompt"), 600)
session.close()
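# --- Editor's sketch (not part of the original test) -------------------------
# A fake session, invented purely for illustration, showing how
# internal_yum_update() drives the interactive prompt loop: it answers the
# "Is this ok" question once and then sees the shell prompt.
if __name__ == '__main__':
    class FakeSession(object):
        def __init__(self):
            self.replies = iter([0, 1])  # first match: "Is this ok"; then prompt
        def sendline(self, line):
            logging.info("sent: %s", line)
        def read_until_last_line_matches(self, patterns, timeout=None):
            return (next(self.replies),)
    logging.basicConfig(level=logging.INFO)
    assert internal_yum_update(FakeSession(), "yum update", r"\[root@.*\]#", 5)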
|
ox-it/python-sharepoint
|
refs/heads/master
|
sharepoint/site.py
|
2
|
import functools
from lxml import etree
from six.moves.urllib.request import Request
from six.moves.urllib.parse import urljoin
from .lists import SharePointLists
from .users import SharePointUsers
from .xml import soap_body, namespaces, OUT
class SharePointSite(object):
def __init__(self, url, opener, timeout=None):
if not url.endswith('/'):
url += '/'
self.opener = opener
self.opener.base_url = url
self.opener.post_soap = self.post_soap
self.opener.relative = functools.partial(urljoin, url)
self.timeout = timeout
def post_soap(self, url, xml, soapaction=None):
url = self.opener.relative(url)
request = Request(url, etree.tostring(soap_body(xml)))
request.add_header('Content-type', 'text/xml; charset=utf-8')
if soapaction:
request.add_header('Soapaction', soapaction)
response = self.opener.open(request, timeout=self.timeout)
return etree.parse(response).xpath('/soap:Envelope/soap:Body/*', namespaces=namespaces)[0]
@property
def lists(self):
if not hasattr(self, '_lists'):
self._lists = SharePointLists(self.opener)
return self._lists
@property
def users(self):
if not hasattr(self, '_users'):
self._users = SharePointUsers(self.opener)
return self._users
def as_xml(self, include_lists=False, include_users=False, **kwargs):
xml = OUT.site(url=self.opener.base_url)
if include_lists or kwargs.get('list_names'):
xml.append(self.lists.as_xml(**kwargs))
if include_users:
if 'user_ids' not in kwargs:
kwargs['user_ids'] = set(xml.xpath('.//sharepoint:user/@id', namespaces=namespaces))
xml.append(self.users.as_xml(**kwargs))
return xml
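# --- Editor's usage sketch (not part of the original module) -----------------
# How a SharePointSite is typically constructed; the URL and credentials are
# placeholders, and the opener is a plain urllib opener with basic auth.
if __name__ == '__main__':
    from six.moves.urllib.request import (build_opener, HTTPBasicAuthHandler,
                                          HTTPPasswordMgrWithDefaultRealm)
    url = 'https://sharepoint.example.org/sites/demo/'  # hypothetical site
    password_mgr = HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, url, 'user', 'secret')
    opener = build_opener(HTTPBasicAuthHandler(password_mgr))
    site = SharePointSite(url, opener)
    print(etree.tostring(site.as_xml()))  # serializes without hitting the network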
|
wfhio/tramcar
|
refs/heads/master
|
job_board/migrations/0009_auto_20161006_0234.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-06 02:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job_board', '0008_auto_20161001_2110'),
]
operations = [
migrations.AlterField(
model_name='job',
name='expired_at',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='job',
name='paid_at',
field=models.DateTimeField(blank=True, null=True),
),
]
|
rikuayanokozy/closure-templates
|
refs/heads/master
|
python/generated_sanitize.py
|
5
|
# START GENERATED CODE FOR ESCAPERS.
from __future__ import unicode_literals
import re
import urllib
try:
str = unicode
except NameError:
pass
def escape_uri_helper(v):
return urllib.quote(str(v))
_ESCAPE_MAP_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE = {
'\x00': '&#0;',
'\x09': '&#9;',
'\x0a': '&#10;',
'\x0b': '&#11;',
'\x0c': '&#12;',
'\x0d': '&#13;',
' ': '&#32;',
'\"': '&quot;',
'&': '&amp;',
'\'': '&#39;',
'-': '&#45;',
'/': '&#47;',
'<': '&lt;',
'=': '&#61;',
'>': '&gt;',
'`': '&#96;',
'\x85': '&#133;',
'\xa0': '&#160;',
'\u2028': '&#8232;',
'\u2029': '&#8233;'
}
def _REPLACER_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE(match):
ch = match.group(0)
return _ESCAPE_MAP_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE[ch]
_ESCAPE_MAP_FOR_ESCAPE_JS_STRING__AND__ESCAPE_JS_REGEX = {
'\x00': '\\x00',
'\x08': '\\x08',
'\x09': '\\t',
'\x0a': '\\n',
'\x0b': '\\x0b',
'\x0c': '\\f',
'\x0d': '\\r',
'\"': '\\x22',
'$': '\\x24',
'&': '\\x26',
'\'': '\\x27',
'(': '\\x28',
')': '\\x29',
'*': '\\x2a',
'+': '\\x2b',
',': '\\x2c',
'-': '\\x2d',
'.': '\\x2e',
'/': '\\/',
':': '\\x3a',
'<': '\\x3c',
'=': '\\x3d',
'>': '\\x3e',
'?': '\\x3f',
'[': '\\x5b',
'\\': '\\\\',
']': '\\x5d',
'^': '\\x5e',
'{': '\\x7b',
'|': '\\x7c',
'}': '\\x7d',
'\x85': '\\x85',
'\u2028': '\\u2028',
'\u2029': '\\u2029'
}
def _REPLACER_FOR_ESCAPE_JS_STRING__AND__ESCAPE_JS_REGEX(match):
ch = match.group(0)
return _ESCAPE_MAP_FOR_ESCAPE_JS_STRING__AND__ESCAPE_JS_REGEX[ch]
_ESCAPE_MAP_FOR_ESCAPE_CSS_STRING = {
'\x00': '\\0 ',
'\x08': '\\8 ',
'\x09': '\\9 ',
'\x0a': '\\a ',
'\x0b': '\\b ',
'\x0c': '\\c ',
'\x0d': '\\d ',
'\"': '\\22 ',
'&': '\\26 ',
'\'': '\\27 ',
'(': '\\28 ',
')': '\\29 ',
'*': '\\2a ',
'/': '\\2f ',
':': '\\3a ',
';': '\\3b ',
'<': '\\3c ',
'=': '\\3d ',
'>': '\\3e ',
'@': '\\40 ',
'\\': '\\5c ',
'{': '\\7b ',
'}': '\\7d ',
'\x85': '\\85 ',
'\xa0': '\\a0 ',
'\u2028': '\\2028 ',
'\u2029': '\\2029 '
}
def _REPLACER_FOR_ESCAPE_CSS_STRING(match):
ch = match.group(0)
return _ESCAPE_MAP_FOR_ESCAPE_CSS_STRING[ch]
_ESCAPE_MAP_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI = {
'\x00': '%00',
'\x01': '%01',
'\x02': '%02',
'\x03': '%03',
'\x04': '%04',
'\x05': '%05',
'\x06': '%06',
'\x07': '%07',
'\x08': '%08',
'\x09': '%09',
'\x0a': '%0A',
'\x0b': '%0B',
'\x0c': '%0C',
'\x0d': '%0D',
'\x0e': '%0E',
'\x0f': '%0F',
'\x10': '%10',
'\x11': '%11',
'\x12': '%12',
'\x13': '%13',
'\x14': '%14',
'\x15': '%15',
'\x16': '%16',
'\x17': '%17',
'\x18': '%18',
'\x19': '%19',
'\x1a': '%1A',
'\x1b': '%1B',
'\x1c': '%1C',
'\x1d': '%1D',
'\x1e': '%1E',
'\x1f': '%1F',
' ': '%20',
'\"': '%22',
'\'': '%27',
'(': '%28',
')': '%29',
'<': '%3C',
'>': '%3E',
'\\': '%5C',
'{': '%7B',
'}': '%7D',
'\x7f': '%7F',
'\x85': '%C2%85',
'\xa0': '%C2%A0',
'\u2028': '%E2%80%A8',
'\u2029': '%E2%80%A9',
'\uff01': '%EF%BC%81',
'\uff03': '%EF%BC%83',
'\uff04': '%EF%BC%84',
'\uff06': '%EF%BC%86',
'\uff07': '%EF%BC%87',
'\uff08': '%EF%BC%88',
'\uff09': '%EF%BC%89',
'\uff0a': '%EF%BC%8A',
'\uff0b': '%EF%BC%8B',
'\uff0c': '%EF%BC%8C',
'\uff0f': '%EF%BC%8F',
'\uff1a': '%EF%BC%9A',
'\uff1b': '%EF%BC%9B',
'\uff1d': '%EF%BC%9D',
'\uff1f': '%EF%BC%9F',
'\uff20': '%EF%BC%A0',
'\uff3b': '%EF%BC%BB',
'\uff3d': '%EF%BC%BD'
}
def _REPLACER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI(match):
ch = match.group(0)
return _ESCAPE_MAP_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI[ch]
_MATCHER_FOR_ESCAPE_HTML = re.compile(r'[\x00\x22\x26\x27\x3c\x3e]', re.U)
_MATCHER_FOR_NORMALIZE_HTML = re.compile(r'[\x00\x22\x27\x3c\x3e]', re.U)
_MATCHER_FOR_ESCAPE_HTML_NOSPACE = re.compile(r'[\x00\x09-\x0d \x22\x26\x27\x2d\/\x3c-\x3e`\x85\xa0\u2028\u2029]', re.U)
_MATCHER_FOR_NORMALIZE_HTML_NOSPACE = re.compile(r'[\x00\x09-\x0d \x22\x27\x2d\/\x3c-\x3e`\x85\xa0\u2028\u2029]', re.U)
_MATCHER_FOR_ESCAPE_JS_STRING = re.compile(r'[\x00\x08-\x0d\x22\x26\x27\/\x3c-\x3e\\\x85\u2028\u2029]', re.U)
_MATCHER_FOR_ESCAPE_JS_REGEX = re.compile(r'[\x00\x08-\x0d\x22\x24\x26-\/\x3a\x3c-\x3f\x5b-\x5e\x7b-\x7d\x85\u2028\u2029]', re.U)
_MATCHER_FOR_ESCAPE_CSS_STRING = re.compile(r'[\x00\x08-\x0d\x22\x26-\x2a\/\x3a-\x3e@\\\x7b\x7d\x85\xa0\u2028\u2029]', re.U)
_MATCHER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI = re.compile(r'[\x00- \x22\x27-\x29\x3c\x3e\\\x7b\x7d\x7f\x85\xa0\u2028\u2029\uff01\uff03\uff04\uff06-\uff0c\uff0f\uff1a\uff1b\uff1d\uff1f\uff20\uff3b\uff3d]', re.U)
_FILTER_FOR_FILTER_CSS_VALUE = re.compile(r"""^(?!-*(?:expression|(?:moz-)?binding))(?:[.#]?-?(?:[_a-z0-9-]+)(?:-[_a-z0-9-]+)*-?|-?(?:[0-9]+(?:\.[0-9]*)?|\.[0-9]+)(?:[a-z]{1,2}|%)?|!important|)\Z""", re.U | re.I)
_FILTER_FOR_FILTER_NORMALIZE_URI = re.compile(r"""^(?![^#?]*/(?:\.|%2E){2}(?:[/?#]|\Z))(?:(?:https?|mailto):|[^&:/?#]*(?:[/?#]|\Z))""", re.U | re.I)
_FILTER_FOR_FILTER_IMAGE_DATA_URI = re.compile(r"""^data:image/(?:bmp|gif|jpe?g|png|tiff|webp);base64,[a-z0-9+/]+=*\Z""", re.U | re.I)
_FILTER_FOR_FILTER_HTML_ATTRIBUTES = re.compile(r"""^(?!style|on|action|archive|background|cite|classid|codebase|data|dsync|href|longdesc|src|usemap)(?:[a-z0-9_$:-]*)\Z""", re.U | re.I)
_FILTER_FOR_FILTER_HTML_ELEMENT_NAME = re.compile(r"""^(?!script|style|title|textarea|xmp|no)[a-z0-9_$:-]*\Z""", re.U | re.I)
def escape_html_helper(value):
value = str(value)
return _MATCHER_FOR_ESCAPE_HTML.sub(
_REPLACER_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE, value)
def normalize_html_helper(value):
value = str(value)
return _MATCHER_FOR_NORMALIZE_HTML.sub(
_REPLACER_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE, value)
def escape_html_nospace_helper(value):
value = str(value)
return _MATCHER_FOR_ESCAPE_HTML_NOSPACE.sub(
_REPLACER_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE, value)
def normalize_html_nospace_helper(value):
value = str(value)
return _MATCHER_FOR_NORMALIZE_HTML_NOSPACE.sub(
_REPLACER_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE, value)
def escape_js_string_helper(value):
value = str(value)
return _MATCHER_FOR_ESCAPE_JS_STRING.sub(
_REPLACER_FOR_ESCAPE_JS_STRING__AND__ESCAPE_JS_REGEX, value)
def escape_js_regex_helper(value):
value = str(value)
return _MATCHER_FOR_ESCAPE_JS_REGEX.sub(
_REPLACER_FOR_ESCAPE_JS_STRING__AND__ESCAPE_JS_REGEX, value)
def escape_css_string_helper(value):
value = str(value)
return _MATCHER_FOR_ESCAPE_CSS_STRING.sub(
_REPLACER_FOR_ESCAPE_CSS_STRING, value)
def filter_css_value_helper(value):
value = str(value)
if not _FILTER_FOR_FILTER_CSS_VALUE.search(value):
return 'zSoyz'
return value
def normalize_uri_helper(value):
value = str(value)
return _MATCHER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI.sub(
_REPLACER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI, value)
def filter_normalize_uri_helper(value):
value = str(value)
if not _FILTER_FOR_FILTER_NORMALIZE_URI.search(value):
return '#zSoyz'
return _MATCHER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI.sub(
_REPLACER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI, value)
def filter_image_data_uri_helper(value):
value = str(value)
if not _FILTER_FOR_FILTER_IMAGE_DATA_URI.search(value):
return 'data:image/gif;base64,zSoyz'
return value
def filter_html_attributes_helper(value):
value = str(value)
if not _FILTER_FOR_FILTER_HTML_ATTRIBUTES.search(value):
return 'zSoyz'
return value
def filter_html_element_name_helper(value):
value = str(value)
if not _FILTER_FOR_FILTER_HTML_ELEMENT_NAME.search(value):
return 'zSoyz'
return value
_HTML_TAG_REGEX = re.compile(r"""<(?:!|/?([a-zA-Z][a-zA-Z0-9:\-]*))(?:[^>'"]|"[^"]*"|'[^']*')*>""", re.U)
_LT_REGEX = re.compile('<')
_SAFE_TAG_WHITELIST = ('b', 'br', 'em', 'i', 's', 'sub', 'sup', 'u')
# END GENERATED CODE
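# --- Editor's demo (not part of the generated code) --------------------------
# A quick check of two escapers defined above; expected results are shown in
# the trailing comments. Note that escape_uri_helper() relies on the
# Python 2 urllib.quote() API.
if __name__ == '__main__':
    print(escape_html_helper('<b>"A & B"</b>'))
    # -> &lt;b&gt;&quot;A &amp; B&quot;&lt;/b&gt;
    print(filter_html_element_name_helper('script'))
    # -> zSoyz (a blacklisted element name is replaced by the innocuous token)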
|
fedackb/surface-constraint-tools
|
refs/heads/master
|
properties/ShrinkwrapProps.py
|
1
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
import bpy
class ShrinkwrapProps(bpy.types.PropertyGroup):
data_path = (
"user_preferences.addons['{0}'].preferences.shrinkwrap"
).format(__package__.split(".")[0])
# Shrinkwrap Settings
only_selected_are_affected = bpy.props.BoolProperty(
name = "Selected Only",
description = (
"Limit the shrinkwrap operation's effect to the selected " +
"vertices only."
),
default = False
)
# UI Visibility
settings_ui_is_visible = bpy.props.BoolProperty(
name = "Settings UI Visibility",
description = "Show/hide the Settings UI.",
default = False
)
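# --- Editor's sketch (not part of the original add-on) -----------------------
# Judging from data_path above, this group is meant to hang off the add-on's
# AddonPreferences; a hypothetical registration could look like:
#
#   class SurfaceConstraintToolsPreferences(bpy.types.AddonPreferences):
#       bl_idname = __package__
#       shrinkwrap = bpy.props.PointerProperty(type=ShrinkwrapProps)
#
#   def register():
#       bpy.utils.register_class(ShrinkwrapProps)
#       bpy.utils.register_class(SurfaceConstraintToolsPreferences)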
|
LaoZhongGu/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_sys.py
|
6
|
import unittest, test.support
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
try:
import threading
except ImportError:
threading = None
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call with integer argument 0
try:
sys.exit(0)
except SystemExit as exc:
self.assertEqual(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with integer argument
try:
sys.exit(42)
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with one entry
# entry will be unpacked
try:
sys.exit((42,))
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit as exc:
self.assertEqual(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit as exc:
self.assertEqual(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def check_exit_message(code, expected, env=None):
process = subprocess.Popen([sys.executable, "-c", code],
stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 1)
self.assertTrue(stderr.startswith(expected),
"%s doesn't start with %s" % (ascii(stderr), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
env = os.environ.copy()
env['PYTHONIOENCODING'] = 'latin-1'
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", env=env)
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'fatal error if run with a trace function')
def test_recursionlimit_recovery(self):
# NOTE: this test is slightly fragile in that it depends on the current
# recursion count when executing the test being low enough so as to
# trigger the recursion recovery detection in the _Py_MakeEndRecCheck
# macro (see ceval.h).
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for i in (50, 1000):
# Issue #5392: stack overflow after hitting recursion limit twice
sys.setrecursionlimit(i)
self.assertRaises(RuntimeError, f)
self.assertRaises(RuntimeError, f)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RuntimeError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.suppress_crash_popup():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: Cannot recover from stack overflow",
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.support.reap_threads
def current_frames_with_threads(self):
import threading, _thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(_thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = _thread.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 5)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug", "division_warning",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non-existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
elif sys.platform == 'win32':
expected = 'mbcs'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.support.unlink(test.support.TESTFN)
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('PP') + gc_header_size)
def test_default(self):
vsize = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), vsize('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('3P')) # XXX check layout
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('iPP') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
check(get_cell().__code__, size('5i8Pi3P'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('2PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('2P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# dict
check({}, size('3P2P' + 8*'P2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('3P2P' + 8*'P2P') + 16*struct.calcsize('P2P'))
# dictionary-keyiterator
check({}.keys(), size('P'))
# dictionary-valueiterator
check({}.values(), size('P'))
# dictionary-itemiterator
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2PPP'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5P 2P2PP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5P 2P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5P 2P2PP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('l3P'))
# reverse
check(reversed(''), size('PP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('11P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('P'))
# classmethod
check(bar, size('P'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pi2P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('lP'))
# long
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# memoryview
check(memoryview(b''), size('PP2P2i7P'))
# module
check(unittest, size('3P'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('PP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3P2P' + PySet_MINSIZE*'PP' + 'PP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
# setiterator
check(iter(set()), size('P3P'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs)
s = vsize('P2P15Pl4PP9PP11PI') + struct.calcsize('16Pi17P 3P 10P 2P 2P')
check(int, s)
# class
class newstyleclass(object): pass
check(newstyleclass, s)
# unicode
usize = len('\0'.encode('unicode-internal'))
samples = ['', '1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
basicsize = size('PPPiP') + usize * (len(s) + 1)
check(s, basicsize)
# weakref
import weakref
check(weakref.ref(int), size('2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pl2P'))
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(''))
# imp.NullImporter
import imp
check(imp.NullImporter(self.file.name), size(''))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
test_main()
|
egaxegax/django-dbcartajs
|
refs/heads/master
|
django/core/cache/__init__.py
|
98
|
"""
Caching framework.
This package defines a set of cache backends that all conform to a simple API.
In a nutshell, a cache is a set of values -- which can be any object that
may be pickled -- identified by string keys. For the complete API, see
the abstract BaseCache class in django.core.cache.backends.base.
Client code should not access a cache backend directly; instead it should
either use the "cache" variable made available here, or it should use the
get_cache() function made available here. get_cache() takes a backend URI
(e.g. "memcached://127.0.0.1:11211/") and returns an instance of a backend
cache class.
See docs/topics/cache.txt for information on the public API.
"""
try:
from urllib.parse import parse_qsl
except ImportError: # Python 2
from urlparse import parse_qsl
from django.conf import settings
from django.core import signals
from django.core.cache.backends.base import (
InvalidCacheBackendError, CacheKeyWarning, BaseCache)
from django.core.exceptions import ImproperlyConfigured
from django.utils import importlib
__all__ = [
'get_cache', 'cache', 'DEFAULT_CACHE_ALIAS'
]
# Name for use in settings file --> name of module in "backends" directory.
# Any backend scheme that is not in this dictionary is treated as a Python
# import path to a custom backend.
BACKENDS = {
'memcached': 'memcached',
'locmem': 'locmem',
'file': 'filebased',
'db': 'db',
'dummy': 'dummy',
}
DEFAULT_CACHE_ALIAS = 'default'
def parse_backend_uri(backend_uri):
"""
Converts the "backend_uri" into a cache scheme ('db', 'memcached', etc), a
host and any extra params that are required for the backend. Returns a
(scheme, host, params) tuple.
"""
if backend_uri.find(':') == -1:
raise InvalidCacheBackendError("Backend URI must start with scheme://")
scheme, rest = backend_uri.split(':', 1)
if not rest.startswith('//'):
raise InvalidCacheBackendError("Backend URI must start with scheme://")
host = rest[2:]
qpos = rest.find('?')
if qpos != -1:
params = dict(parse_qsl(rest[qpos+1:]))
host = rest[2:qpos]
else:
params = {}
if host.endswith('/'):
host = host[:-1]
return scheme, host, params
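# Editor's illustrative note: for example,
# parse_backend_uri('memcached://127.0.0.1:11211/?timeout=30') returns
# ('memcached', '127.0.0.1:11211', {'timeout': '30'}).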
if DEFAULT_CACHE_ALIAS not in settings.CACHES:
raise ImproperlyConfigured("You must define a '%s' cache" % DEFAULT_CACHE_ALIAS)
def parse_backend_conf(backend, **kwargs):
"""
Helper function to parse the backend configuration
that doesn't use the URI notation.
"""
# Try to get the CACHES entry for the given backend name first
conf = settings.CACHES.get(backend, None)
if conf is not None:
args = conf.copy()
args.update(kwargs)
backend = args.pop('BACKEND')
location = args.pop('LOCATION', '')
return backend, location, args
else:
try:
# Trying to import the given backend, in case it's a dotted path
mod_path, cls_name = backend.rsplit('.', 1)
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (AttributeError, ImportError, ValueError):
raise InvalidCacheBackendError("Could not find backend '%s'" % backend)
location = kwargs.pop('LOCATION', '')
return backend, location, kwargs
def get_cache(backend, **kwargs):
"""
Function to load a cache backend dynamically. This is flexible by design
to allow different use cases:
To load a backend with the old URI-based notation::
cache = get_cache('locmem://')
To load a backend that is pre-defined in the settings::
cache = get_cache('default')
To load a backend with its dotted import path,
including arbitrary options::
cache = get_cache('django.core.cache.backends.memcached.MemcachedCache', **{
'LOCATION': '127.0.0.1:11211', 'TIMEOUT': 30,
})
"""
try:
if '://' in backend:
# for backwards compatibility
backend, location, params = parse_backend_uri(backend)
if backend in BACKENDS:
backend = 'django.core.cache.backends.%s' % BACKENDS[backend]
params.update(kwargs)
mod = importlib.import_module(backend)
backend_cls = mod.CacheClass
else:
backend, location, params = parse_backend_conf(backend, **kwargs)
mod_path, cls_name = backend.rsplit('.', 1)
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (AttributeError, ImportError) as e:
raise InvalidCacheBackendError(
"Could not find backend '%s': %s" % (backend, e))
cache = backend_cls(location, params)
# Some caches -- python-memcached in particular -- need to do a cleanup at the
# end of a request cycle. If the cache provides a close() method, wire it up
# here.
if hasattr(cache, 'close'):
signals.request_finished.connect(cache.close)
return cache
cache = get_cache(DEFAULT_CACHE_ALIAS)
|
bancek/egradebook
|
refs/heads/master
|
src/lib/django/contrib/comments/signals.py
|
425
|
"""
Signals relating to comments.
"""
from django.dispatch import Signal
# Sent just before a comment will be posted (after it's been approved and
# moderated); this can be used to modify the comment (in place) with posting
# details or other such actions. If any receiver returns False the comment will be
# discarded and a 403 (not allowed) response returned. This signal is sent at more or less
# the same time (just before, actually) as the Comment object's pre-save signal,
# except that the HTTP request is sent along with this signal.
comment_will_be_posted = Signal(providing_args=["comment", "request"])
# Sent just after a comment was posted. See above for how this differs
# from the Comment object's post-save signal.
comment_was_posted = Signal(providing_args=["comment", "request"])
# Sent after a comment was "flagged" in some way. Check the flag to see if this
# was a user requesting removal of a comment, a moderator approving/removing a
# comment, or some other custom user flag.
comment_was_flagged = Signal(providing_args=["comment", "flag", "created", "request"])
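# Illustrative sketch (not part of the original module): a hypothetical
# receiver that vetoes comments containing a banned word by returning False
# from comment_will_be_posted.
#
# def reject_banned_words(sender, comment, request, **kwargs):
#     return 'spam' not in comment.comment.lower()
#
# comment_will_be_posted.connect(reject_banned_words)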
|
iarroyof/nlp-pipeline
|
refs/heads/master
|
attention_lstm_.py
|
1
|
"""
A keras attention layer that wraps RNN layers.
Based on tensorflows [attention_decoder](https://github.com/tensorflow/tensorflow/blob/c8a45a8e236776bed1d14fd71f3b6755bd63cc58/tensorflow/python/ops/seq2seq.py#L506)
and [Grammar as a Foreign Language](https://arxiv.org/abs/1412.7449).
date: 20161101
author: wassname
url: https://gist.github.com/wassname/5292f95000e409e239b9dc973295327a
"""
from keras import backend as K
from keras.engine import InputSpec
from keras.layers import LSTM, activations, Wrapper, Recurrent
class Attention(Wrapper):
"""
This wrapper will provide an attention layer to a recurrent layer.
# Arguments:
layer: `Recurrent` instance with consume_less='gpu' or 'mem'
# Examples:
```python
model = Sequential()
model.add(LSTM(10, return_sequences=True, batch_input_shape=(4, 5, 10)))
model.add(Attention(LSTM(10, return_sequences=True, consume_less='gpu')))
model.add(Dense(5))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
```
# References
- [Grammar as a Foreign Language](https://arxiv.org/abs/1412.7449)
"""
def __init__(self, layer, **kwargs):
assert isinstance(layer, Recurrent)
if layer.get_config()['consume_less'] == 'cpu':
raise Exception("The Attention wrapper doesn't support RNNs with consume_less='cpu'")
self.supports_masking = True
super(Attention, self).__init__(layer, **kwargs)
def build(self, input_shape):
assert len(input_shape) >= 3
self.input_spec = [InputSpec(shape=input_shape)]
nb_samples, nb_time, input_dim = input_shape
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = True
super(Attention, self).build()
self.W1 = self.layer.init((input_dim, input_dim, 1, 1), name='{}_W1'.format(self.name))
self.W2 = self.layer.init((self.layer.output_dim, input_dim), name='{}_W2'.format(self.name))
self.b2 = K.zeros((input_dim,), name='{}_b2'.format(self.name))
self.W3 = self.layer.init((input_dim*2, input_dim), name='{}_W3'.format(self.name))
self.b3 = K.zeros((input_dim,), name='{}_b3'.format(self.name))
self.V = self.layer.init((input_dim,), name='{}_V'.format(self.name))
self.trainable_weights = [self.W1, self.W2, self.W3, self.V, self.b2, self.b3]
def get_output_shape_for(self, input_shape):
return self.layer.get_output_shape_for(input_shape)
def step(self, x, states):
# This is based on [tensorflows implementation](https://github.com/tensorflow/tensorflow/blob/c8a45a8e236776bed1d14fd71f3b6755bd63cc58/tensorflow/python/ops/seq2seq.py#L506).
# First, we calculate new attention masks:
# attn = softmax(V^T * tanh(W1 * X + W2 * h + b2))
# then we form the new input as a concatenation of the original input and the
# attention-weighted inputs, transformed back to the shape of x using W3:
# x = W3 * concat(x, sum(attn * X)) + b3
# Then, we run the cell on a combination of the input and previous attention masks:
# h, state = cell(x, h).
nb_samples, nb_time, input_dim = self.input_spec[0].shape
h = states[0]
X = states[-1]
xW1 = states[-2]
Xr = K.reshape(X,(-1,nb_time,1,input_dim))
hW2 = K.dot(h,self.W2)+self.b2
hW2 = K.reshape(hW2,(-1,1,1,input_dim))
u = K.tanh(xW1+hW2)
a = K.sum(self.V*u,[2,3])
a = K.softmax(a)
a = K.reshape(a,(-1, nb_time, 1, 1))
# Weight the input sequence by the attention mask and pool over time
Xa = K.sum(a*Xr,[1,2])
Xa = K.reshape(Xa,(-1,input_dim))
# Merge input and attention weighted inputs into one vector of the right size.
x = K.dot(K.concatenate([x,Xa],1),self.W3)+self.b3
h, new_states = self.layer.step(x, states)
return h, new_states
def get_constants(self, x):
constants = self.layer.get_constants(x)
# Calculate K.dot(x, W2) only once per sequence by making it a constant
nb_samples, nb_time, input_dim = self.input_spec[0].shape
Xr = K.reshape(x,(-1,nb_time,input_dim,1))
Xrt = K.permute_dimensions(Xr, (0, 2, 1, 3))
xW1t = K.conv2d(Xrt,self.W1,border_mode='same')
xW1 = K.permute_dimensions(xW1t, (0, 2, 3, 1))
constants.append(xW1)
# we need to supply the full sequence of inputs to step (as the attention_vector)
constants.append(x)
return constants
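# Shape sketch for the constants above (illustrative, not in the original
# gist): with x of shape (nb_samples, nb_time, input_dim) and W1 of shape
# (input_dim, input_dim, 1, 1), the 1x1 convolution yields xW1 of shape
# (nb_samples, nb_time, 1, input_dim), which broadcasts against the
# (nb_samples, 1, 1, input_dim) hW2 term inside step() before the tanh.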
def call(self, x, mask=None):
# input shape: (nb_samples, time (padded with zeros), input_dim)
input_shape = self.input_spec[0].shape
if K._BACKEND == 'tensorflow':
if not input_shape[1]:
raise Exception('When using TensorFlow, you should define '
'explicitly the number of timesteps of '
'your sequences.\n'
'If your first layer is an Embedding, '
'make sure to pass it an "input_length" '
'argument. Otherwise, make sure '
'the first layer has '
'an "input_shape" or "batch_input_shape" '
'argument, including the time axis. '
'Found input shape at layer ' + self.name +
': ' + str(input_shape))
if self.layer.stateful:
initial_states = self.layer.states
else:
initial_states = self.layer.get_initial_states(x)
constants = self.get_constants(x)
preprocessed_input = self.layer.preprocess_input(x)
last_output, outputs, states = K.rnn(self.step, preprocessed_input,
initial_states,
go_backwards=self.layer.go_backwards,
mask=mask,
constants=constants,
unroll=self.layer.unroll,
input_length=input_shape[1])
if self.layer.stateful:
self.updates = []
for i in range(len(states)):
self.updates.append((self.layer.states[i], states[i]))
if self.layer.return_sequences:
return outputs
else:
return last_output
|
msegado/edx-platform
|
refs/heads/master
|
cms/tests/test_startup.py
|
154
|
"""
Test cms startup
"""
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from mock import patch
from cms.startup import run, enable_theme
class StartupTestCase(TestCase):
"""
Test cms startup
"""
def setUp(self):
super(StartupTestCase, self).setUp()
@patch.dict("django.conf.settings.FEATURES", {"USE_CUSTOM_THEME": True})
@override_settings(THEME_NAME="bar")
def test_run_with_theme(self):
self.assertEqual(settings.FEATURES["USE_CUSTOM_THEME"], True)
with patch('cms.startup.enable_theme') as mock_enable_theme:
run()
self.assertTrue(mock_enable_theme.called)
@patch.dict("django.conf.settings.FEATURES", {"USE_CUSTOM_THEME": False})
def test_run_without_theme(self):
self.assertEqual(settings.FEATURES["USE_CUSTOM_THEME"], False)
with patch('cms.startup.enable_theme') as mock_enable_theme:
run()
self.assertFalse(mock_enable_theme.called)
@patch.dict("django.conf.settings.FEATURES", {"USE_CUSTOM_THEME": True})
@override_settings(THEME_NAME="bar")
@override_settings(FAVICON_PATH="images/favicon.ico")
def test_enable_theme(self):
enable_theme()
self.assertEqual(
settings.FAVICON_PATH,
'themes/bar/images/favicon.ico'
)
exp_path = (u'themes/bar', settings.ENV_ROOT / "themes/bar/static")
self.assertIn(exp_path, settings.STATICFILES_DIRS)
|
levilucio/SyVOLT
|
refs/heads/master
|
GM2AUTOSAR_MM/Properties/positive/Himesis/HECUSysTrivialTrueCompleteLHS.py
|
1
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
class HECUSysTrivialTrueCompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HECUSysTrivialTrueCompleteLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HECUSysTrivialTrueCompleteLHS, self).__init__(name='HECUSysTrivialTrueCompleteLHS', num_nodes=3, edges=[])
# Add the edges
self.add_edges([[1, 0], [2, 1]])
# Set the graph attributes
self["mm__"] = ['MT_pre__GM2AUTOSAR_MM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = 5161642466000039534
# Set the node attributes
self.vs[0]["MT_subtypeMatching__"] = False
self.vs[0]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__PhysicalNode"""
self.vs[0]["MT_subtypes__"] = []
self.vs[0]["MT_dirty__"] = False
self.vs[0]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["GUID__"] = 8240751758364355905
self.vs[1]["MT_subtypeMatching__"] = False
self.vs[1]["MT_label__"] = """3"""
self.vs[1]["mm__"] = """MT_pre__trace_link"""
self.vs[1]["MT_subtypes__"] = []
self.vs[1]["MT_dirty__"] = False
self.vs[1]["GUID__"] = 8778405656195568961
self.vs[2]["MT_subtypeMatching__"] = False
self.vs[2]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """2"""
self.vs[2]["mm__"] = """MT_pre__System"""
self.vs[2]["MT_subtypes__"] = []
self.vs[2]["MT_dirty__"] = False
self.vs[2]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["GUID__"] = 7787791718950261241
def eval_classtype1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
|
absoludity/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/html5lib/html5lib/filters/optionaltags.py
|
1727
|
from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
if tagname == 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
# is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
# not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an <code>optgroup</code>
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
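# Illustrative usage sketch (not part of the original module), assuming
# html5lib's treewalker token stream, which is what this filter normally
# consumes inside the serializer when omit_optional_tags is enabled:
#
# import html5lib
# doc = html5lib.parse("<html><body><p>one<p>two</body></html>")
# walker = html5lib.getTreeWalker("etree")
# tokens = list(Filter(walker(doc)))  # omissible start/end tags are dropped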
|
meee1/ardupilotold
|
refs/heads/master
|
Tools/autotest/apm_unit_tests/dev/arducopter_RTL.py
|
250
|
import arducopter
def unit_test(mavproxy, mav):
'''A scripted flight plan'''
if (
arducopter.calibrate_level(mavproxy, mav) and
arducopter.arm_motors(mavproxy, mav) and
arducopter.takeoff(mavproxy, mav, alt_min=80, takeoff_throttle=1510) and
arducopter.hover(mavproxy, mav, hover_throttle=1300) and
arducopter.fly_RTL(mavproxy, mav, side=80, timeout=80)):
return True
return False
|
manazhao/tf_recsys
|
refs/heads/r1.0
|
tensorflow/python/ops/variables.py
|
10
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.util import compat
from tensorflow.python.util import tf_should_use
from tensorflow.python.util.deprecation import deprecated
class Variable(object):
"""See the @{$variables$Variables How To} for a high level overview.
A variable maintains state in the graph across calls to `run()`. You add a
variable to the graph by constructing an instance of the class `Variable`.
The `Variable()` constructor requires an initial value for the variable,
which can be a `Tensor` of any type and shape. The initial value defines the
type and shape of the variable. After construction, the type and shape of
the variable are fixed. The value can be changed using one of the assign
methods.
If you want to change the shape of a variable later you have to use an
`assign` Op with `validate_shape=False`.
Just like any `Tensor`, variables created with `Variable()` can be used as
inputs for other Ops in the graph. Additionally, all the operators
overloaded for the `Tensor` class are carried over to variables, so you can
also add nodes to the graph by just doing arithmetic on variables.
```python
import tensorflow as tf
# Create a variable.
w = tf.Variable(<initial-value>, name=<optional-name>)
# Use the variable in the graph like any Tensor.
y = tf.matmul(w, ...another variable or tensor...)
# The overloaded operators are available too.
z = tf.sigmoid(w + y)
# Assign a new value to the variable with `assign()` or a related method.
w.assign(w + 1.0)
w.assign_add(1.0)
```
When you launch the graph, variables have to be explicitly initialized before
you can run Ops that use their value. You can initialize a variable by
running its *initializer op*, restoring the variable from a save file, or
simply running an `assign` Op that assigns a value to the variable. In fact,
the variable *initializer op* is just an `assign` Op that assigns the
variable's initial value to the variable itself.
```python
# Launch the graph in a session.
with tf.Session() as sess:
# Run the variable initializer.
sess.run(w.initializer)
# ...you now can run ops that use the value of 'w'...
```
The most common initialization pattern is to use the convenience function
`global_variables_initializer()` to add an Op to the graph that initializes
all the variables. You then run that Op after launching the graph.
```python
# Add an Op to initialize global variables.
init_op = tf.global_variables_initializer()
# Launch the graph in a session.
with tf.Session() as sess:
# Run the Op that initializes global variables.
sess.run(init_op)
# ...you can now run any Op that uses variable values...
```
If you need to create a variable with an initial value dependent on another
variable, use the other variable's `initialized_value()`. This ensures that
variables are initialized in the right order.
All variables are automatically collected in the graph where they are
created. By default, the constructor adds the new variable to the graph
collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function
`global_variables()` returns the contents of that collection.
When building a machine learning model it is often convenient to distinguish
between variables holding the trainable model parameters and other variables
such as a `global step` variable used to count training steps. To make this
easier, the variable constructor supports a `trainable=<bool>` parameter. If
`True`, the new variable is also added to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`. The convenience function
`trainable_variables()` returns the contents of this collection. The
various `Optimizer` classes use this collection as the default list of
variables to optimize.
"""
def __init__(self,
initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
variable_def=None,
dtype=None,
expected_shape=None,
import_scope=None):
"""Creates a new variable with value `initial_value`.
The new variable is added to the graph collections listed in `collections`,
which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
If `trainable` is `True` the variable is also added to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
This constructor creates both a `variable` Op and an `assign` Op to set the
variable to its initial value.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
variable_def: `VariableDef` protocol buffer. If not `None`, recreates
the Variable object with its contents, referencing the variable's nodes
in the graph, which must already exist. The graph is not changed.
`variable_def` and the other arguments are mutually exclusive.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
expected_shape: A TensorShape. If set, initial_value is expected
to have this shape.
import_scope: Optional `string`. Name scope to add to the
`Variable`. Only used when initializing from protocol buffer.
Raises:
ValueError: If both `variable_def` and initial_value are specified.
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
"""
if variable_def:
# If variable_def is provided, recreates the variable from its fields.
if initial_value:
raise ValueError("variable_def and initial_value are mutually "
"exclusive.")
self._init_from_proto(variable_def, import_scope=import_scope)
else:
# Create from initial_value.
self._init_from_args(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
expected_shape=expected_shape)
def __repr__(self):
return "<tf.Variable '%s' shape=%s dtype=%s>" % (
self.name, self.get_shape(), self.dtype.name)
def _init_from_args(self,
initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
dtype=None,
expected_shape=None):
"""Creates a new variable from arguments.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called.
(Note that initializer functions from init_ops.py must first be bound
to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If None, either the datatype will be kept (if initial_value is
a Tensor) or float32 will be used (if it is a Python object convertible
to a Tensor).
expected_shape: Deprecated. Ignored.
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
"""
_ = expected_shape
if initial_value is None:
raise ValueError("initial_value must be specified.")
init_from_fn = callable(initial_value)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if not isinstance(collections, (list, tuple, set)):
raise ValueError(
"collections argument to Variable constructor must be a list, tuple, "
"or set. Got %s of type %s" % (collections, type(collections)))
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
with ops.control_dependencies(None):
with ops.name_scope(name, "Variable", [] if init_from_fn else
[initial_value]) as name:
if init_from_fn:
# Use attr_scope and device(None) to simulate the behavior of
# colocate_with when the variable we want to colocate with doesn't
# yet exist.
true_name = ops._name_from_scope_name(name)
attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
s=[compat.as_bytes("loc:@%s" % true_name)]))
# pylint: disable=protected-access
with ops.get_default_graph()._attr_scope({"_class": attr}):
with ops.name_scope("Initializer"), ops.device(None):
self._initial_value = ops.convert_to_tensor(
initial_value(), name="initial_value", dtype=dtype)
shape = (self._initial_value.get_shape()
if validate_shape else tensor_shape.unknown_shape())
self._variable = state_ops.variable_op_v2(
shape,
self._initial_value.dtype.base_dtype,
name=name)
# pylint: enable=protected-access
# Or get the initial value from a Tensor or Python object.
else:
self._initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
# pylint: disable=protected-access
if self._initial_value.op._get_control_flow_context() is not None:
raise ValueError(
"Initializer for variable %s is from inside a control-flow "
"construct, such as a loop or conditional. When creating a "
"variable inside a loop or conditional, use a lambda as the "
"initializer." % name)
# pylint: enable=protected-access
shape = (self._initial_value.get_shape()
if validate_shape else tensor_shape.unknown_shape())
# In this case, the variable op can't be created until after the
# initial_value has been converted to a Tensor with a known type.
self._variable = state_ops.variable_op_v2(
shape,
self._initial_value.dtype.base_dtype,
name=name)
# Manually overrides the variable's shape with the initial value's.
if validate_shape:
initial_value_shape = self._initial_value.get_shape()
if not initial_value_shape.is_fully_defined():
raise ValueError("initial_value must have a shape specified: %s" %
self._initial_value)
# If 'initial_value' makes use of other variables, make sure we don't
# have an issue if these other variables aren't initialized first by
# using their initialized_value() method.
self._initializer_op = state_ops.assign(
self._variable,
self._build_initializer_expr(self._initial_value),
validate_shape=validate_shape).op
# TODO(vrv): Change this class to not take caching_device, but
# to take the op to colocate the snapshot with, so we can use
# colocation rather than devices.
if caching_device is not None:
with ops.device(caching_device):
self._snapshot = array_ops.identity(self._variable, name="read")
else:
with ops.colocate_with(self._variable.op):
self._snapshot = array_ops.identity(self._variable, name="read")
ops.add_to_collections(collections, self)
self._caching_device = caching_device
self._save_slice_info = None
def _init_from_proto(self, variable_def, import_scope=None):
"""Recreates the Variable object from a `VariableDef` protocol buffer.
Args:
variable_def: `VariableDef` protocol buffer, describing a variable
whose nodes already exists in the graph.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(variable_def, variable_pb2.VariableDef)
# Create from variable_def.
g = ops.get_default_graph()
self._variable = g.as_graph_element(
ops.prepend_name_scope(variable_def.variable_name,
import_scope=import_scope))
self._initializer_op = g.as_graph_element(
ops.prepend_name_scope(variable_def.initializer_name,
import_scope=import_scope))
self._snapshot = g.as_graph_element(
ops.prepend_name_scope(variable_def.snapshot_name,
import_scope=import_scope))
if variable_def.HasField("save_slice_info_def"):
self._save_slice_info = Variable.SaveSliceInfo(
save_slice_info_def=variable_def.save_slice_info_def)
else:
self._save_slice_info = None
self._caching_device = None
def _as_graph_element(self):
"""Conversion function for Graph.as_graph_element()."""
return self._variable
def _AsTensor(self): # pylint: disable=invalid-name
"""Converts this variable to a Tensor.
See @{tf.Variable.value}.
Returns:
A `Tensor` containing the value of the variable.
"""
return self._snapshot
def __iter__(self):
"""Dummy method to prevent iteration. Do not call.
NOTE(mrry): If we register __getitem__ as an overloaded operator,
Python will valiantly attempt to iterate over the variable's Tensor from 0
to infinity. Declaring this method prevents this unintended behavior.
Raises:
TypeError: when invoked.
"""
raise TypeError("'Variable' object is not iterable.")
def value(self):
"""Returns the last snapshot of this variable.
You usually do not need to call this method as all ops that need the value
of the variable call it automatically through a `convert_to_tensor()` call.
Returns a `Tensor` which holds the value of the variable. You cannot
assign a new value to this tensor as it is not a reference to the variable.
To avoid copies, if the consumer of the returned value is on the same device
as the variable, this actually returns the live value of the variable, not
a copy. Updates to the variable are seen by the consumer. If the consumer
is on a different device it will get a copy of the variable.
Returns:
A `Tensor` containing the value of the variable.
"""
return self._snapshot
def read_value(self):
"""Returns the value of this variable, read in the current context.
Can be different from value() if it's on another device, with control
dependencies, etc.
Returns:
A `Tensor` containing the value of the variable.
"""
return array_ops.identity(self._variable, name="read")
def _ref(self):
"""Returns a reference to this variable.
You usually do not need to call this method as all ops that need a reference
to the variable call it automatically.
The returned value is a `Tensor` which holds a reference to the variable. You can
assign a new value to the variable by passing the tensor to an assign op.
See @{tf.Variable.value} if you want to get the value of the
variable.
Returns:
A `Tensor` that is a reference to the variable.
"""
return self._variable
def set_shape(self, shape):
"""Overrides the shape for this variable.
Args:
shape: the `TensorShape` representing the overridden shape.
"""
self._ref().set_shape(shape)
self.value().set_shape(shape)
def eval(self, session=None):
"""In a session, computes and returns the value of this variable.
This is not a graph construction method, it does not add ops to the graph.
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
passed, the default session is used. See @{tf.Session} for more
information on launching a graph and on sessions.
```python
v = tf.Variable([1, 2])
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
# Usage passing the session explicitly.
print(v.eval(sess))
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
print(v.eval())
```
Args:
session: The session to use to evaluate this variable. If
none, the default session is used.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
return self._variable.eval(session=session)
def initialized_value(self):
"""Returns the value of the initialized variable.
You should use this instead of the variable itself to initialize another
variable with a value that depends on the value of this variable.
```python
# Initialize 'v' with a random tensor.
v = tf.Variable(tf.truncated_normal([10, 40]))
# Use `initialized_value` to guarantee that `v` has been
# initialized before its value is used to initialize `w`.
# The random values are picked only once.
w = tf.Variable(v.initialized_value() * 2.0)
```
Returns:
A `Tensor` holding the value of this variable after its initializer
has run.
"""
with ops.control_dependencies(None):
return control_flow_ops.cond(is_variable_initialized(self),
self.read_value,
lambda: self.initial_value)
@property
def initial_value(self):
"""Returns the Tensor used as the initial value for the variable.
Note that this is different from `initialized_value()` which runs
the op that initializes the variable before returning its value.
This method returns the tensor that is used by the op that initializes
the variable.
Returns:
A `Tensor`.
"""
return self._initial_value
def assign(self, value, use_locking=False):
"""Assigns a new value to the variable.
This is essentially a shortcut for `assign(self, value)`.
Args:
value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
Returns:
A `Tensor` that will hold the new value of this variable after
the assignment has completed.
"""
return state_ops.assign(self._variable, value, use_locking=use_locking)
def assign_add(self, delta, use_locking=False):
"""Adds a value to this variable.
This is essentially a shortcut for `assign_add(self, delta)`.
Args:
delta: A `Tensor`. The value to add to this variable.
use_locking: If `True`, use locking during the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the addition has completed.
"""
return state_ops.assign_add(self._variable, delta, use_locking=use_locking)
def assign_sub(self, delta, use_locking=False):
"""Subtracts a value from this variable.
This is essentially a shortcut for `assign_sub(self, delta)`.
Args:
delta: A `Tensor`. The value to subtract from this variable.
use_locking: If `True`, use locking during the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the subtraction has completed.
"""
return state_ops.assign_sub(self._variable, delta, use_locking=use_locking)
def scatter_sub(self, sparse_delta, use_locking=False):
"""Subtracts `IndexedSlices` from this variable.
This is essentially a shortcut for `scatter_sub(self, sparse_delta.indices,
sparse_delta.values)`.
Args:
sparse_delta: `IndexedSlices` to be subtracted from this variable.
use_locking: If `True`, use locking during the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, ops.IndexedSlices):
raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
return state_ops.scatter_sub(
self._variable,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking)
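# Illustrative sketch (not part of the original file): subtracting sparse
# row updates from a variable, assuming an active session.
#
# v = tf.Variable([[1., 1.], [2., 2.], [3., 3.]])
# delta = tf.IndexedSlices(values=tf.constant([[1., 1.], [1., 1.]]),
#                          indices=tf.constant([0, 2]))
# update = v.scatter_sub(delta)  # rows 0 and 2 each decrease by [1., 1.]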
def _strided_slice_assign(self,
begin,
end,
strides,
value,
name,
begin_mask,
end_mask,
ellipsis_mask,
new_axis_mask,
shrink_axis_mask):
return gen_array_ops.strided_slice_assign(ref=self._ref(),
begin=begin,
end=end,
strides=strides,
value=value,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
def count_up_to(self, limit):
"""Increments this variable until it reaches `limit`.
When that Op is run it tries to increment the variable by `1`. If
incrementing the variable would bring it above `limit` then the Op raises
the exception `OutOfRangeError`.
If no error is raised, the Op outputs the value of the variable before
the increment.
This is essentially a shortcut for `count_up_to(self, limit)`.
Args:
limit: value at which incrementing the variable raises an error.
Returns:
A `Tensor` that will hold the variable value before the increment. If no
other Op modifies this variable, the values produced will all be
distinct.
"""
return state_ops.count_up_to(self._variable, limit=limit)
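# Illustrative sketch (not part of the original file): a counter driven with
# count_up_to, assuming an active session.
#
# counter = tf.Variable(0)
# next_val = counter.count_up_to(3)
# sess.run(next_val)  # yields 0, then 1, then 2; a fourth run raises
#                     # OutOfRangeError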
def load(self, value, session=None):
"""Load new value into this variable
Writes new value to variable's memory. Doesn't add ops to the graph.
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
passed, the default session is used. See @{tf.Session} for more
information on launching a graph and on sessions.
```python
v = tf.Variable([1, 2])
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
# Usage passing the session explicitly.
v.load([2, 3], sess)
print(v.eval(sess)) # prints [2 3]
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
v.load([3, 4], sess)
print(v.eval()) # prints [3 4]
```
Args:
value: New variable value
session: The session to use to evaluate this variable. If
none, the default session is used.
Raises:
ValueError: Session is not passed and no default session
"""
session = session or ops.get_default_session()
if session is None:
raise ValueError(
"Either session argument should be provided or default session "
"should be established")
session.run(self._initializer_op, {self._initializer_op.inputs[1]: value})
# Conversion to tensor.
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name
"""Utility function for converting a Variable to a Tensor."""
_ = name
if dtype and not dtype.is_compatible_with(v.dtype):
raise ValueError(
"Incompatible type conversion requested to type '%s' for variable "
"of type '%s'" % (dtype.name, v.dtype.name))
if as_ref:
return v._ref() # pylint: disable=protected-access
else:
return v.value()
@staticmethod
def _OverloadAllOperators(): # pylint: disable=invalid-name
"""Register overloads for all operators."""
for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
Variable._OverloadOperator(operator)
# For slicing, bind getitem differently than a tensor (use SliceHelperVar
# instead)
# pylint: disable=protected-access
setattr(Variable, "__getitem__", array_ops._SliceHelperVar)
@staticmethod
def _OverloadOperator(operator): # pylint: disable=invalid-name
"""Defer an operator overload to `ops.Tensor`.
We pull the operator out of ops.Tensor dynamically to avoid ordering issues.
Args:
operator: string. The operator name.
"""
def _run_op(a, *args):
# pylint: disable=protected-access
return getattr(ops.Tensor, operator)(a._AsTensor(), *args)
# Propagate __doc__ to wrapper
try:
_run_op.__doc__ = getattr(ops.Tensor, operator).__doc__
except AttributeError:
pass
setattr(Variable, operator, _run_op)
def _build_initializer_expr(self, initial_value):
"""Build an expression suitable to initialize a variable.
Replace references to variables in initial_value with references to the
variable initial values instead.
Args:
initial_value: original expression
Returns:
A tensorflow expression suitable to initialize a variable.
"""
if isinstance(initial_value, Variable):
return initial_value.initialized_value()
elif isinstance(initial_value, ops.Tensor):
new_op = self._build_initializer_expr(initial_value.op)
if new_op != initial_value.op:
if isinstance(new_op, ops.Tensor):
return new_op
else:
return ops.Tensor(new_op, initial_value.value_index,
initial_value.dtype)
else:
return initial_value
elif isinstance(initial_value, ops.Operation):
if initial_value.node_def.op in [
"IsVariableInitialized", "VarIsInitializedOp", "ReadVariableOp"
]:
return initial_value
if initial_value.node_def.op in ["Variable", "VariableV2", "VarHandleOp"]:
return self._find_initialized_value_for_variable(initial_value)
modified = False
new_inputs = []
for tensor in initial_value.inputs:
new_tensor = self._build_initializer_expr(tensor)
new_inputs.append(new_tensor)
if new_tensor != tensor:
modified = True
if modified:
new_name = initial_value.node_def.name + "_" + self.name
new_name = new_name.replace(":", "_")
new_op = initial_value.node_def.op
new_op = new_op.replace("RefSwitch", "Switch")
new_value = self.graph.create_op(
new_op,
new_inputs,
# pylint: disable=protected-access
initial_value._output_types,
# pylint: enable=protected-access
name=new_name,
attrs=initial_value.node_def.attr)
return new_value
else:
return initial_value
else:
return initial_value
def _find_initialized_value_for_variable(self, variable_op):
"""Find the initial value for a variable op.
To do so, look up the variable op in the variables collections.
Args:
variable_op: a TensorFlow variable Operation
Returns:
The initial value for the variable.
"""
try:
var_names = [variable_op.node_def.name, variable_op.node_def.name + ":0"]
global_vars = self.graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
for var in global_vars:
if var.name in var_names:
return var.initialized_value()
local_vars = self.graph.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
for var in local_vars:
if var.name in var_names:
return var.initialized_value()
except AttributeError:
# Return the variable itself when an incomplete user defined variable type
# was put in the collection.
return variable_op
return variable_op
# NOTE(mrry): This enables the Variable's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Variable class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Variables interact
# with ndarrays.
__array_priority__ = 100
@property
def name(self):
"""The name of this variable."""
return self._variable.name
@property
def initializer(self):
"""The initializer operation for this variable."""
return self._initializer_op
@property
def device(self):
"""The device of this variable."""
return self._variable.device
@property
def dtype(self):
"""The `DType` of this variable."""
return self._variable.dtype
@property
def op(self):
"""The `Operation` of this variable."""
return self._variable.op
@property
def graph(self):
"""The `Graph` of this variable."""
return self._variable.graph
@property
def shape(self):
"""The `TensorShape` of this variable.
Returns:
A `TensorShape`.
"""
return self._variable.get_shape()
def get_shape(self):
"""Alias of Variable.shape."""
return self.shape
def to_proto(self, export_scope=None):
"""Converts a `Variable` to a `VariableDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `VariableDef` protocol buffer, or `None` if the `Variable` is not
in the specified name scope.
"""
if (export_scope is None or
self._variable.name.startswith(export_scope)):
var_def = variable_pb2.VariableDef()
var_def.variable_name = ops.strip_name_scope(
self._variable.name, export_scope)
var_def.initializer_name = ops.strip_name_scope(
self.initializer.name, export_scope)
var_def.snapshot_name = ops.strip_name_scope(
self._snapshot.name, export_scope)
if self._save_slice_info:
var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(
export_scope=export_scope))
return var_def
else:
return None
@staticmethod
def from_proto(variable_def, import_scope=None):
"""Returns a `Variable` object created from `variable_def`."""
return Variable(variable_def=variable_def,
import_scope=import_scope)
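# Illustrative round-trip sketch (not part of the original file): serializing
# a variable to a VariableDef proto and recreating a Variable object that
# references the same nodes in the graph.
#
# v = tf.Variable([1.0], name="v")
# proto = v.to_proto()
# v2 = tf.Variable.from_proto(proto)  # same underlying ops as v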
class SaveSliceInfo(object):
"""Information on how to save this Variable as a slice.
Provides internal support for saving variables as slices of a larger
variable. This API is not public and is subject to change.
Available properties:
* full_name
* full_shape
* var_offset
* var_shape
"""
def __init__(self,
full_name=None,
full_shape=None,
var_offset=None,
var_shape=None,
save_slice_info_def=None,
import_scope=None):
"""Create a `SaveSliceInfo`.
Args:
full_name: Name of the full variable of which this `Variable` is a
slice.
full_shape: Shape of the full variable, as a list of int.
var_offset: Offset of this `Variable` into the full variable, as a
list of int.
var_shape: Shape of this `Variable`, as a list of int.
save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,
recreates the SaveSliceInfo object from its contents.
`save_slice_info_def` and other arguments are mutually
exclusive.
import_scope: Optional `string`. Name scope to add. Only used
when initializing from protocol buffer.
"""
if save_slice_info_def:
assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
self.full_name = ops.prepend_name_scope(
save_slice_info_def.full_name, import_scope=import_scope)
self.full_shape = [i for i in save_slice_info_def.full_shape]
self.var_offset = [i for i in save_slice_info_def.var_offset]
self.var_shape = [i for i in save_slice_info_def.var_shape]
else:
self.full_name = full_name
self.full_shape = full_shape
self.var_offset = var_offset
self.var_shape = var_shape
@property
def spec(self):
"""Computes the spec string used for saving."""
full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
sl_spec = ":".join([
"%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)
])
return full_shape_str + sl_spec
def to_proto(self, export_scope=None):
"""Returns a SaveSliceInfoDef() proto.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not
in the specified name scope.
"""
if (export_scope is None or
self.full_name.startswith(export_scope)):
save_slice_info_def = variable_pb2.SaveSliceInfoDef()
save_slice_info_def.full_name = ops.strip_name_scope(
self.full_name, export_scope)
for i in self.full_shape:
save_slice_info_def.full_shape.append(i)
for i in self.var_offset:
save_slice_info_def.var_offset.append(i)
for i in self.var_shape:
save_slice_info_def.var_shape.append(i)
return save_slice_info_def
else:
return None
def _set_save_slice_info(self, save_slice_info):
"""Sets the slice info for this `Variable`.
Args:
save_slice_info: A `Variable.SaveSliceInfo` object.
"""
self._save_slice_info = save_slice_info
def _get_save_slice_info(self):
return self._save_slice_info
class PartitionedVariable(object):
"""A container for partitioned `Variable` objects."""
class PartitionedVariableIterator(object):
"""An iterator that allows accessing the underlying `Variable` objects.
This iterator is necessary to control order of access when Variables
are not partitioned in a standard way along a single axis.
Allows e.g. `list(partitioned_variable)` to return a proper list.
"""
def __init__(self, partitioned_variable):
self._ix = 0
self._partitioned_variable = partitioned_variable
def __iter__(self):
return self
def __next__(self): # For python3 compatibility.
return self.next()
def next(self):
# pylint: disable=protected-access
if self._ix >= len(self._partitioned_variable._variable_list):
raise StopIteration()
variable = self._partitioned_variable._variable_list[self._ix]
# pylint: enable=protected-access
self._ix += 1
return variable
def __init__(self, name, shape, dtype, variable_list, partitions):
"""Creates a new partitioned variable wrapper.
Variables passed via the variable_list must contain a save_slice_info
field. Concatenation and iteration are in lexicographic order according
to the var_offset property of the save_slice_info.
Args:
name: String. Overall name of the variables.
shape: List of integers. Overall shape of the variables.
dtype: Type of the variables.
variable_list: List of `Variable` that comprise this partitioned variable.
partitions: List of integers. Number of partitions for each dimension.
Raises:
TypeError: If `variable_list` is not a list of `Variable` objects, or
`partitions` is not a list.
ValueError: If `variable_list` is empty, or the `Variable` shape
information does not match `shape`, or `partitions` has invalid values.
"""
if not isinstance(variable_list, (list, tuple)):
raise TypeError(
"variable_list is not a list or tuple: %s" % variable_list)
if not isinstance(partitions, (list, tuple)):
raise TypeError("partitions is not a list or tuple: %s" % partitions)
if not all([p >= 1 for p in partitions]):
raise ValueError("partition values must be positive: %s" % partitions)
if not variable_list:
raise ValueError("variable_list may not be empty")
# pylint: disable=protected-access
if not all(v._get_save_slice_info() is not None for v in variable_list):
  raise ValueError(
      "All variables must have a save_slice_info available: %s"
      % [v.name for v in variable_list])
if len(shape) != len(partitions):
  raise ValueError("len(shape) != len(partitions): %s vs. %s"
                   % (shape, partitions))
if not all(v._get_save_slice_info().full_shape == shape
           for v in variable_list):
  raise ValueError(
      "All variables' full shapes must match shape: %s; "
      "but full shapes were: %s"
      % (shape, str([v._get_save_slice_info().full_shape
                     for v in variable_list])))
# Sort the variable_list lexicographically according to the var_offset
# of each variable's save_slice_info.
self._variable_list = sorted(
    variable_list, key=lambda v: v._get_save_slice_info().var_offset)
# pylint: enable=protected-access
self._name = name
self._shape = shape
self._dtype = dtype
self._partitions = partitions
self._as_tensor = None
def __iter__(self):
"""Return an iterable for accessing the underlying partition Variables."""
return self.PartitionedVariableIterator(self)
def __len__(self):
num_partition_axes = len(self._partition_axes())
if num_partition_axes > 1:
raise ValueError("Cannot get a length for %d > 1 partition axes"
% num_partition_axes)
return len(self._variable_list)
def _partition_axes(self):
if all([p == 1 for p in self._partitions]):
return [0]
else:
return [i for i, p in enumerate(self._partitions) if p > 1]
def _concat(self):
"""Returns the overall concatenated value as a `Tensor`.
This is different from using the partitioned variable directly as a tensor
(through tensor conversion and `as_tensor`) in that it creates a new set of
operations that keeps the control dependencies from its scope.
Returns:
`Tensor` containing the concatenated value.
"""
if len(self._variable_list) == 1:
with ops.name_scope(None):
return array_ops.identity(self._variable_list[0], name=self._name)
partition_axes = self._partition_axes()
if len(partition_axes) > 1:
raise NotImplementedError(
"Cannot concatenate along more than one dimension: %s. "
"Multi-axis partition concat is not supported" % str(partition_axes))
partition_ix = partition_axes[0]
with ops.name_scope(self._name + "/ConcatPartitions/"):
concatenated = array_ops.concat(self._variable_list, partition_ix)
with ops.name_scope(None):
return array_ops.identity(concatenated, name=self._name)
def as_tensor(self):
"""Returns the overall concatenated value as a `Tensor`.
The returned tensor will not inherit the control dependencies from the scope
where the value is used, which is similar to getting the value of
`Variable`.
Returns:
`Tensor` containing the concatenated value.
"""
with ops.control_dependencies(None):
return self._concat()
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):
# pylint: disable=invalid-name
_ = name
if dtype is not None and not dtype.is_compatible_with(v.dtype):
raise ValueError(
"Incompatible type conversion requested to type '%s' for variable "
"of type '%s'" % (dtype.name, v.dtype.name))
if as_ref:
raise NotImplementedError(
"PartitionedVariable doesn't support being used as a reference.")
else:
return v.as_tensor()
@property
def name(self):
return self._name
@property
def dtype(self):
return self._dtype
def get_shape(self):
return self._shape
def _get_variable_list(self):
return self._variable_list
def _get_partitions(self):
return self._partitions
def assign(self, value, use_locking=False):
_ = value, use_locking
raise NotImplementedError(
"assign() has not been implemented for PartitionedVariable.")
def global_variables():
"""Returns global variables.
Global variables are variables that are shared across machines in a
distributed environment. The `Variable()` constructor or `get_variable()`
automatically adds new variables to the graph collection
`GraphKeys.GLOBAL_VARIABLES`.
This convenience function returns the contents of that collection.
An alternative to global variables are local variables. See
@{tf.local_variables}
Returns:
A list of `Variable` objects.
"""
return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
@deprecated("2017-03-02", "Please use tf.global_variables instead.")
def all_variables():
"""See `tf.global_variables`."""
return global_variables()
def _all_saveable_objects():
"""Returns all variables and `SaveableObject`s that must be checkpointed.
Returns:
A list of `Variable` and `SaveableObject` to be checkpointed
"""
# TODO(andreasst): make this function public once things are settled.
return (ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) +
ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS))
def local_variables():
"""Returns local variables.
Local variables are per-process variables, usually not saved/restored to
checkpoint, and are used for temporary or intermediate values.
For example, they can be used as counters for metrics computation or the
number of epochs this machine has read data for.
The `tf.contrib.framework.local_variable()` function automatically adds the
new variable to `GraphKeys.LOCAL_VARIABLES`.
This convenience function returns the contents of that collection.
An alternative to local variables are global variables. See
@{tf.global_variables}
Returns:
A list of local `Variable` objects.
"""
return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def model_variables():
"""Returns all variables in the MODEL_VARIABLES collection.
Returns:
A list of local Variable objects.
"""
return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES)
def trainable_variables():
"""Returns all variables created with `trainable=True`.
When passed `trainable=True`, the `Variable()` constructor automatically
adds new variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the
contents of that collection.
Returns:
A list of Variable objects.
"""
return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def moving_average_variables():
"""Returns all variables that maintain their moving averages.
If an `ExponentialMovingAverage` object is created and the `apply()`
method is called on a list of variables, these variables will
be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
This convenience function returns the contents of that collection.
Returns:
A list of Variable objects.
"""
return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES)
def variables_initializer(var_list, name="init"):
"""Returns an Op that initializes a list of variables.
After you launch the graph in a session, you can run the returned Op to
initialize all the variables in `var_list`. This Op runs all the
initializers of the variables in `var_list` in parallel.
Calling `initialize_variables()` is equivalent to passing the list of
initializers to `Group()`.
If `var_list` is empty, however, the function still returns an Op that can
be run. That Op just has no effect.
Args:
var_list: List of `Variable` objects to initialize.
name: Optional name for the returned operation.
Returns:
An Op that runs the initializers of all the specified variables.
"""
if var_list:
return control_flow_ops.group(*[v.initializer for v in var_list], name=name)
return control_flow_ops.no_op(name=name)
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.variables_initializer` instead.")
def initialize_variables(var_list, name="init"):
"""See `tf.variables_initializer`."""
return variables_initializer(var_list, name=name)
def global_variables_initializer():
"""Returns an Op that initializes global variables.
This is just a shortcut for `variables_initializer(global_variables())`
Returns:
An Op that initializes global variables in the graph.
"""
return variables_initializer(global_variables())
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.global_variables_initializer` instead.")
def initialize_all_variables():
"""See `tf.global_variables_initializer`."""
return global_variables_initializer()
def local_variables_initializer():
"""Returns an Op that initializes all local variables.
This is just a shortcut for `variables_initializer(local_variables())`
Returns:
An Op that initializes all local variables in the graph.
"""
return variables_initializer(local_variables())
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.local_variables_initializer` instead.")
def initialize_local_variables():
"""See `tf.local_variables_initializer`."""
return local_variables_initializer()
@tf_should_use.should_use_result
def is_variable_initialized(variable):
"""Tests if a variable has been initialized.
Args:
variable: A `Variable`.
Returns:
Returns a scalar boolean Tensor, `True` if the variable has been
initialized, `False` otherwise.
"""
return state_ops.is_variable_initialized(variable)
@tf_should_use.should_use_result
def assert_variables_initialized(var_list=None):
"""Returns an Op to check if variables are initialized.
NOTE: This function is obsolete and will be removed in 6 months. Please
change your implementation to use `report_uninitialized_variables()`.
When run, the returned Op will raise the exception `FailedPreconditionError`
if any of the variables has not yet been initialized.
Note: This function is implemented by trying to fetch the values of the
variables. If one of the variables is not initialized a message may be
logged by the C++ runtime. This is expected.
Args:
var_list: List of `Variable` objects to check. Defaults to the
value of `global_variables() + local_variables()`.
Returns:
An Op, or None if there are no variables.
"""
if var_list is None:
var_list = global_variables() + local_variables()
# Backwards compatibility for old-style variables. TODO(touts): remove.
if not var_list:
var_list = []
for op in ops.get_default_graph().get_operations():
if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
var_list.append(op.outputs[0])
if not var_list:
return None
else:
ranks = []
for var in var_list:
with ops.colocate_with(var.op):
ranks.append(array_ops.rank_internal(var, optimize=False))
if len(ranks) == 1:
return ranks[0]
else:
return array_ops.stack(ranks)
@tf_should_use.should_use_result
def report_uninitialized_variables(var_list=None,
name="report_uninitialized_variables"):
"""Adds ops to list the names of uninitialized variables.
When run, it returns a 1-D tensor containing the names of uninitialized
variables if there are any, or an empty array if there are none.
Args:
var_list: List of `Variable` objects to check. Defaults to the
value of `global_variables() + local_variables()`
name: Optional name of the `Operation`.
Returns:
A 1-D tensor containing names of the uninitialized variables, or an empty
1-D tensor if there are no variables or no uninitialized variables.
"""
if var_list is None:
var_list = global_variables() + local_variables()
# Backwards compatibility for old-style variables. TODO(touts): remove.
if not var_list:
var_list = []
for op in ops.get_default_graph().get_operations():
if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
var_list.append(op.outputs[0])
with ops.name_scope(name):
# Run all operations on CPU
with ops.device("/cpu:0"):
if not var_list:
# Return an empty tensor so we only need to check for returned tensor
# size being 0 as an indication that the model is ready.
return array_ops.constant([], dtype=dtypes.string)
else:
# Get a 1-D boolean tensor listing whether each variable is initialized.
variables_mask = math_ops.logical_not(
array_ops.stack(
[state_ops.is_variable_initialized(v) for v in var_list]))
# Get a 1-D string tensor containing all the variable names.
variable_names_tensor = array_ops.constant(
[s.op.name for s in var_list])
# Return a 1-D tensor containing all the names of
# uninitialized variables.
return array_ops.boolean_mask(variable_names_tensor, variables_mask)
# pylint: disable=protected-access
Variable._OverloadAllOperators()
ops.register_tensor_conversion_function(
PartitionedVariable, PartitionedVariable._TensorConversionFunction)
# pylint: enable=protected-access
ops.register_dense_tensor_like_type(Variable)
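# --- Hedged usage sketch (illustrative; not part of the original module).
# How the initializer helpers above fit together in a TF1-style session;
# `tf` stands for the public API that re-exports these functions:
#
#   v = tf.Variable(tf.zeros([3]), name="v")
#   init = tf.variables_initializer([v])  # groups v.initializer
#   with tf.Session() as sess:
#       print(sess.run(tf.report_uninitialized_variables()))  # [b'v']
#       sess.run(init)
#       print(sess.run(tf.report_uninitialized_variables()))  # []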
|
brunobergher/dotfiles
|
refs/heads/master
|
sublime/pygments/all/pygments/lexers/data.py
|
45
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.data
~~~~~~~~~~~~~~~~~~~~
Lexers for data file formats.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \
include, bygroups, inherit
from pygments.token import Text, Comment, Keyword, Name, String, Number, \
Punctuation, Literal
__all__ = ['YamlLexer', 'JsonLexer', 'JsonLdLexer']
class YamlLexerContext(LexerContext):
"""Indentation context for the YAML lexer."""
def __init__(self, *args, **kwds):
super(YamlLexerContext, self).__init__(*args, **kwds)
self.indent_stack = []
self.indent = -1
self.next_indent = 0
self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
"""
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
.. versionadded:: 0.11
"""
name = 'YAML'
aliases = ['yaml']
filenames = ['*.yaml', '*.yml']
mimetypes = ['text/x-yaml']
def something(token_class):
"""Do not produce empty tokens."""
def callback(lexer, match, context):
text = match.group()
if not text:
return
yield match.start(), token_class, text
context.pos = match.end()
return callback
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback
def set_indent(token_class, implicit=False):
"""Set the previously saved indentation level."""
def callback(lexer, match, context):
text = match.group()
if context.indent < context.next_indent:
context.indent_stack.append(context.indent)
context.indent = context.next_indent
if not implicit:
context.next_indent += len(text)
yield match.start(), token_class, text
context.pos = match.end()
return callback
def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
"""Process an empty line in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if (context.block_scalar_indent is None or
len(text) <= context.block_scalar_indent):
if text:
yield match.start(), indent_token_class, text
else:
indentation = text[:context.block_scalar_indent]
content = text[context.block_scalar_indent:]
yield match.start(), indent_token_class, indentation
yield (match.start()+context.block_scalar_indent,
content_token_class, content)
context.pos = match.end()
return callback
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_plain_scalar_indent(token_class):
"""Process indentation spaces in a plain scalar."""
def callback(lexer, match, context):
text = match.group()
if len(text) <= context.indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
tokens = {
# the root rules
'root': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# the '%YAML' directive
(r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
# the %TAG directive
(r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
# document start and document end indicators
(r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
'block-line'),
# indentation spaces
(r'[ ]*(?!\s|$)', save_indent(Text, start=True),
('block-line', 'indentation')),
],
# trailing whitespaces after directives or a block scalar indicator
'ignored-line': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# line break
(r'\n', Text, '#pop:2'),
],
# the %YAML directive
'yaml-directive': [
# the version number
(r'([ ]+)([0-9]+\.[0-9]+)',
bygroups(Text, Number), 'ignored-line'),
],
# the %TAG directive
'tag-directive': [
# a tag handle and the corresponding prefix
(r'([ ]+)(!|![\w-]*!)'
r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)',
bygroups(Text, Keyword.Type, Text, Keyword.Type),
'ignored-line'),
],
# block scalar indicators and indentation spaces
'indentation': [
# trailing whitespaces are ignored
(r'[ ]*$', something(Text), '#pop:2'),
# whitespaces preceding block collection indicators
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
# block collection indicators
(r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
# the beginning of a block line
(r'[ ]*', save_indent(Text), '#pop'),
],
# an indented line in the block context
'block-line': [
# the line end
(r'[ ]*(?=#|$)', something(Text), '#pop'),
# whitespaces separating tokens
(r'[ ]+', Text),
# tags, anchors and aliases,
include('descriptors'),
# block collections and scalars
include('block-nodes'),
# flow collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)',
something(Name.Variable),
'plain-scalar-in-block-context'),
],
# tags, anchors, aliases
'descriptors': [
# a full-form tag
(r'!<[\w;/?:@&=+$,.!~*\'()\[\]%-]+>', Keyword.Type),
# a tag in the form '!', '!suffix' or '!handle!suffix'
(r'!(?:[\w-]+)?'
r'(?:![\w;/?:@&=+$,.!~*\'()\[\]%-]+)?', Keyword.Type),
# an anchor
(r'&[\w-]+', Name.Label),
# an alias
(r'\*[\w-]+', Name.Variable),
],
# block collections and scalars
'block-nodes': [
# implicit key
(r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
# literal and folded scalars
(r'[|>]', Punctuation.Indicator,
('block-scalar-content', 'block-scalar-header')),
],
# flow collections and quoted scalars
'flow-nodes': [
# a flow sequence
(r'\[', Punctuation.Indicator, 'flow-sequence'),
# a flow mapping
(r'\{', Punctuation.Indicator, 'flow-mapping'),
# a single-quoted scalar
(r'\'', String, 'single-quoted-scalar'),
# a double-quoted scalar
(r'\"', String, 'double-quoted-scalar'),
],
# the content of a flow collection
'flow-collection': [
# whitespaces
(r'[ ]+', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# simple indicators
(r'[?:,]', Punctuation.Indicator),
# tags, anchors and aliases
include('descriptors'),
# nested collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])',
something(Name.Variable),
'plain-scalar-in-flow-context'),
],
# a flow sequence indicated by '[' and ']'
'flow-sequence': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\]', Punctuation.Indicator, '#pop'),
],
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\}', Punctuation.Indicator, '#pop'),
],
# block scalar lines
'block-scalar-content': [
# line break
(r'\n', Text),
# empty line
(r'^[ ]+$',
parse_block_scalar_empty_line(Text, Name.Constant)),
# indentation spaces (we may leave the state here)
(r'^[ ]*', parse_block_scalar_indent(Text)),
# line content
(r'[\S\t ]+', Name.Constant),
],
# the content of a literal or folded scalar
'block-scalar-header': [
# indentation indicator followed by chomping flag
(r'([1-9])?[+-]?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
# chomping flag followed by indentation indicator
(r'[+-]?([1-9])?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
],
# ignored and regular whitespaces in quoted scalars
'quoted-scalar-whitespaces': [
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
],
# single-quoted scalars
'single-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of the quote character
(r'\'\'', String.Escape),
# regular non-whitespace characters
(r'[^\s\']+', String),
# the closing quote
(r'\'', String, '#pop'),
],
# double-quoted scalars
'double-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of special characters
(r'\\[0abt\tn\nvfre "\\N_LP]', String),
# escape codes
(r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
String.Escape),
# regular non-whitespace characters
(r'[^\s"\\]+', String),
# the closing quote
(r'"', String, '#pop'),
],
# the beginning of a new line while scanning a plain scalar
'plain-scalar-in-block-context-new-line': [
# empty lines
(r'^[ ]+$', Text),
# line breaks
(r'\n+', Text),
# document start and document end indicators
(r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
# indentation spaces (we may leave the block line state here)
(r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
],
# a plain scalar in the block context
'plain-scalar-in-block-context': [
# the scalar ends with the ':' indicator
(r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
# the scalar ends with whitespaces followed by a comment
(r'[ ]+(?=#)', Text, '#pop'),
# trailing whitespaces are ignored
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Plain),
# regular non-whitespace characters
(r'(?::(?!\s)|[^\s:])+', Literal.Scalar.Plain),
],
# a plain scalar in the flow context
'plain-scalar-in-flow-context': [
# the scalar ends with an indicator character
(r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
# the scalar ends with a comment
(r'[ ]+(?=#)', Text, '#pop'),
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
# regular non-whitespace characters
(r'[^\s,:?\[\]{}]+', Name.Variable),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
if context is None:
context = YamlLexerContext(text, 0)
return super(YamlLexer, self).get_tokens_unprocessed(text, context)
class JsonLexer(RegexLexer):
"""
For JSON data structures.
.. versionadded:: 1.5
"""
name = 'JSON'
aliases = ['json']
filenames = ['*.json']
mimetypes = ['application/json']
flags = re.DOTALL
# integer part of a number
int_part = r'-?(0|[1-9]\d*)'
# fractional part of a number
frac_part = r'\.\d+'
# exponential part of a number
exp_part = r'[eE](\+|-)?\d+'
tokens = {
'whitespace': [
(r'\s+', Text),
],
# represents a simple terminal value
'simplevalue': [
(r'(true|false|null)\b', Keyword.Constant),
(('%(int_part)s(%(frac_part)s%(exp_part)s|'
'%(exp_part)s|%(frac_part)s)') % vars(),
Number.Float),
(int_part, Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
],
# the right hand side of an object, after the attribute name
'objectattribute': [
include('value'),
(r':', Punctuation),
# comma terminates the attribute but expects more
(r',', Punctuation, '#pop'),
# a closing bracket terminates the entire object, so pop twice
(r'\}', Punctuation, ('#pop', '#pop')),
],
# a json object - { attr, attr, ... }
'objectvalue': [
include('whitespace'),
(r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
(r'\}', Punctuation, '#pop'),
],
# a json array - [ value, value, ... ]
'arrayvalue': [
include('whitespace'),
include('value'),
(r',', Punctuation),
(r'\]', Punctuation, '#pop'),
],
# a json value - either a simple value or a complex value (object or array)
'value': [
include('whitespace'),
include('simplevalue'),
(r'\{', Punctuation, 'objectvalue'),
(r'\[', Punctuation, 'arrayvalue'),
],
# the root of a json document should be a value
'root': [
include('value'),
],
}
class JsonLdLexer(JsonLexer):
"""
For `JSON-LD <http://json-ld.org/>`_ linked data.
.. versionadded:: 2.0
"""
name = 'JSON-LD'
aliases = ['jsonld', 'json-ld']
filenames = ['*.jsonld']
mimetypes = ['application/ld+json']
tokens = {
'objectvalue': [
(r'"@(context|id|value|language|type|container|list|set|'
r'reverse|index|base|vocab|graph)"', Name.Decorator,
'objectattribute'),
inherit,
],
}
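# --- Hedged usage sketch (illustrative; not part of the original module).
# Driving the lexers above through the standard pygments API:
#
#   if __name__ == '__main__':
#       sample = 'key: value\nitems:\n  - one\n  - two\n'
#       for index, token, text in YamlLexer().get_tokens_unprocessed(sample):
#           print(index, token, repr(text))
#       for token, text in JsonLexer().get_tokens('{"a": [1, 2.5, true]}'):
#           print(token, repr(text))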
|
froydnj/servo
|
refs/heads/master
|
python/mach/mach/test/providers/throw2.py
|
131
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This file exists to trigger the differences in mach error reporting between
# exceptions that occur in mach command modules themselves and in the things
# they call.
def throw_deep(message):
return throw_real(message)
def throw_real(message):
raise Exception(message)
|
SamuelDSR/YouCompleteMe-Win7-GVIM
|
refs/heads/master
|
third_party/waitress/waitress/channel.py
|
3
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import asyncore
import socket
import time
import traceback
from waitress.buffers import (
OverflowableBuffer,
ReadOnlyFileBasedBuffer,
)
from waitress.parser import HTTPRequestParser
from waitress.compat import thread
from waitress.task import (
ErrorTask,
WSGITask,
)
from waitress.utilities import (
logging_dispatcher,
InternalServerError,
)
class HTTPChannel(logging_dispatcher, object):
"""
Setting self.requests = [somerequest] prevents more requests from being
received until the out buffers have been flushed.
Setting self.requests = [] allows more requests to be received.
"""
task_class = WSGITask
error_task_class = ErrorTask
parser_class = HTTPRequestParser
request = None # A request parser instance
last_activity = 0 # Time of last activity
will_close = False # set to True to close the socket.
close_when_flushed = False # set to True to close the socket when flushed
requests = () # currently pending requests
sent_continue = False # used as a latch after sending 100 continue
force_flush = False # indicates a need to flush the outbuf
#
# ASYNCHRONOUS METHODS (including __init__)
#
def __init__(
self,
server,
sock,
addr,
adj,
map=None,
):
self.server = server
self.adj = adj
self.outbufs = [OverflowableBuffer(adj.outbuf_overflow)]
self.creation_time = self.last_activity = time.time()
# task_lock used to push/pop requests
self.task_lock = thread.allocate_lock()
# outbuf_lock used to access any outbuf
self.outbuf_lock = thread.allocate_lock()
asyncore.dispatcher.__init__(self, sock, map=map)
# Don't let asyncore.dispatcher throttle self.addr on us.
self.addr = addr
def any_outbuf_has_data(self):
for outbuf in self.outbufs:
if bool(outbuf):
return True
return False
def total_outbufs_len(self):
return sum([len(b) for b in self.outbufs]) # genexpr == more funccalls
def writable(self):
# if there's data in the out buffer or we've been instructed to close
# the channel (possibly by our server maintenance logic), run
# handle_write
return self.any_outbuf_has_data() or self.will_close
def handle_write(self):
# Precondition: there's data in the out buffer to be sent, or
# there's a pending will_close request
if not self.connected:
# we don't want to close the channel twice
return
# try to flush any pending output
if not self.requests:
# 1. There are no running tasks, so we don't need to try to lock
# the outbuf before sending
# 2. The data in the out buffer should be sent as soon as possible
# because it's either data left over from task output
# or a 100 Continue line sent within "received".
flush = self._flush_some
elif self.force_flush:
# 1. There's a running task, so we need to try to lock
# the outbuf before sending
# 2. This is the last chunk sent by the Nth of M tasks in a
# sequence on this channel, so flush it regardless of whether
# it's >= self.adj.send_bytes. We need to do this now, or it
# won't get done.
flush = self._flush_some_if_lockable
self.force_flush = False
elif (self.total_outbufs_len() >= self.adj.send_bytes):
# 1. There's a running task, so we need to try to lock
# the outbuf before sending
# 2. Only try to send if the data in the out buffer is larger
# than self.adj_bytes to avoid TCP fragmentation
flush = self._flush_some_if_lockable
else:
# 1. There's not enough data in the out buffer to bother to send
# right now.
flush = None
if flush:
try:
flush()
except socket.error:
if self.adj.log_socket_errors:
self.logger.exception('Socket error')
self.will_close = True
except:
self.logger.exception('Unexpected exception when flushing')
self.will_close = True
if self.close_when_flushed and not self.any_outbuf_has_data():
self.close_when_flushed = False
self.will_close = True
if self.will_close:
self.handle_close()
def readable(self):
# We might want to create a new task. We can only do this if:
# 1. We're not already about to close the connection.
# 2. There's no already currently running task(s).
# 3. There's no data in the output buffer that needs to be sent
# before we potentially create a new task.
return not (self.will_close or self.requests or
self.any_outbuf_has_data())
def handle_read(self):
try:
data = self.recv(self.adj.recv_bytes)
except socket.error:
if self.adj.log_socket_errors:
self.logger.exception('Socket error')
self.handle_close()
return
if data:
self.last_activity = time.time()
self.received(data)
def received(self, data):
"""
Receives input asynchronously and assigns one or more requests to the
channel.
"""
# Preconditions: there's no task(s) already running
request = self.request
requests = []
if not data:
return False
while data:
if request is None:
request = self.parser_class(self.adj)
n = request.received(data)
if request.expect_continue and request.headers_finished:
# guaranteed by parser to be a 1.1 request
request.expect_continue = False
if not self.sent_continue:
# there's no current task, so we don't need to try to
# lock the outbuf to append to it.
self.outbufs[-1].append(b'HTTP/1.1 100 Continue\r\n\r\n')
self.sent_continue = True
self._flush_some()
request.completed = False
if request.completed:
# The request (with the body) is ready to use.
self.request = None
if not request.empty:
requests.append(request)
request = None
else:
self.request = request
if n >= len(data):
break
data = data[n:]
if requests:
self.requests = requests
self.server.add_task(self)
return True
def _flush_some_if_lockable(self):
# Since our task may be appending to the outbuf, we try to acquire
# the lock, but we don't block if we can't.
locked = self.outbuf_lock.acquire(0)
if locked:
try:
self._flush_some()
finally:
self.outbuf_lock.release()
def _flush_some(self):
# Send as much data as possible to our client
sent = 0
dobreak = False
while True:
outbuf = self.outbufs[0]
outbuflen = len(outbuf)
if outbuflen <= 0:
# self.outbufs[-1] must always be a writable outbuf
if len(self.outbufs) > 1:
toclose = self.outbufs.pop(0)
try:
toclose._close()
except:
self.logger.exception(
'Unexpected error when closing an outbuf')
continue # pragma: no cover (coverage bug, it is hit)
else:
dobreak = True
while outbuflen > 0:
chunk = outbuf.get(self.adj.send_bytes)
num_sent = self.send(chunk)
if num_sent:
outbuf.skip(num_sent, True)
outbuflen -= num_sent
sent += num_sent
else:
dobreak = True
break
if dobreak:
break
if sent:
self.last_activity = time.time()
return True
return False
def handle_close(self):
for outbuf in self.outbufs:
try:
outbuf._close()
except:
self.logger.exception(
'Unknown exception while trying to close outbuf')
self.connected = False
asyncore.dispatcher.close(self)
def add_channel(self, map=None):
"""See asyncore.dispatcher
This hook keeps track of opened channels.
"""
asyncore.dispatcher.add_channel(self, map)
self.server.active_channels[self._fileno] = self
def del_channel(self, map=None):
"""See asyncore.dispatcher
This hook keeps track of closed channels.
"""
fd = self._fileno # next line sets this to None
asyncore.dispatcher.del_channel(self, map)
ac = self.server.active_channels
if fd in ac:
del ac[fd]
#
# SYNCHRONOUS METHODS
#
def write_soon(self, data):
if data:
# the async mainloop might be popping data off outbuf; we can
# block here waiting for it because we're in a task thread
with self.outbuf_lock:
if data.__class__ is ReadOnlyFileBasedBuffer:
# they used wsgi.file_wrapper
self.outbufs.append(data)
nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
self.outbufs.append(nextbuf)
else:
self.outbufs[-1].append(data)
# XXX We might eventually need to pull the trigger here (to
# instruct select to stop blocking), but it slows things down so
# much that I'll hold off for now; "server push" on otherwise
# unbusy systems may suffer.
return len(data)
return 0
def service(self):
"""Execute all pending requests """
with self.task_lock:
while self.requests:
request = self.requests[0]
if request.error:
task = self.error_task_class(self, request)
else:
task = self.task_class(self, request)
try:
task.service()
except:
self.logger.exception('Exception when serving %s' %
task.request.path)
if not task.wrote_header:
if self.adj.expose_tracebacks:
body = traceback.format_exc()
else:
body = ('The server encountered an unexpected '
'internal server error')
req_version = request.version
req_headers = request.headers
request = self.parser_class(self.adj)
request.error = InternalServerError(body)
# copy some original request attributes to fulfill
# HTTP 1.1 requirements
request.version = req_version
try:
request.headers['CONNECTION'] = req_headers[
'CONNECTION']
except KeyError:
pass
task = self.error_task_class(self, request)
task.service() # must not fail
else:
task.close_on_finish = True
# we cannot allow self.requests to drop to empty til
# here; otherwise the mainloop gets confused
if task.close_on_finish:
self.close_when_flushed = True
for request in self.requests:
request._close()
self.requests = []
else:
request = self.requests.pop(0)
request._close()
self.force_flush = True
self.server.pull_trigger()
self.last_activity = time.time()
def cancel(self):
""" Cancels all pending requests """
self.force_flush = True
self.last_activity = time.time()
self.requests = []
def defer(self):
pass
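# --- Hedged sketch (illustrative; not part of the original module).
# The outbuf protocol relied on above: append() queues bytes, get(n)
# returns up to n pending bytes without consuming them, and skip(n, True)
# discards them once the socket has accepted the send:
#
#   buf = OverflowableBuffer(1 << 20)   # overflow to disk past 1 MiB
#   buf.append(b'HTTP/1.1 200 OK\r\n\r\n')
#   chunk = buf.get(8192)
#   buf.skip(len(chunk), True)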
|
intgr/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_complex_multi_apps/app1/3_auto.py
|
133
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("app1", "2_auto"), ("app2", "2_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
zooba/PTVS
|
refs/heads/master
|
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda_env/cli/main_list.py
|
7
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from argparse import RawDescriptionHelpFormatter
from conda.cli import common
from conda.cli.conda_argparse import add_parser_json
from conda.core.envs_manager import list_all_known_prefixes
description = """
List the Conda environments
"""
example = """
examples:
conda env list
conda env list --json
"""
def configure_parser(sub_parsers):
list_parser = sub_parsers.add_parser(
'list',
formatter_class=RawDescriptionHelpFormatter,
description=description,
help=description,
epilog=example,
)
add_parser_json(list_parser)
list_parser.set_defaults(func='.main_list.execute')
def execute(args, parser):
info_dict = {'envs': list_all_known_prefixes()}
common.print_envs_list(info_dict['envs'], not args.json)
if args.json:
common.stdout_json(info_dict)
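# --- Hedged sketch (illustrative; not part of the original module).
# Roughly what `conda env list --json` prints via common.stdout_json:
#
#   {
#     "envs": [
#       "/opt/conda",
#       "/opt/conda/envs/py37"
#     ]
#   }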
|
poiati/django
|
refs/heads/master
|
tests/admin_inlines/test_templates.py
|
285
|
from __future__ import unicode_literals
from django.template.loader import render_to_string
from django.test import SimpleTestCase
class TestTemplates(SimpleTestCase):
def test_javascript_escaping(self):
context = {
'inline_admin_formset': {
'formset': {'prefix': 'my-prefix'},
'opts': {'verbose_name': 'verbose name\\'},
},
}
output = render_to_string('admin/edit_inline/stacked.html', context)
self.assertIn('prefix: "my\\u002Dprefix",', output)
self.assertIn('addText: "Add another Verbose name\\u005C"', output)
output = render_to_string('admin/edit_inline/tabular.html', context)
self.assertIn('prefix: "my\\u002Dprefix",', output)
self.assertIn('addText: "Add another Verbose name\\u005C"', output)
|
kotoroshinoto/Cluster_SimpleJob_Generator
|
refs/heads/master
|
pybin/DGESeqSimulation/aligntables.py
|
1
|
"""
aligntables.py script
version 2013.08.09
@author: Bing
takes data from two spreadsheets and aligns the independent variables
"""
import os, sys
input_dir = os.path.join(os.path.dirname(__file__), 'Output')
old_dir = os.getcwd()
os.chdir(input_dir)
'''
if len(sys.argv) != 3:
sys.stderr.write('\nScript must be given 2 input files')
filename1 = sys.argv[1]
filename2 = sys.argv[2]
'''
def aligntables(filename1, filename2):
f1 = open(filename1, 'r')
f2 = open(filename2, 'r')
f3 = open('Matched_SeqSim_Pt4_' + filename1[11:14] + filename2[11:14] + 'comparison.txt', 'w')
f4 = open('Unmatched_SeqSim_Pt4_' + filename1[11:14] + filename2[11:14] + 'comparison.txt', 'w')
f1_transcripts = {}
f2_transcripts = {}
for line1 in f1:
line1 = line1.split('\t')
f1_transcripts[line1[0]] = [line1[0], line1[2]]
for line2 in f2:
line2 = line2.split('\t')
f2_transcripts[line2[0]] = [line2[0], line2[2]]
if not line2[0] in f1_transcripts:
f1_transcripts[line2[0]] = [line2[0], '0']
for key in f1_transcripts:
    # NOTE: the original loop body was left unfinished; the minimal
    # completion below is an assumption inferred from the Matched/Unmatched
    # output files opened above, not verified original behavior.
    if key in f2_transcripts:
        f3.write('\t'.join(f1_transcripts[key] + f2_transcripts[key]) + '\n')
    else:
        f4.write('\t'.join(f1_transcripts[key]) + '\n')
'''
f1_table = []
f2_table = []
for line1 in f1:
for line2 in f2:
line2 = line2.split('\t')
f2_table.append(line2)
linesplit1 = line1.split('\t')
for x in range(0, len(f2_table)):
if linesplit1[0] == f2_table[x][0]:
f3.write(line1)
else:
f4.write(line1)
f1_table.append(line1 + '\n')
'''
f1.close()
f2.close()
f3.close()
f4.close()
aligntables('SeqSim_Pt4_NC_results.txt', 'SeqSim_Pt4_TC_results.txt')
aligntables('SeqSim_Pt4_NF_results.txt', 'SeqSim_Pt4_TF_results.txt')
aligntables('SeqSim_Pt5_NC_results.txt', 'SeqSim_Pt5_TC_results.txt')
aligntables('SeqSim_Pt5_NF_results.txt', 'SeqSim_Pt5_TF_results.txt')
|
chris-wood/ns-3-dev-ndnSIM
|
refs/heads/ndnSIM-v2
|
src/buildings/bindings/callbacks_list.py
|
664
|
callback_classes = [
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
CapeDrew/DITK
|
refs/heads/master
|
Examples/Filtering/CurvatureAnisotropicDiffusionImageFilter.py
|
13
|
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================
#
# Example on the use of the CurvatureAnisotropicDiffusionImageFilter
#
from InsightToolkit import *
from sys import argv
reader = itkImageFileReaderUS2_New()
writer = itkImageFileWriterUS2_New()
inputCast = itkCastImageFilterUS2F2_New()
outputCast = itkRescaleIntensityImageFilterF2US2_New()
filter = itkCurvatureAnisotropicDiffusionImageFilterF2F2_New()
inputCast.SetInput( reader.GetOutput() )
filter.SetInput( inputCast.GetOutput() )
outputCast.SetInput( filter.GetOutput() )
writer.SetInput( outputCast.GetOutput() )
reader.SetFileName( argv[1] )
writer.SetFileName( argv[2] )
outputCast.SetOutputMinimum( 0 )
outputCast.SetOutputMaximum( 65535 )
numberOfIterations = int( argv[3] )
timeStep = float( argv[4] )
conductance = float( argv[5] )
filter.SetNumberOfIterations( numberOfIterations )
filter.SetTimeStep( timeStep )
filter.SetConductanceParameter( conductance )
writer.Update()
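# --- Hedged usage sketch (illustrative; not part of the original script).
# The positional arguments consumed above, in order:
#
#   python CurvatureAnisotropicDiffusionImageFilter.py \
#       input.png output.png 5 0.125 3.0
#   # (inputImage outputImage numberOfIterations timeStep conductance)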
|
soycode/pattern
|
refs/heads/master
|
pattern/server/cherrypy/cherrypy/lib/auth_digest.py
|
101
|
# This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
__doc__ = """An implementation of the server-side of HTTP Digest Access
Authentication, which is described in :rfc:`2617`.
Example usage, using the built-in get_ha1_dict_plain function which uses a dict
of plaintext passwords as the credentials store::
userpassdict = {'alice' : '4x5istwelve'}
get_ha1 = cherrypy.lib.auth_digest.get_ha1_dict_plain(userpassdict)
digest_auth = {'tools.auth_digest.on': True,
'tools.auth_digest.realm': 'wonderland',
'tools.auth_digest.get_ha1': get_ha1,
'tools.auth_digest.key': 'a565c27146791cfb',
}
app_config = { '/' : digest_auth }
"""
__author__ = 'visteya'
__date__ = 'April 2009'
import time
from cherrypy._cpcompat import parse_http_list, parse_keqv_list
import cherrypy
from cherrypy._cpcompat import md5, ntob
md5_hex = lambda s: md5(ntob(s)).hexdigest()
qop_auth = 'auth'
qop_auth_int = 'auth-int'
valid_qops = (qop_auth, qop_auth_int)
valid_algorithms = ('MD5', 'MD5-sess')
def TRACE(msg):
cherrypy.log(msg, context='TOOLS.AUTH_DIGEST')
# Three helper functions for users of the tool, providing three variants
# of get_ha1() functions for three different kinds of credential stores.
def get_ha1_dict_plain(user_password_dict):
"""Returns a get_ha1 function which obtains a plaintext password from a
dictionary of the form: {username : password}.
If you want a simple dictionary-based authentication scheme, with plaintext
passwords, use get_ha1_dict_plain(my_userpass_dict) as the value for the
get_ha1 argument to digest_auth().
"""
def get_ha1(realm, username):
password = user_password_dict.get(username)
if password:
return md5_hex('%s:%s:%s' % (username, realm, password))
return None
return get_ha1
def get_ha1_dict(user_ha1_dict):
"""Returns a get_ha1 function which obtains a HA1 password hash from a
dictionary of the form: {username : HA1}.
If you want a dictionary-based authentication scheme, but with
pre-computed HA1 hashes instead of plain-text passwords, use
get_ha1_dict(my_userha1_dict) as the value for the get_ha1
argument to digest_auth().
"""
def get_ha1(realm, username):
return user_ha1_dict.get(username)
return get_ha1
def get_ha1_file_htdigest(filename):
"""Returns a get_ha1 function which obtains a HA1 password hash from a
flat file with lines of the same format as that produced by the Apache
htdigest utility. For example, for realm 'wonderland', username 'alice',
and password '4x5istwelve', the htdigest line would be::
alice:wonderland:3238cdfe91a8b2ed8e39646921a02d4c
If you want to use an Apache htdigest file as the credentials store,
then use get_ha1_file_htdigest(my_htdigest_file) as the value for the
get_ha1 argument to digest_auth(). It is recommended that the filename
argument be an absolute path, so lookups keep working if the process
changes its working directory.
"""
def get_ha1(realm, username):
result = None
f = open(filename, 'r')
for line in f:
u, r, ha1 = line.rstrip().split(':')
if u == username and r == realm:
result = ha1
break
f.close()
return result
return get_ha1
def synthesize_nonce(s, key, timestamp=None):
"""Synthesize a nonce value which resists spoofing and can be checked for staleness.
Returns a string suitable as the value for 'nonce' in the www-authenticate header.
s
A string related to the resource, such as the hostname of the server.
key
A secret string known only to the server.
timestamp
An integer seconds-since-the-epoch timestamp
"""
if timestamp is None:
timestamp = int(time.time())
h = md5_hex('%s:%s:%s' % (timestamp, s, key))
nonce = '%s:%s' % (timestamp, h)
return nonce
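# --- Hedged sketch (illustrative; not part of the original module).
# A synthesized nonce is "<timestamp>:<md5(timestamp:s:key)>", so a server
# holding the same s and key can recompute the hash from the plaintext
# timestamp (see HttpDigestAuthorization.validate_nonce below):
#
#   nonce = synthesize_nonce('example.com', 'a565c27146791cfb')
#   timestamp, hashpart = nonce.split(':', 1)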
def H(s):
"""The hash function H"""
return md5_hex(s)
class HttpDigestAuthorization (object):
"""Class to parse a Digest Authorization header and perform re-calculation
of the digest.
"""
def errmsg(self, s):
return 'Digest Authorization header: %s' % s
def __init__(self, auth_header, http_method, debug=False):
self.http_method = http_method
self.debug = debug
scheme, params = auth_header.split(" ", 1)
self.scheme = scheme.lower()
if self.scheme != 'digest':
raise ValueError('Authorization scheme is not "Digest"')
self.auth_header = auth_header
# make a dict of the params
items = parse_http_list(params)
paramsd = parse_keqv_list(items)
self.realm = paramsd.get('realm')
self.username = paramsd.get('username')
self.nonce = paramsd.get('nonce')
self.uri = paramsd.get('uri')
self.method = paramsd.get('method')
self.response = paramsd.get('response') # the response digest
self.algorithm = paramsd.get('algorithm', 'MD5')
self.cnonce = paramsd.get('cnonce')
self.opaque = paramsd.get('opaque')
self.qop = paramsd.get('qop') # qop
self.nc = paramsd.get('nc') # nonce count
# perform some correctness checks
if self.algorithm not in valid_algorithms:
raise ValueError(self.errmsg("Unsupported value for algorithm: '%s'" % self.algorithm))
has_reqd = self.username and \
self.realm and \
self.nonce and \
self.uri and \
self.response
if not has_reqd:
raise ValueError(self.errmsg("Not all required parameters are present."))
if self.qop:
if self.qop not in valid_qops:
raise ValueError(self.errmsg("Unsupported value for qop: '%s'" % self.qop))
if not (self.cnonce and self.nc):
raise ValueError(self.errmsg("If qop is sent then cnonce and nc MUST be present"))
else:
if self.cnonce or self.nc:
raise ValueError(self.errmsg("If qop is not sent, neither cnonce nor nc can be present"))
def __str__(self):
return 'authorization : %s' % self.auth_header
def validate_nonce(self, s, key):
"""Validate the nonce.
Returns True if nonce was generated by synthesize_nonce() and the timestamp
is not spoofed, else returns False.
s
A string related to the resource, such as the hostname of the server.
key
A secret string known only to the server.
Both s and key must be the same values which were used to synthesize the nonce
we are trying to validate.
"""
try:
timestamp, hashpart = self.nonce.split(':', 1)
s_timestamp, s_hashpart = synthesize_nonce(s, key, timestamp).split(':', 1)
is_valid = s_hashpart == hashpart
if self.debug:
TRACE('validate_nonce: %s' % is_valid)
return is_valid
except ValueError: # split() error
pass
return False
def is_nonce_stale(self, max_age_seconds=600):
"""Returns True if a validated nonce is stale. The nonce contains a
timestamp in plaintext and also a secure hash of the timestamp. You should
first validate the nonce to ensure the plaintext timestamp is not spoofed.
"""
try:
timestamp, hashpart = self.nonce.split(':', 1)
if int(timestamp) + max_age_seconds > int(time.time()):
return False
except ValueError: # int() error
pass
if self.debug:
TRACE("nonce is stale")
return True
def HA2(self, entity_body=''):
"""Returns the H(A2) string. See :rfc:`2617` section 3.2.2.3."""
# RFC 2617 3.2.2.3
# If the "qop" directive's value is "auth" or is unspecified, then A2 is:
# A2 = method ":" digest-uri-value
#
# If the "qop" value is "auth-int", then A2 is:
# A2 = method ":" digest-uri-value ":" H(entity-body)
if self.qop is None or self.qop == "auth":
a2 = '%s:%s' % (self.http_method, self.uri)
elif self.qop == "auth-int":
a2 = "%s:%s:%s" % (self.http_method, self.uri, H(entity_body))
else:
# in theory, this should never happen, since I validate qop in __init__()
raise ValueError(self.errmsg("Unrecognized value for qop!"))
return H(a2)
def request_digest(self, ha1, entity_body=''):
"""Calculates the Request-Digest. See :rfc:`2617` section 3.2.2.1.
ha1
The HA1 string obtained from the credentials store.
entity_body
If 'qop' is set to 'auth-int', then A2 includes a hash
of the "entity body". The entity body is the part of the
message which follows the HTTP headers. See :rfc:`2617` section
4.3. This refers to the entity the user agent sent in the request which
has the Authorization header. Typically GET requests don't have an entity,
and POST requests do.
"""
ha2 = self.HA2(entity_body)
# Request-Digest -- RFC 2617 3.2.2.1
if self.qop:
req = "%s:%s:%s:%s:%s" % (self.nonce, self.nc, self.cnonce, self.qop, ha2)
else:
req = "%s:%s" % (self.nonce, ha2)
# RFC 2617 3.2.2.2
#
# If the "algorithm" directive's value is "MD5" or is unspecified, then A1 is:
# A1 = unq(username-value) ":" unq(realm-value) ":" passwd
#
# If the "algorithm" directive's value is "MD5-sess", then A1 is
# calculated only once - on the first request by the client following
# receipt of a WWW-Authenticate challenge from the server.
# A1 = H( unq(username-value) ":" unq(realm-value) ":" passwd )
# ":" unq(nonce-value) ":" unq(cnonce-value)
if self.algorithm == 'MD5-sess':
ha1 = H('%s:%s:%s' % (ha1, self.nonce, self.cnonce))
digest = H('%s:%s' % (ha1, req))
return digest
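# --- Hedged worked example (illustrative; not part of the original).
# For qop="auth", the value computed above expands, per RFC 2617, to:
#   HA1      = MD5("alice:wonderland:4x5istwelve")
#   HA2      = MD5("GET:/index.html")
#   response = MD5(HA1 ":" nonce ":" nc ":" cnonce ":auth:" HA2)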
def www_authenticate(realm, key, algorithm='MD5', nonce=None, qop=qop_auth, stale=False):
"""Constructs a WWW-Authenticate header for Digest authentication."""
if qop not in valid_qops:
raise ValueError("Unsupported value for qop: '%s'" % qop)
if algorithm not in valid_algorithms:
raise ValueError("Unsupported value for algorithm: '%s'" % algorithm)
if nonce is None:
nonce = synthesize_nonce(realm, key)
s = 'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"' % (
realm, nonce, algorithm, qop)
if stale:
s += ', stale="true"'
return s
def digest_auth(realm, get_ha1, key, debug=False):
"""A CherryPy tool which hooks at before_handler to perform
HTTP Digest Access Authentication, as specified in :rfc:`2617`.
If the request has an 'authorization' header with a 'Digest' scheme, this
tool authenticates the credentials supplied in that header. If
the request has no 'authorization' header, or if it does but the scheme is
not "Digest", or if authentication fails, the tool sends a 401 response with
a 'WWW-Authenticate' Digest header.
realm
A string containing the authentication realm.
get_ha1
A callable which looks up a username in a credentials store
and returns the HA1 string, which is defined in the RFC to be
MD5(username : realm : password). The function's signature is:
``get_ha1(realm, username)``
where username is obtained from the request's 'authorization' header.
If username is not found in the credentials store, get_ha1() returns
None.
key
A secret string known only to the server, used in the synthesis of nonces.
"""
request = cherrypy.serving.request
auth_header = request.headers.get('authorization')
nonce_is_stale = False
if auth_header is not None:
try:
auth = HttpDigestAuthorization(auth_header, request.method, debug=debug)
except ValueError:
raise cherrypy.HTTPError(400, "The Authorization header could not be parsed.")
if debug:
TRACE(str(auth))
if auth.validate_nonce(realm, key):
ha1 = get_ha1(realm, auth.username)
if ha1 is not None:
# note that for request.body to be available we need to hook in at
# before_handler, not on_start_resource like 3.1.x digest_auth does.
digest = auth.request_digest(ha1, entity_body=request.body)
if digest == auth.response: # authenticated
if debug:
TRACE("digest matches auth.response")
# Now check if nonce is stale.
# The choice of ten minutes' lifetime for nonce is somewhat arbitrary
nonce_is_stale = auth.is_nonce_stale(max_age_seconds=600)
if not nonce_is_stale:
request.login = auth.username
if debug:
TRACE("authentication of %s successful" % auth.username)
return
# Respond with 401 status and a WWW-Authenticate header
header = www_authenticate(realm, key, stale=nonce_is_stale)
if debug:
TRACE(header)
cherrypy.serving.response.headers['WWW-Authenticate'] = header
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
|
sobercoder/gem5
|
refs/heads/master
|
src/unittest/stattestmain.py
|
8
|
def main():
from _m5.stattest import stattest_init, stattest_run
import m5.stats
stattest_init()
# Initialize the global statistics
m5.stats.initSimStats()
m5.stats.initText("cout")
# We're done registering statistics. Enable the stats package now.
m5.stats.enable()
# Reset to put the stats in a consistent state.
m5.stats.reset()
stattest_run()
m5.stats.dump()
|
yceruto/django-guardian
|
refs/heads/master
|
guardian/conf/settings.py
|
30
|
from __future__ import unicode_literals
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
ANONYMOUS_DEFAULT_USERNAME_VALUE = getattr(settings,
'ANONYMOUS_DEFAULT_USERNAME_VALUE', 'AnonymousUser')
ANONYMOUS_USER_ID = getattr(settings, 'ANONYMOUS_USER_ID', None)
if ANONYMOUS_USER_ID is None:
raise ImproperlyConfigured("In order to use django-guardian's "
"ObjectPermissionBackend authorization backend you have to configure "
"ANONYMOUS_USER_ID at your settings module")
RENDER_403 = getattr(settings, 'GUARDIAN_RENDER_403', False)
TEMPLATE_403 = getattr(settings, 'GUARDIAN_TEMPLATE_403', '403.html')
RAISE_403 = getattr(settings, 'GUARDIAN_RAISE_403', False)
def check_configuration():
if RENDER_403 and RAISE_403:
raise ImproperlyConfigured("Cannot use both GUARDIAN_RENDER_403 AND "
"GUARDIAN_RAISE_403 - only one of this config may be True")
check_configuration()
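# A minimal settings sketch (values illustrative) showing what this module
# reads from django.conf.settings:
#
#     ANONYMOUS_USER_ID = -1             # required, see the check above
#     GUARDIAN_RENDER_403 = True         # render GUARDIAN_TEMPLATE_403 on denial
#     GUARDIAN_TEMPLATE_403 = '403.html'
#     GUARDIAN_RAISE_403 = False         # mutually exclusive with RENDER_403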
|
40223136/-2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/xml/sax/_exceptions.py
|
625
|
"""Different kinds of SAX Exceptions"""
# in brython the 4 lines below cause a $globals['Exception'] error
#import sys
#if sys.platform[:4] == "java":
# from java.lang import Exception
#del sys
# ===== SAXEXCEPTION =====
class SAXException(Exception):
"""Encapsulate an XML error or warning. This class can contain
basic error or warning information from either the XML parser or
the application: you can subclass it to provide additional
functionality, or to add localization. Note that although you will
receive a SAXException as the argument to the handlers in the
ErrorHandler interface, you are not actually required to raise
the exception; instead, you can simply read the information in
it."""
def __init__(self, msg, exception=None):
"""Creates an exception. The message is required, but the exception
is optional."""
self._msg = msg
self._exception = exception
Exception.__init__(self, msg)
def getMessage(self):
"Return a message for this exception."
return self._msg
def getException(self):
"Return the embedded exception, or None if there was none."
return self._exception
def __str__(self):
"Create a string representation of the exception."
return self._msg
def __getitem__(self, ix):
"""Avoids weird error messages if someone does exception[ix] by
mistake, since Exception has __getitem__ defined."""
raise AttributeError("__getitem__")
# ===== SAXPARSEEXCEPTION =====
class SAXParseException(SAXException):
"""Encapsulate an XML parse error or warning.
This exception will include information for locating the error in
the original XML document. Note that although the application will
receive a SAXParseException as the argument to the handlers in the
ErrorHandler interface, the application is not actually required
to raise the exception; instead, it can simply read the
information in it and take a different action.
Since this exception is a subclass of SAXException, it inherits
the ability to wrap another exception."""
def __init__(self, msg, exception, locator):
"Creates the exception. The exception parameter is allowed to be None."
SAXException.__init__(self, msg, exception)
self._locator = locator
# We need to cache this stuff at construction time.
# If this exception is raised, the objects through which we must
# traverse to get this information may be deleted by the time
# it gets caught.
self._systemId = self._locator.getSystemId()
self._colnum = self._locator.getColumnNumber()
self._linenum = self._locator.getLineNumber()
def getColumnNumber(self):
"""The column number of the end of the text where the exception
occurred."""
return self._colnum
def getLineNumber(self):
"The line number of the end of the text where the exception occurred."
return self._linenum
def getPublicId(self):
"Get the public identifier of the entity where the exception occurred."
return self._locator.getPublicId()
def getSystemId(self):
"Get the system identifier of the entity where the exception occurred."
return self._systemId
def __str__(self):
"Create a string representation of the exception."
sysid = self.getSystemId()
if sysid is None:
sysid = "<unknown>"
linenum = self.getLineNumber()
if linenum is None:
linenum = "?"
colnum = self.getColumnNumber()
if colnum is None:
colnum = "?"
return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg)
# ===== SAXNOTRECOGNIZEDEXCEPTION =====
class SAXNotRecognizedException(SAXException):
"""Exception class for an unrecognized identifier.
An XMLReader will raise this exception when it is confronted with an
unrecognized feature or property. SAX applications and extensions may
use this class for similar purposes."""
pass
# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXNotSupportedException(SAXException):
"""Exception class for an unsupported operation.
An XMLReader will raise this exception when a service it cannot
perform is requested (specifically setting a state or value). SAX
applications and extensions may use this class for similar
purposes."""
pass
# ===== SAXREADERNOTAVAILABLE =====
class SAXReaderNotAvailable(SAXNotSupportedException):
"""Exception class for a missing driver.
An XMLReader module (driver) should raise this exception when it
is first imported, e.g. when a support module cannot be imported.
It also may be raised during parsing, e.g. if executing an external
program is not permitted."""
pass
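# A short illustrative sketch (not part of the original module): formatting a
# parse error. The locator class below is a hypothetical stand-in for the
# Locator object a real parser would supply.
def _example_format_parse_error():
    class _FakeLocator:
        def getSystemId(self):
            return "doc.xml"
        def getColumnNumber(self):
            return 7
        def getLineNumber(self):
            return 3
        def getPublicId(self):
            return None
    exc = SAXParseException("mismatched tag", None, _FakeLocator())
    return str(exc)  # -> 'doc.xml:3:7: mismatched tag'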
|
UQ-UQx/old_injestor
|
refs/heads/master
|
services/coursestructure/service.py
|
1
|
#!/usr/bin/python
import os
import lxml
import time
import json
import pymongo
from datetime import datetime
from bson.objectid import ObjectId
import baseservice
from lxml import etree
import pprint
basepath = os.path.dirname(__file__)
class Coursestructure(baseservice.BaseService):
inst = None
def __init__(self):
Coursestructure.inst = self
super(Coursestructure, self).__init__()
self.status['name'] = "Course Structure"
self.mongo_enabled = False
self.mongo_dbname = ""
self.mongo_collectionname = ""
self.mongo_files = []
self.outputdir = 'www/course_structure'
#Private
self.initialize()
def setup(self):
#Get meta-data from info.json
pass
def run(self):
self.setaction('Parsing courses')
self.status['status'] = 'running'
filepaths = baseservice.getdatafilepaths(self.servicename)
loaded = False
for dirname, dirnames, filenames in os.walk(filepaths['incoming']):
innerpath = dirname.replace(filepaths['incoming'], "")
dircount = innerpath.count('/')
if dircount == 0:
#for files not in a directory
for filename in filenames:
self.movefiletofinish(dirname,filename)
if dircount == 1:
self.parsecourse(dirname)
self.movedirtofinish(dirname)
return loaded
def movefiletofinish(self, dirname, filename):
incpath = os.path.join(dirname, filename)
finpath = incpath.replace("incoming", "finished", 1)
os.rename(incpath, finpath)
def movedirtofinish(self, dirname):
incpath = dirname
finpath = incpath.replace("incoming", "finished", 1)
print incpath
print finpath
os.rename(incpath, finpath)
def parsecourse(self, path):
coursename = os.path.basename(os.path.normpath(path))
self.setaction("Parsing course: "+coursename)
self.status['progress']['total'] = 10
self.status['progress']['current'] = 0
coursesplit = coursename.split("-")
term = coursesplit[-1]
#Build the XML
course = {}
#Parse the course
coursefile = os.path.join(path, 'course', term + '.xml')
course = self.xml_unpack_file(coursefile)
self.status['progress']['current'] = 4
course = self.add_linked_file_xml(path,course)
policyfileurl = os.path.join(path, 'policies', term, 'policy.json')
policyfile=open(policyfileurl).read()
policydata = json.loads(policyfile)
course['policy'] = policydata
self.status['progress']['current'] = 8
f = open(self.outputdir+'/'+coursename+'.json', 'w+')
self.status['progress']['current'] = 9
f.write(json.dumps(course))
self.status['progress']['current'] = 10
def print_course(self,course):
pp = pprint.PrettyPrinter(indent=4)
for chapter in course['children']:
print 'Chapter: '+chapter['display_name']
for sequence in chapter['children']:
print '\tSequence: '+sequence['display_name']
for vertical in sequence['children']:
print '\t\tVertical: '+vertical['display_name']
for something in vertical['children']:
display_name = 'Unknown'
if 'display_name' in something:
display_name = something['display_name']
print '\t\t\t'+something['tag']+': '+display_name
print something
def add_linked_file_xml(self, basepath, xml_object):
if len(xml_object['children']) > 0:
index = 0
for child in xml_object['children']:
if len(child['children']) == 0 and 'url_name' in child:
child_path = os.path.join(basepath,child['tag'],child['url_name']+'.xml')
if os.path.isfile(child_path):
child_obj = (self.xml_unpack_file(child_path))
for key in child_obj:
child[key] = child_obj[key]
xml_object['children'][index] = self.add_linked_file_xml(basepath,child)
index += 1
return xml_object
def name():
return str("coursestructure")
def status():
return Coursestructure.inst.status
def runservice():
return Coursestructure()
class objectJSONEncoder(json.JSONEncoder):
"""A specialized JSON encoder that can handle simple lxml objectify types
>>> from lxml import objectify
>>> obj = objectify.fromstring("<Book><price>1.50</price><author>W. Shakespeare</author></Book>")
>>> objectJSONEncoder().encode(obj)
'{"price": 1.5, "author": "W. Shakespeare"}'
"""
def default(self, o):
if isinstance(o, lxml.objectify.IntElement):
return int(o)
if isinstance(o, lxml.objectify.NumberElement) or isinstance(o, lxml.objectify.FloatElement):
return float(o)
if isinstance(o, lxml.objectify.ObjectifiedDataElement):
return str(o)
if hasattr(o, '__dict__'):
#For objects with a __dict__, return the encoding of the __dict__
return o.__dict__
return json.JSONEncoder.default(self, o)
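# Usage sketch (illustrative): the encoder plugs into the standard json entry
# points via the ``cls`` argument, equivalent to the doctest above:
#
#     json.dumps(objectify.fromstring("<a><n>1</n></a>"), cls=objectJSONEncoder)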
|
ramitalat/odoo
|
refs/heads/8.0
|
addons/product/pricelist.py
|
23
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import except_orm
import openerp.addons.decimal_precision as dp
class price_type(osv.osv):
"""
    The price type is used to indicate which field on the product form
    is a price and in which currency that price is expressed.
    When a field is a price, you can use it in pricelists to base
    sale and purchase prices on fields of the product.
"""
def _price_field_get(self, cr, uid, context=None):
mf = self.pool.get('ir.model.fields')
        ids = mf.search(cr, uid, [('model', 'in', ('product.product', 'product.template')), ('ttype', '=', 'float')], context=context)
res = []
for field in mf.browse(cr, uid, ids, context=context):
res.append((field.name, field.field_description))
return res
def _get_field_currency(self, cr, uid, fname, ctx):
ids = self.search(cr, uid, [('field','=',fname)], context=ctx)
return self.browse(cr, uid, ids, context=ctx)[0].currency_id
def _get_currency(self, cr, uid, ctx):
comp = self.pool.get('res.users').browse(cr,uid,uid).company_id
if not comp:
comp_id = self.pool.get('res.company').search(cr, uid, [])[0]
comp = self.pool.get('res.company').browse(cr, uid, comp_id)
return comp.currency_id.id
_name = "product.price.type"
_description = "Price Type"
_columns = {
"name" : fields.char("Price Name", required=True, translate=True, help="Name of this kind of price."),
"active" : fields.boolean("Active"),
"field" : fields.selection(_price_field_get, "Product Field", size=32, required=True, help="Associated field in the product form."),
"currency_id" : fields.many2one('res.currency', "Currency", required=True, help="The currency the field is expressed in."),
}
_defaults = {
"active": lambda *args: True,
"currency_id": _get_currency
}
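# Illustrative sketch (old osv API; ids and values hypothetical): registering
# a price type that points at the product's 'list_price' field in a currency:
#
#     self.pool.get('product.price.type').create(cr, uid, {
#         'name': 'Public Price', 'field': 'list_price', 'currency_id': eur_id,
#     })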
#----------------------------------------------------------
# Price lists
#----------------------------------------------------------
class product_pricelist_type(osv.osv):
_name = "product.pricelist.type"
_description = "Pricelist Type"
_columns = {
'name': fields.char('Name', required=True, translate=True),
'key': fields.char('Key', required=True, help="Used in the code to select specific prices based on the context. Keep unchanged."),
}
class product_pricelist(osv.osv):
def _pricelist_type_get(self, cr, uid, context=None):
pricelist_type_obj = self.pool.get('product.pricelist.type')
pricelist_type_ids = pricelist_type_obj.search(cr, uid, [], order='name')
pricelist_types = pricelist_type_obj.read(cr, uid, pricelist_type_ids, ['key','name'], context=context)
res = []
for type in pricelist_types:
res.append((type['key'],type['name']))
return res
_name = "product.pricelist"
_description = "Pricelist"
_order = 'name'
_columns = {
'name': fields.char('Pricelist Name', required=True, translate=True),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the pricelist without removing it."),
'type': fields.selection(_pricelist_type_get, 'Pricelist Type', required=True),
'version_id': fields.one2many('product.pricelist.version', 'pricelist_id', 'Pricelist Versions', copy=True),
'currency_id': fields.many2one('res.currency', 'Currency', required=True),
'company_id': fields.many2one('res.company', 'Company'),
}
def name_get(self, cr, uid, ids, context=None):
result= []
if not all(ids):
return result
for pl in self.browse(cr, uid, ids, context=context):
name = pl.name + ' ('+ pl.currency_id.name + ')'
result.append((pl.id,name))
return result
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if name and operator == '=' and not args:
# search on the name of the pricelist and its currency, opposite of name_get(),
# Used by the magic context filter in the product search view.
query_args = {'name': name, 'limit': limit, 'lang': (context or {}).get('lang') or 'en_US'}
query = """SELECT p.id
FROM ((
SELECT pr.id, pr.name
FROM product_pricelist pr JOIN
res_currency cur ON
(pr.currency_id = cur.id)
WHERE pr.name || ' (' || cur.name || ')' = %(name)s
)
UNION (
SELECT tr.res_id as id, tr.value as name
FROM ir_translation tr JOIN
product_pricelist pr ON (
pr.id = tr.res_id AND
tr.type = 'model' AND
tr.name = 'product.pricelist,name' AND
tr.lang = %(lang)s
) JOIN
res_currency cur ON
(pr.currency_id = cur.id)
WHERE tr.value || ' (' || cur.name || ')' = %(name)s
)
) p
ORDER BY p.name"""
if limit:
query += " LIMIT %(limit)s"
cr.execute(query, query_args)
ids = [r[0] for r in cr.fetchall()]
# regular search() to apply ACLs - may limit results below limit in some cases
ids = self.search(cr, uid, [('id', 'in', ids)], limit=limit, context=context)
if ids:
return self.name_get(cr, uid, ids, context)
return super(product_pricelist, self).name_search(
cr, uid, name, args, operator=operator, context=context, limit=limit)
def _get_currency(self, cr, uid, ctx):
comp = self.pool.get('res.users').browse(cr, uid, uid).company_id
if not comp:
comp_id = self.pool.get('res.company').search(cr, uid, [])[0]
comp = self.pool.get('res.company').browse(cr, uid, comp_id)
return comp.currency_id.id
_defaults = {
'active': lambda *a: 1,
"currency_id": _get_currency
}
def price_get_multi(self, cr, uid, ids, products_by_qty_by_partner, context=None):
return dict((key, dict((key, price[0]) for key, price in value.items())) for key, value in self.price_rule_get_multi(cr, uid, ids, products_by_qty_by_partner, context=context).items())
def price_rule_get_multi(self, cr, uid, ids, products_by_qty_by_partner, context=None):
"""multi products 'price_get'.
@param ids:
@param products_by_qty:
@param partner:
@param context: {
'date': Date of the pricelist (%Y-%m-%d),}
@return: a dict of dict with product_id as key and a dict 'price by pricelist' as value
"""
if not ids:
ids = self.pool.get('product.pricelist').search(cr, uid, [], context=context)
results = {}
for pricelist in self.browse(cr, uid, ids, context=context):
subres = self._price_rule_get_multi(cr, uid, pricelist, products_by_qty_by_partner, context=context)
for product_id,price in subres.items():
results.setdefault(product_id, {})
results[product_id][pricelist.id] = price
return results
def _price_get_multi(self, cr, uid, pricelist, products_by_qty_by_partner, context=None):
return dict((key, price[0]) for key, price in self._price_rule_get_multi(cr, uid, pricelist, products_by_qty_by_partner, context=context).items())
def _price_rule_get_multi(self, cr, uid, pricelist, products_by_qty_by_partner, context=None):
context = context or {}
date = context.get('date') or time.strftime('%Y-%m-%d')
products = map(lambda x: x[0], products_by_qty_by_partner)
currency_obj = self.pool.get('res.currency')
product_obj = self.pool.get('product.template')
product_uom_obj = self.pool.get('product.uom')
price_type_obj = self.pool.get('product.price.type')
if not products:
return {}
version = False
for v in pricelist.version_id:
if ((v.date_start is False) or (v.date_start <= date)) and ((v.date_end is False) or (v.date_end >= date)):
version = v
break
if not version:
            raise osv.except_osv(_('Warning!'), _("At least one pricelist has no active version!\nPlease create or activate one."))
categ_ids = {}
for p in products:
categ = p.categ_id
while categ:
categ_ids[categ.id] = True
categ = categ.parent_id
categ_ids = categ_ids.keys()
is_product_template = products[0]._name == "product.template"
if is_product_template:
prod_tmpl_ids = [tmpl.id for tmpl in products]
            prod_ids = [product.id for tmpl in products for product in tmpl.product_variant_ids]
else:
prod_ids = [product.id for product in products]
prod_tmpl_ids = [product.product_tmpl_id.id for product in products]
# Load all rules
cr.execute(
'SELECT i.id '
'FROM product_pricelist_item AS i '
'WHERE (product_tmpl_id IS NULL OR product_tmpl_id = any(%s)) '
'AND (product_id IS NULL OR (product_id = any(%s))) '
'AND ((categ_id IS NULL) OR (categ_id = any(%s))) '
'AND (price_version_id = %s) '
'ORDER BY sequence, min_quantity desc',
(prod_tmpl_ids, prod_ids, categ_ids, version.id))
item_ids = [x[0] for x in cr.fetchall()]
items = self.pool.get('product.pricelist.item').browse(cr, uid, item_ids, context=context)
price_types = {}
results = {}
for product, qty, partner in products_by_qty_by_partner:
results[product.id] = 0.0
rule_id = False
price = False
# Final unit price is computed according to `qty` in the `qty_uom_id` UoM.
# An intermediary unit price may be computed according to a different UoM, in
# which case the price_uom_id contains that UoM.
# The final price will be converted to match `qty_uom_id`.
qty_uom_id = context.get('uom') or product.uom_id.id
price_uom_id = product.uom_id.id
qty_in_product_uom = qty
if qty_uom_id != product.uom_id.id:
try:
qty_in_product_uom = product_uom_obj._compute_qty(
cr, uid, context['uom'], qty, product.uom_id.id or product.uos_id.id)
except except_orm:
# Ignored - incompatible UoM in context, use default product UoM
pass
for rule in items:
if rule.min_quantity and qty_in_product_uom < rule.min_quantity:
continue
if is_product_template:
if rule.product_tmpl_id and product.id != rule.product_tmpl_id.id:
continue
if rule.product_id:
continue
else:
if rule.product_tmpl_id and product.product_tmpl_id.id != rule.product_tmpl_id.id:
continue
if rule.product_id and product.id != rule.product_id.id:
continue
if rule.categ_id:
cat = product.categ_id
while cat:
if cat.id == rule.categ_id.id:
break
cat = cat.parent_id
if not cat:
continue
if rule.base == -1:
if rule.base_pricelist_id:
price_tmp = self._price_get_multi(cr, uid,
rule.base_pricelist_id, [(product,
qty, False)], context=context)[product.id]
ptype_src = rule.base_pricelist_id.currency_id.id
price_uom_id = qty_uom_id
price = currency_obj.compute(cr, uid,
ptype_src, pricelist.currency_id.id,
price_tmp, round=False,
context=context)
elif rule.base == -2:
seller = False
for seller_id in product.seller_ids:
if (not partner) or (seller_id.name.id != partner):
continue
seller = seller_id
if not seller and product.seller_ids:
seller = product.seller_ids[0]
if seller:
qty_in_seller_uom = qty
seller_uom = seller.product_uom.id
if qty_uom_id != seller_uom:
qty_in_seller_uom = product_uom_obj._compute_qty(cr, uid, qty_uom_id, qty, to_uom_id=seller_uom)
price_uom_id = seller_uom
for line in seller.pricelist_ids:
if line.min_quantity <= qty_in_seller_uom:
price = line.price
else:
if rule.base not in price_types:
price_types[rule.base] = price_type_obj.browse(cr, uid, int(rule.base))
price_type = price_types[rule.base]
# price_get returns the price in the context UoM, i.e. qty_uom_id
price_uom_id = qty_uom_id
price = currency_obj.compute(
cr, uid,
price_type.currency_id.id, pricelist.currency_id.id,
product_obj._price_get(cr, uid, [product], price_type.field, context=context)[product.id],
round=False, context=context)
if price is not False:
price_limit = price
price = price * (1.0+(rule.price_discount or 0.0))
if rule.price_round:
price = tools.float_round(price, precision_rounding=rule.price_round)
convert_to_price_uom = (lambda price: product_uom_obj._compute_price(
cr, uid, product.uom_id.id,
price, price_uom_id))
if rule.price_surcharge:
price_surcharge = convert_to_price_uom(rule.price_surcharge)
price += price_surcharge
if rule.price_min_margin:
price_min_margin = convert_to_price_uom(rule.price_min_margin)
price = max(price, price_limit + price_min_margin)
if rule.price_max_margin:
price_max_margin = convert_to_price_uom(rule.price_max_margin)
price = min(price, price_limit + price_max_margin)
rule_id = rule.id
break
# Final price conversion to target UoM
price = product_uom_obj._compute_price(cr, uid, price_uom_id, price, qty_uom_id)
results[product.id] = (price, rule_id)
return results
def price_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None):
return dict((key, price[0]) for key, price in self.price_rule_get(cr, uid, ids, prod_id, qty, partner=partner, context=context).items())
def price_rule_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None):
product = self.pool.get('product.product').browse(cr, uid, prod_id, context=context)
res_multi = self.price_rule_get_multi(cr, uid, ids, products_by_qty_by_partner=[(product, qty, partner)], context=context)
res = res_multi[prod_id]
return res
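# Usage sketch (ids and values hypothetical): fetching the unit price of one
# product for a quantity of 5 from pricelist 1, as calling code typically does:
#
#     price = self.pool.get('product.pricelist').price_get(
#         cr, uid, [1], prod_id=42, qty=5, partner=7,
#         context={'date': '2015-01-01'})[1]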
class product_pricelist_version(osv.osv):
_name = "product.pricelist.version"
_description = "Pricelist Version"
_columns = {
'pricelist_id': fields.many2one('product.pricelist', 'Price List',
required=True, select=True, ondelete='cascade'),
'name': fields.char('Name', required=True, translate=True),
'active': fields.boolean('Active',
help="When a version is duplicated it is set to non active, so that the " \
"dates do not overlaps with original version. You should change the dates " \
"and reactivate the pricelist", copy=False),
'items_id': fields.one2many('product.pricelist.item',
'price_version_id', 'Price List Items', required=True, copy=True),
'date_start': fields.date('Start Date', help="First valid date for the version."),
'date_end': fields.date('End Date', help="Last valid date for the version."),
'company_id': fields.related('pricelist_id','company_id',type='many2one',
readonly=True, relation='res.company', string='Company', store=True)
}
_defaults = {
'active': lambda *a: 1,
}
def _check_date(self, cursor, user, ids, context=None):
for pricelist_version in self.browse(cursor, user, ids, context=context):
if not pricelist_version.active:
continue
where = []
if pricelist_version.date_start:
where.append("((date_end>='%s') or (date_end is null))" % (pricelist_version.date_start,))
if pricelist_version.date_end:
where.append("((date_start<='%s') or (date_start is null))" % (pricelist_version.date_end,))
cursor.execute('SELECT id ' \
'FROM product_pricelist_version ' \
'WHERE '+' and '.join(where) + (where and ' and ' or '')+
'pricelist_id = %s ' \
'AND active ' \
'AND id <> %s', (
pricelist_version.pricelist_id.id,
pricelist_version.id))
if cursor.fetchall():
return False
return True
_constraints = [
(_check_date, 'You cannot have 2 pricelist versions that overlap!',
['date_start', 'date_end'])
]
class product_pricelist_item(osv.osv):
def _price_field_get(self, cr, uid, context=None):
pt = self.pool.get('product.price.type')
ids = pt.search(cr, uid, [], context=context)
result = []
for line in pt.browse(cr, uid, ids, context=context):
result.append((line.id, line.name))
result.append((-1, _('Other Pricelist')))
result.append((-2, _('Supplier Prices on the product form')))
return result
# Added default function to fetch the Price type Based on Pricelist type.
def _get_default_base(self, cr, uid, fields, context=None):
product_price_type_obj = self.pool.get('product.price.type')
if fields.get('type') == 'purchase':
product_price_type_ids = product_price_type_obj.search(cr, uid, [('field', '=', 'standard_price')], context=context)
elif fields.get('type') == 'sale':
product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','list_price')], context=context)
else:
return -1
if not product_price_type_ids:
return False
else:
pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
return pricetype.id
_name = "product.pricelist.item"
_description = "Pricelist item"
_order = "sequence, min_quantity desc"
_defaults = {
'base': _get_default_base,
'min_quantity': lambda *a: 0,
'sequence': lambda *a: 5,
'price_discount': lambda *a: 0,
}
def _check_recursion(self, cr, uid, ids, context=None):
for obj_list in self.browse(cr, uid, ids, context=context):
if obj_list.base == -1:
main_pricelist = obj_list.price_version_id.pricelist_id.id
other_pricelist = obj_list.base_pricelist_id.id
if main_pricelist == other_pricelist:
return False
return True
def _check_margin(self, cr, uid, ids, context=None):
for item in self.browse(cr, uid, ids, context=context):
if item.price_max_margin and item.price_min_margin and (item.price_min_margin > item.price_max_margin):
return False
return True
_columns = {
'name': fields.char('Rule Name', help="Explicit rule name for this pricelist line."),
'price_version_id': fields.many2one('product.pricelist.version', 'Price List Version', required=True, select=True, ondelete='cascade'),
'product_tmpl_id': fields.many2one('product.template', 'Product Template', ondelete='cascade', help="Specify a template if this rule only applies to one product template. Keep empty otherwise."),
'product_id': fields.many2one('product.product', 'Product', ondelete='cascade', help="Specify a product if this rule only applies to one product. Keep empty otherwise."),
'categ_id': fields.many2one('product.category', 'Product Category', ondelete='cascade', help="Specify a product category if this rule only applies to products belonging to this category or its children categories. Keep empty otherwise."),
'min_quantity': fields.integer('Min. Quantity', required=True,
help="For the rule to apply, bought/sold quantity must be greater "
"than or equal to the minimum quantity specified in this field.\n"
"Expressed in the default UoM of the product."
),
'sequence': fields.integer('Sequence', required=True, help="Gives the order in which the pricelist items will be checked. The evaluation gives highest priority to lowest sequence and stops as soon as a matching item is found."),
'base': fields.selection(_price_field_get, 'Based on', required=True, size=-1, help="Base price for computation."),
'base_pricelist_id': fields.many2one('product.pricelist', 'Other Pricelist'),
'price_surcharge': fields.float('Price Surcharge',
            digits_compute= dp.get_precision('Product Price'), help='Specify the fixed amount to add (or subtract, if negative) to the amount calculated with the discount.'),
'price_discount': fields.float('Price Discount', digits=(16,4)),
'price_round': fields.float('Price Rounding',
digits_compute= dp.get_precision('Product Price'),
help="Sets the price so that it is a multiple of this value.\n" \
"Rounding is applied after the discount and before the surcharge.\n" \
"To have prices that end in 9.99, set rounding 10, surcharge -0.01" \
),
'price_min_margin': fields.float('Min. Price Margin',
digits_compute= dp.get_precision('Product Price'), help='Specify the minimum amount of margin over the base price.'),
'price_max_margin': fields.float('Max. Price Margin',
digits_compute= dp.get_precision('Product Price'), help='Specify the maximum amount of margin over the base price.'),
'company_id': fields.related('price_version_id','company_id',type='many2one',
readonly=True, relation='res.company', string='Company', store=True)
}
_constraints = [
(_check_recursion, 'Error! You cannot assign the Main Pricelist as Other Pricelist in PriceList Item!', ['base_pricelist_id']),
(_check_margin, 'Error! The minimum margin should be lower than the maximum margin.', ['price_min_margin', 'price_max_margin'])
]
def product_id_change(self, cr, uid, ids, product_id, context=None):
if not product_id:
return {}
prod = self.pool.get('product.product').read(cr, uid, [product_id], ['code','name'])
if prod[0]['code']:
return {'value': {'name': prod[0]['code']}}
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mfherbst/spack
|
refs/heads/develop
|
lib/spack/external/_pytest/terminal.py
|
23
|
""" terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
from __future__ import absolute_import, division, print_function
import itertools
from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
import pytest
import py
import sys
import time
import platform
from _pytest import nodes
import _pytest._pluggy as pluggy
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption('-v', '--verbose', action="count",
dest="verbose", default=0, help="increase verbosity."),
group._addoption('-q', '--quiet', action="count",
dest="quiet", default=0, help="decrease verbosity."),
group._addoption('-r',
action="store", dest="reportchars", default='', metavar="chars",
help="show extra test summary info as specified by chars (f)ailed, "
"(E)error, (s)skipped, (x)failed, (X)passed, "
"(p)passed, (P)passed with output, (a)all except pP. "
"Warnings are displayed at all times except when "
"--disable-warnings is set")
group._addoption('--disable-warnings', '--disable-pytest-warnings', default=False,
dest='disable_warnings', action='store_true',
help='disable warnings summary')
group._addoption('-l', '--showlocals',
action="store_true", dest="showlocals", default=False,
help="show locals in tracebacks (disabled by default).")
group._addoption('--tb', metavar="style",
action="store", dest="tbstyle", default='auto',
choices=['auto', 'long', 'short', 'no', 'line', 'native'],
help="traceback print mode (auto/long/short/line/native/no).")
group._addoption('--fulltrace', '--full-trace',
action="store_true", default=False,
help="don't cut any tracebacks (default is to cut).")
group._addoption('--color', metavar="color",
action="store", dest="color", default='auto',
choices=['yes', 'no', 'auto'],
help="color terminal output (yes/no/auto).")
def pytest_configure(config):
config.option.verbose -= config.option.quiet
reporter = TerminalReporter(config, sys.stdout)
config.pluginmanager.register(reporter, 'terminalreporter')
if config.option.debug or config.option.traceconfig:
def mywriter(tags, args):
msg = " ".join(map(str, args))
reporter.write_line("[traceconfig] " + msg)
config.trace.root.setprocessor("pytest:config", mywriter)
def getreportopt(config):
reportopts = ""
reportchars = config.option.reportchars
if not config.option.disable_warnings and 'w' not in reportchars:
reportchars += 'w'
elif config.option.disable_warnings and 'w' in reportchars:
reportchars = reportchars.replace('w', '')
if reportchars:
for char in reportchars:
if char not in reportopts and char != 'a':
reportopts += char
elif char == 'a':
reportopts = 'fEsxXw'
return reportopts
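# Worked example (illustrative): with warnings enabled and ``-rfE`` on the
# command line, reportchars is 'fE', 'w' is appended automatically, and
# getreportopt() returns 'fEw'; ``-ra`` expands to 'fEsxXw' instead.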
def pytest_report_teststatus(report):
if report.passed:
letter = "."
elif report.skipped:
letter = "s"
elif report.failed:
letter = "F"
if report.when != "call":
letter = "f"
return report.outcome, letter, report.outcome.upper()
class WarningReport:
"""
Simple structure to hold warnings information captured by ``pytest_logwarning``.
"""
def __init__(self, code, message, nodeid=None, fslocation=None):
"""
:param code: unused
:param str message: user friendly message about the warning
:param str|None nodeid: node id that generated the warning (see ``get_location``).
:param tuple|py.path.local fslocation:
file system location of the source of the warning (see ``get_location``).
"""
self.code = code
self.message = message
self.nodeid = nodeid
self.fslocation = fslocation
def get_location(self, config):
"""
Returns the more user-friendly information about the location
of a warning, or None.
"""
if self.nodeid:
return self.nodeid
if self.fslocation:
if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2:
filename, linenum = self.fslocation[:2]
relpath = py.path.local(filename).relto(config.invocation_dir)
return '%s:%s' % (relpath, linenum)
else:
return str(self.fslocation)
return None
class TerminalReporter:
def __init__(self, config, file=None):
import _pytest.config
self.config = config
self.verbosity = self.config.option.verbose
self.showheader = self.verbosity >= 0
self.showfspath = self.verbosity >= 0
self.showlongtestinfo = self.verbosity > 0
self._numcollected = 0
self.stats = {}
self.startdir = py.path.local()
if file is None:
file = sys.stdout
self._tw = self.writer = _pytest.config.create_terminal_writer(config,
file)
self.currentfspath = None
self.reportchars = getreportopt(config)
self.hasmarkup = self._tw.hasmarkup
self.isatty = file.isatty()
def hasopt(self, char):
char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
return char in self.reportchars
def write_fspath_result(self, nodeid, res):
fspath = self.config.rootdir.join(nodeid.split("::")[0])
if fspath != self.currentfspath:
self.currentfspath = fspath
fspath = self.startdir.bestrelpath(fspath)
self._tw.line()
self._tw.write(fspath + " ")
self._tw.write(res)
def write_ensure_prefix(self, prefix, extra="", **kwargs):
if self.currentfspath != prefix:
self._tw.line()
self.currentfspath = prefix
self._tw.write(prefix)
if extra:
self._tw.write(extra, **kwargs)
self.currentfspath = -2
def ensure_newline(self):
if self.currentfspath:
self._tw.line()
self.currentfspath = None
def write(self, content, **markup):
self._tw.write(content, **markup)
def write_line(self, line, **markup):
if not py.builtin._istext(line):
line = py.builtin.text(line, errors="replace")
self.ensure_newline()
self._tw.line(line, **markup)
def rewrite(self, line, **markup):
"""
Rewinds the terminal cursor to the beginning and writes the given line.
        :kwarg erase: if True, also pad with spaces to the full terminal width
            so that previous lines are properly erased.
The rest of the keyword arguments are markup instructions.
"""
erase = markup.pop('erase', False)
if erase:
fill_count = self._tw.fullwidth - len(line)
fill = ' ' * fill_count
else:
fill = ''
line = str(line)
self._tw.write("\r" + line + fill, **markup)
def write_sep(self, sep, title=None, **markup):
self.ensure_newline()
self._tw.sep(sep, title, **markup)
def section(self, title, sep="=", **kw):
self._tw.sep(sep, title, **kw)
def line(self, msg, **kw):
self._tw.line(msg, **kw)
def pytest_internalerror(self, excrepr):
for line in py.builtin.text(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
return 1
def pytest_logwarning(self, code, fslocation, message, nodeid):
warnings = self.stats.setdefault("warnings", [])
warning = WarningReport(code=code, fslocation=fslocation,
message=message, nodeid=nodeid)
warnings.append(warning)
def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
msg = "PLUGIN registered: %s" % (plugin,)
# XXX this event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line
self.write_line(msg)
def pytest_deselected(self, items):
self.stats.setdefault('deselected', []).extend(items)
def pytest_runtest_logstart(self, nodeid, location):
# ensure that the path is printed before the
# 1st test of a module starts running
if self.showlongtestinfo:
line = self._locationline(nodeid, *location)
self.write_ensure_prefix(line, "")
elif self.showfspath:
fsid = nodeid.split("::")[0]
self.write_fspath_result(fsid, "")
def pytest_runtest_logreport(self, report):
rep = report
res = self.config.hook.pytest_report_teststatus(report=rep)
cat, letter, word = res
self.stats.setdefault(cat, []).append(rep)
self._tests_ran = True
if not letter and not word:
# probably passed setup/teardown
return
if self.verbosity <= 0:
if not hasattr(rep, 'node') and self.showfspath:
self.write_fspath_result(rep.nodeid, letter)
else:
self._tw.write(letter)
else:
if isinstance(word, tuple):
word, markup = word
else:
if rep.passed:
markup = {'green': True}
elif rep.failed:
markup = {'red': True}
elif rep.skipped:
markup = {'yellow': True}
line = self._locationline(rep.nodeid, *rep.location)
if not hasattr(rep, 'node'):
self.write_ensure_prefix(line, word, **markup)
# self._tw.write(word, **markup)
else:
self.ensure_newline()
if hasattr(rep, 'node'):
self._tw.write("[%s] " % rep.node.gateway.id)
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
def pytest_collection(self):
if not self.isatty and self.config.option.verbose >= 1:
self.write("collecting ... ", bold=True)
def pytest_collectreport(self, report):
if report.failed:
self.stats.setdefault("error", []).append(report)
elif report.skipped:
self.stats.setdefault("skipped", []).append(report)
items = [x for x in report.result if isinstance(x, pytest.Item)]
self._numcollected += len(items)
if self.isatty:
# self.write_fspath_result(report.nodeid, 'E')
self.report_collect()
def report_collect(self, final=False):
if self.config.option.verbose < 0:
return
errors = len(self.stats.get('error', []))
skipped = len(self.stats.get('skipped', []))
if final:
line = "collected "
else:
line = "collecting "
line += str(self._numcollected) + " item" + ('' if self._numcollected == 1 else 's')
if errors:
line += " / %d errors" % errors
if skipped:
line += " / %d skipped" % skipped
if self.isatty:
self.rewrite(line, bold=True, erase=True)
if final:
self.write('\n')
else:
self.write_line(line)
def pytest_collection_modifyitems(self):
self.report_collect(True)
@pytest.hookimpl(trylast=True)
def pytest_sessionstart(self, session):
self._sessionstarttime = time.time()
if not self.showheader:
return
self.write_sep("=", "test session starts", bold=True)
verinfo = platform.python_version()
msg = "platform %s -- Python %s" % (sys.platform, verinfo)
if hasattr(sys, 'pypy_version_info'):
verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
msg += ", pytest-%s, py-%s, pluggy-%s" % (
pytest.__version__, py.__version__, pluggy.__version__)
if self.verbosity > 0 or self.config.option.debug or \
getattr(self.config.option, 'pastebin', None):
msg += " -- " + str(sys.executable)
self.write_line(msg)
lines = self.config.hook.pytest_report_header(
config=self.config, startdir=self.startdir)
self._write_report_lines_from_hooks(lines)
def _write_report_lines_from_hooks(self, lines):
lines.reverse()
for line in flatten(lines):
self.write_line(line)
def pytest_report_header(self, config):
inifile = ""
if config.inifile:
inifile = " " + config.rootdir.bestrelpath(config.inifile)
lines = ["rootdir: %s, inifile:%s" % (config.rootdir, inifile)]
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
lines.append(
"plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
return lines
def pytest_collection_finish(self, session):
if self.config.option.collectonly:
self._printcollecteditems(session.items)
if self.stats.get('failed'):
self._tw.sep("!", "collection failures")
for rep in self.stats.get('failed'):
rep.toterminal(self._tw)
return 1
return 0
lines = self.config.hook.pytest_report_collectionfinish(
config=self.config, startdir=self.startdir, items=session.items)
self._write_report_lines_from_hooks(lines)
def _printcollecteditems(self, items):
# to print out items and their parent collectors
# we take care to leave out Instances aka ()
# because later versions are going to get rid of them anyway
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
counts = {}
for item in items:
name = item.nodeid.split('::', 1)[0]
counts[name] = counts.get(name, 0) + 1
for name, count in sorted(counts.items()):
self._tw.line("%s: %d" % (name, count))
else:
for item in items:
nodeid = item.nodeid
nodeid = nodeid.replace("::()::", "::")
self._tw.line(nodeid)
return
stack = []
indent = ""
for item in items:
needed_collectors = item.listchain()[1:] # strip root node
while stack:
if stack == needed_collectors[:len(stack)]:
break
stack.pop()
for col in needed_collectors[len(stack):]:
stack.append(col)
# if col.name == "()":
# continue
indent = (len(stack) - 1) * " "
self._tw.line("%s%s" % (indent, col))
@pytest.hookimpl(hookwrapper=True)
def pytest_sessionfinish(self, exitstatus):
outcome = yield
outcome.get_result()
self._tw.line("")
summary_exit_codes = (
EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
EXIT_NOTESTSCOLLECTED)
if exitstatus in summary_exit_codes:
self.config.hook.pytest_terminal_summary(terminalreporter=self,
exitstatus=exitstatus)
self.summary_errors()
self.summary_failures()
self.summary_warnings()
self.summary_passes()
if exitstatus == EXIT_INTERRUPTED:
self._report_keyboardinterrupt()
del self._keyboardinterrupt_memo
self.summary_deselected()
self.summary_stats()
def pytest_keyboard_interrupt(self, excinfo):
self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
def pytest_unconfigure(self):
if hasattr(self, '_keyboardinterrupt_memo'):
self._report_keyboardinterrupt()
def _report_keyboardinterrupt(self):
excrepr = self._keyboardinterrupt_memo
msg = excrepr.reprcrash.message
self.write_sep("!", msg)
if "KeyboardInterrupt" in msg:
if self.config.option.fulltrace:
excrepr.toterminal(self._tw)
else:
self._tw.line("to show a full traceback on KeyboardInterrupt use --fulltrace", yellow=True)
excrepr.reprcrash.toterminal(self._tw)
def _locationline(self, nodeid, fspath, lineno, domain):
def mkrel(nodeid):
line = self.config.cwd_relative_nodeid(nodeid)
if domain and line.endswith(domain):
line = line[:-len(domain)]
values = domain.split("[")
values[0] = values[0].replace('.', '::') # don't replace '.' in params
line += "[".join(values)
return line
# collect_fspath comes from testid which has a "/"-normalized path
if fspath:
res = mkrel(nodeid).replace("::()", "") # parens-normalization
if nodeid.split("::")[0] != fspath.replace("\\", nodes.SEP):
res += " <- " + self.startdir.bestrelpath(fspath)
else:
res = "[location]"
return res + " "
def _getfailureheadline(self, rep):
if hasattr(rep, 'location'):
fspath, lineno, domain = rep.location
return domain
else:
return "test session" # XXX?
def _getcrashline(self, rep):
try:
return str(rep.longrepr.reprcrash)
except AttributeError:
try:
return str(rep.longrepr)[:50]
except AttributeError:
return ""
#
# summaries for sessionfinish
#
def getreports(self, name):
values = []
for x in self.stats.get(name, []):
if not hasattr(x, '_pdbshown'):
values.append(x)
return values
def summary_warnings(self):
if self.hasopt("w"):
all_warnings = self.stats.get("warnings")
if not all_warnings:
return
grouped = itertools.groupby(all_warnings, key=lambda wr: wr.get_location(self.config))
self.write_sep("=", "warnings summary", yellow=True, bold=False)
for location, warnings in grouped:
self._tw.line(str(location) or '<undetermined location>')
for w in warnings:
lines = w.message.splitlines()
indented = '\n'.join(' ' + x for x in lines)
self._tw.line(indented)
self._tw.line()
self._tw.line('-- Docs: http://doc.pytest.org/en/latest/warnings.html')
def summary_passes(self):
if self.config.option.tbstyle != "no":
if self.hasopt("P"):
reports = self.getreports('passed')
if not reports:
return
self.write_sep("=", "PASSES")
for rep in reports:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg)
self._outrep_summary(rep)
def print_teardown_sections(self, rep):
for secname, content in rep.sections:
if 'teardown' in secname:
self._tw.sep('-', secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_failures(self):
if self.config.option.tbstyle != "no":
reports = self.getreports('failed')
if not reports:
return
self.write_sep("=", "FAILURES")
for rep in reports:
if self.config.option.tbstyle == "line":
line = self._getcrashline(rep)
self.write_line(line)
else:
msg = self._getfailureheadline(rep)
markup = {'red': True, 'bold': True}
self.write_sep("_", msg, **markup)
self._outrep_summary(rep)
for report in self.getreports(''):
if report.nodeid == rep.nodeid and report.when == 'teardown':
self.print_teardown_sections(report)
def summary_errors(self):
if self.config.option.tbstyle != "no":
reports = self.getreports('error')
if not reports:
return
self.write_sep("=", "ERRORS")
for rep in self.stats['error']:
msg = self._getfailureheadline(rep)
if not hasattr(rep, 'when'):
# collect
msg = "ERROR collecting " + msg
elif rep.when == "setup":
msg = "ERROR at setup of " + msg
elif rep.when == "teardown":
msg = "ERROR at teardown of " + msg
self.write_sep("_", msg)
self._outrep_summary(rep)
def _outrep_summary(self, rep):
rep.toterminal(self._tw)
for secname, content in rep.sections:
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_stats(self):
session_duration = time.time() - self._sessionstarttime
(line, color) = build_summary_stats_line(self.stats)
msg = "%s in %.2f seconds" % (line, session_duration)
markup = {color: True, 'bold': True}
if self.verbosity >= 0:
self.write_sep("=", msg, **markup)
if self.verbosity == -1:
self.write_line(msg, **markup)
def summary_deselected(self):
if 'deselected' in self.stats:
self.write_sep("=", "%d tests deselected" % (
len(self.stats['deselected'])), bold=True)
def repr_pythonversion(v=None):
if v is None:
v = sys.version_info
try:
return "%s.%s.%s-%s-%s" % v
except (TypeError, ValueError):
return str(v)
def flatten(values):
for x in values:
if isinstance(x, (list, tuple)):
for y in flatten(x):
yield y
else:
yield x
def build_summary_stats_line(stats):
keys = ("failed passed skipped deselected "
"xfailed xpassed warnings error").split()
unknown_key_seen = False
for key in stats.keys():
if key not in keys:
if key: # setup/teardown reports have an empty key, ignore them
keys.append(key)
unknown_key_seen = True
parts = []
for key in keys:
val = stats.get(key, None)
if val:
parts.append("%d %s" % (len(val), key))
if parts:
line = ", ".join(parts)
else:
line = "no tests ran"
if 'failed' in stats or 'error' in stats:
color = 'red'
elif 'warnings' in stats or unknown_key_seen:
color = 'yellow'
elif 'passed' in stats:
color = 'green'
else:
color = 'yellow'
return (line, color)
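# Worked example (illustrative): stats of {'failed': [r1], 'passed': [r2, r3]}
# yields ('1 failed, 2 passed', 'red'); failures take priority in the colour
# choice even when other tests passed.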
def _plugin_nameversions(plugininfo):
values = []
for plugin, dist in plugininfo:
# gets us name and version!
name = '{dist.project_name}-{dist.version}'.format(dist=dist)
# questionable convenience, but it keeps things short
if name.startswith("pytest-"):
name = name[7:]
# we decided to print python package names
# they can have more than one plugin
if name not in values:
values.append(name)
return values
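# e.g. (illustrative) a dist with project_name 'pytest-cov' and version '2.5.1'
# is rendered as 'cov-2.5.1'; the 'pytest-' prefix is stripped for brevity.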
|
nemesisdesign/openwisp2
|
refs/heads/master
|
openwisp_controller/config/base/base.py
|
1
|
import collections
import hashlib
import json
from copy import deepcopy
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
from netjsonconfig.exceptions import ValidationError as SchemaError
from openwisp_utils.base import TimeStampedEditableModel
from .. import settings as app_settings
class BaseModel(TimeStampedEditableModel):
"""
Shared logic
"""
name = models.CharField(max_length=64, db_index=True)
class Meta:
abstract = True
def __str__(self):
return self.name
class BaseConfig(BaseModel):
"""
Base configuration management model logic shared between models
"""
backend = models.CharField(
_('backend'),
choices=app_settings.BACKENDS,
max_length=128,
help_text=_(
'Select <a href="http://netjsonconfig.openwisp.org/en/'
'stable/" target="_blank">netjsonconfig</a> backend'
),
)
config = JSONField(
_('configuration'),
default=dict,
help_text=_('configuration in NetJSON DeviceConfiguration format'),
load_kwargs={'object_pairs_hook': collections.OrderedDict},
dump_kwargs={'indent': 4},
)
__template__ = False
__vpn__ = False
class Meta:
abstract = True
def clean(self):
"""
* ensures config is not ``None``
* performs netjsonconfig backend validation
"""
if self.config is None:
self.config = {}
if not isinstance(self.config, dict):
raise ValidationError({'config': _('Unexpected configuration format.')})
# perform validation only if backend is defined, otherwise
# django will take care of notifying blank field error
if not self.backend:
return
try:
backend = self.backend_instance
except ImportError as e:
message = 'Error while importing "{0}": {1}'.format(self.backend, e)
raise ValidationError({'backend': message})
else:
self.clean_netjsonconfig_backend(backend)
def get_config(self):
"""
config preprocessing (skipped for templates):
* inserts hostname automatically if not present in config
"""
config = self.config or {} # might be ``None`` in some corner cases
if self.__template__:
return config
c = deepcopy(config)
is_config = not any([self.__template__, self.__vpn__])
if 'hostname' not in c.get('general', {}) and is_config:
c.setdefault('general', {})
c['general']['hostname'] = self.name.replace(':', '-')
return c
def get_context(self):
return app_settings.CONTEXT.copy()
@classmethod
def validate_netjsonconfig_backend(cls, backend):
"""
calls ``validate`` method of netjsonconfig backend
might trigger SchemaError
"""
# the following line is a trick needed to avoid cluttering
# an eventual ``ValidationError`` message with ``OrderedDict``
# which would make the error message hard to read
backend.config = json.loads(json.dumps(backend.config))
backend.validate()
@classmethod
def clean_netjsonconfig_backend(cls, backend):
"""
catches any ``SchemaError`` which will be redirected
        to ``django.core.exceptions.ValidationError``
"""
try:
cls.validate_netjsonconfig_backend(backend)
except SchemaError as e:
path = [str(el) for el in e.details.path]
trigger = '/'.join(path)
error = e.details.message
message = (
'Invalid configuration triggered by "#/{0}", '
'validator says:\n\n{1}'.format(trigger, error)
)
raise ValidationError(message)
@cached_property
def backend_class(self):
"""
returns netjsonconfig backend class
"""
return import_string(self.backend)
@cached_property
def backend_instance(self):
"""
returns netjsonconfig backend instance
"""
return self.get_backend_instance()
def get_backend_instance(self, template_instances=None):
"""
allows overriding config and templates
needed for pre validation of m2m
"""
backend = self.backend_class
kwargs = {'config': self.get_config()}
context = {}
# determine if we can pass templates
# expecting a many2many relationship
if hasattr(self, 'templates'):
if template_instances is None:
template_instances = self.templates.all()
templates_list = list()
for t in template_instances:
templates_list.append(t.config)
context.update(t.get_context())
kwargs['templates'] = templates_list
# pass context to backend if get_context method is defined
if hasattr(self, 'get_context'):
context.update(self.get_context())
kwargs['context'] = context
backend_instance = backend(**kwargs)
# remove accidentally duplicated files when combining config and templates
# this may happen if a device uses multiple VPN client templates
# which share the same Certification Authority, hence the CA
# is defined twice, which would raise ValidationError
if template_instances:
self._remove_duplicated_files(backend_instance)
return backend_instance
@classmethod
def _remove_duplicated_files(cls, backend_instance):
if 'files' not in backend_instance.config:
return
unique_files = []
for file in backend_instance.config['files']:
if file not in unique_files:
unique_files.append(file)
backend_instance.config['files'] = unique_files
def generate(self):
"""
shortcut for self.backend_instance.generate()
"""
return self.backend_instance.generate()
@property
def checksum(self):
"""
returns checksum of configuration
"""
config = self.generate().getvalue()
return hashlib.md5(config).hexdigest()
def json(self, dict=False, **kwargs):
"""
returns JSON representation of object
"""
config = self.backend_instance.config
if dict:
return config
return json.dumps(config, **kwargs)
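# Usage sketch (hypothetical concrete subclass ``Config`` of this abstract
# model; field values illustrative): the typical validate/render cycle.
#
#     config = Config(name='device1', backend='netjsonconfig.OpenWrt',
#                     config={'general': {}})
#     config.full_clean()   # runs clean() and the backend validation above
#     config.checksum       # md5 hexdigest of the generated configuration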
|
lbartoletti/QGIS
|
refs/heads/master
|
scripts/qgis_fixes/fix_funcattrs.py
|
77
|
from lib2to3.fixes.fix_funcattrs import FixFuncattrs
|
andresmargalef/xbmc-plugin.video.ted.talks
|
refs/heads/helix
|
resources/lib/settings_test.py
|
1
|
import unittest
import settings
class TestSettings(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.enable_subtitles = settings.enable_subtitles
self.xbmc_language = settings.xbmc_language
self.subtitle_language = settings.subtitle_language
def tearDown(self):
# This is rubbish. Need to understand how to test Python better.
settings.enable_subtitles = self.enable_subtitles
settings.xbmc_language = self.xbmc_language
settings.subtitle_language = self.subtitle_language
unittest.TestCase.tearDown(self)
def test_get_subtitle_languages_disabled(self):
settings.enable_subtitles = 'false'
self.assertIsNone(settings.get_subtitle_languages())
def test_get_subtitle_languages_enabled_standard(self):
settings.enable_subtitles = 'true'
settings.xbmc_language = 'Portuguese'
settings.subtitle_language = "" # Default is "en", if pref unset then XBMC will replace with "".
self.assertEqual(['pt'], settings.get_subtitle_languages())
def test_get_subtitle_languages_enabled_standard_nomatch(self):
settings.enable_subtitles = 'true'
settings.xbmc_language = 'Klingon'
settings.subtitle_language = ''
self.assertEqual(None, settings.get_subtitle_languages())
def test_get_subtitle_languages_enabled_custom(self):
settings.enable_subtitles = 'true'
settings.subtitle_language = 'en,fr , de ,'
self.assertEqual(['en', 'fr', 'de'], settings.get_subtitle_languages())
|
GdZ/scriptfile
|
refs/heads/master
|
software/googleAppEngine/lib/django_1_4/django/utils/numberformat.py
|
94
|
from django.conf import settings
from django.utils.safestring import mark_safe
def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
force_grouping=False):
"""
Gets a number (as a number or string), and returns it as a string,
using formats defined as arguments:
* decimal_sep: Decimal separator symbol (for example ".")
* decimal_pos: Number of decimal positions
* grouping: Number of digits in every group limited by thousand separator
* thousand_sep: Thousand separator symbol (for example ",")
"""
use_grouping = settings.USE_L10N and settings.USE_THOUSAND_SEPARATOR
use_grouping = use_grouping or force_grouping
use_grouping = use_grouping and grouping > 0
# Make the common case fast
if isinstance(number, int) and not use_grouping and not decimal_pos:
return mark_safe(unicode(number))
# sign
if float(number) < 0:
sign = '-'
else:
sign = ''
str_number = unicode(number)
if str_number[0] == '-':
str_number = str_number[1:]
# decimal part
if '.' in str_number:
int_part, dec_part = str_number.split('.')
if decimal_pos is not None:
dec_part = dec_part[:decimal_pos]
else:
int_part, dec_part = str_number, ''
if decimal_pos is not None:
dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
if dec_part:
dec_part = decimal_sep + dec_part
# grouping
if use_grouping:
int_part_gd = ''
for cnt, digit in enumerate(int_part[::-1]):
if cnt and not cnt % grouping:
int_part_gd += thousand_sep
int_part_gd += digit
int_part = int_part_gd[::-1]
return sign + int_part + dec_part
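# Worked examples (shown as comments: format() reads settings.USE_L10N, so a
# Django settings module must be configured before calling it):
#
#     format(1234567.891, '.', decimal_pos=2, grouping=3, thousand_sep=',',
#            force_grouping=True)   # -> u'1,234,567.89'
#     format(-10000, '.', grouping=3, thousand_sep=' ',
#            force_grouping=True)   # -> u'-10 000'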
|
z1gm4/desarrollo_web_udp
|
refs/heads/dev
|
env/lib/python2.7/UserDict.py
|
4
|
/usr/lib/python2.7/UserDict.py
|
tbenthompson/taskloaf
|
refs/heads/master
|
taskloaf/protocol.py
|
1
|
import cloudpickle
import capnp # noqa
import taskloaf.message_capnp
import logging
log = logging.getLogger(__name__)
class CloudPickleMsg:
@staticmethod
def serialize(args):
msg = taskloaf.message_capnp.Message.new_message()
msg.arbitrary = cloudpickle.dumps(args)
return msg
@staticmethod
def deserialize(msg):
return cloudpickle.loads(msg.arbitrary)
class Protocol:
def __init__(self):
self.msg_types = []
def add_msg_type(self, name, *, serializer=CloudPickleMsg, handler=None):
type_code = len(self.msg_types)
setattr(self, name, type_code)
self.msg_types.append((serializer, handler, name))
return type_code
def encode(self, source_name, type_code, *args):
serializer = self.msg_types[type_code][0]
m = serializer.serialize(*args)
m.typeCode = type_code
m.sourceName = source_name
return memoryview(m.to_bytes())
def handle(self, msg_buf, *args, **kwargs):
msg = taskloaf.message_capnp.Message.from_bytes(msg_buf)
serializer, handler, _ = self.msg_types[msg.typeCode]
data = serializer.deserialize(msg)
log.info(
f"handling from {msg.sourceName}"
f"a {self.get_name(msg.typeCode)} with data: {str(data)}"
)
handler(data, *args, **kwargs)
def get_name(self, type_code):
return self.msg_types[type_code][2]
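# Usage sketch (illustrative; the source name and handler are assumptions):
#
#     p = Protocol()
#     PING = p.add_msg_type('PING', handler=lambda data: log.info(data))
#     buf = p.encode('worker-0', PING, {'x': 1})  # cloudpickle + capnp bytes
#     p.handle(buf)                               # decodes, calls the handler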
|
cmvelo/ansible-modules-core
|
refs/heads/devel
|
cloud/openstack/_quantum_floating_ip_associate.py
|
146
|
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import time
try:
from novaclient.v1_1 import client as nova_client
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_floating_ip_associate
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_floating_ip instead
short_description: Associate or disassociate a particular floating IP with an instance
description:
- Associates or disassociates a specific floating IP with a particular instance
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- password of login user
required: true
    default: None
login_tenant_name:
description:
- the tenant name of the login user
required: true
    default: None
auth_url:
description:
- the keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- name of the region
required: false
default: None
state:
description:
- indicates the desired state of the resource
choices: ['present', 'absent']
default: present
instance_name:
description:
- name of the instance to which the public IP should be assigned
required: true
default: None
ip_address:
description:
- floating ip that should be assigned to the instance
required: true
default: None
requirements:
- "python >= 2.6"
- "python-novaclient"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
# Associate a specific floating IP with an Instance
- quantum_floating_ip_associate:
state=present
login_username=admin
login_password=admin
login_tenant_name=admin
ip_address=1.1.1.1
instance_name=vm1
'''
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _get_server_state(module, nova):
server_info = None
server = None
try:
for server in nova.servers.list():
if server:
info = server._info
if info['name'] == module.params['instance_name']:
if info['status'] != 'ACTIVE' and module.params['state'] == 'present':
module.fail_json(msg="The VM is available but not Active. state:" + info['status'])
server_info = info
break
except Exception, e:
module.fail_json(msg = "Error in getting the server list: %s" % e.message)
return server_info, server
def _get_port_id(neutron, module, instance_id):
kwargs = dict(device_id = instance_id)
try:
ports = neutron.list_ports(**kwargs)
except Exception, e:
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None
return ports['ports'][0]['id']
def _get_floating_ip_id(module, neutron):
kwargs = {
'floating_ip_address': module.params['ip_address']
}
try:
ips = neutron.list_floatingips(**kwargs)
except Exception, e:
module.fail_json(msg = "error in fetching the floatingips's %s" % e.message)
if not ips['floatingips']:
module.fail_json(msg = "Could find the ip specified in parameter, Please check")
ip = ips['floatingips'][0]['id']
if not ips['floatingips'][0]['port_id']:
state = "detached"
else:
state = "attached"
return state, ip
def _update_floating_ip(neutron, module, port_id, floating_ip_id):
kwargs = {
'port_id': port_id
}
try:
result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs})
except Exception, e:
module.fail_json(msg = "There was an error in updating the floating ip address: %s" % e.message)
module.exit_json(changed = True, result = result, public_ip=module.params['ip_address'])
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=True),
instance_name = dict(required=True),
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-novaclient, python-keystoneclient, and either python-neutronclient or python-quantumclient are required')
try:
nova = nova_client.Client(module.params['login_username'], module.params['login_password'],
module.params['login_tenant_name'], module.params['auth_url'], service_type='compute')
except Exception, e:
module.fail_json( msg = " Error in authenticating to nova: %s" % e.message)
neutron = _get_neutron_client(module, module.params)
state, floating_ip_id = _get_floating_ip_id(module, neutron)
if module.params['state'] == 'present':
if state == 'attached':
module.exit_json(changed = False, result = 'attached', public_ip=module.params['ip_address'])
server_info, server_obj = _get_server_state(module, nova)
if not server_info:
module.fail_json(msg = " The instance name provided cannot be found")
port_id = _get_port_id(neutron, module, server_info['id'])
if not port_id:
module.fail_json(msg = "Cannot find a port for this instance, maybe fixed ip is not assigned")
_update_floating_ip(neutron, module, port_id, floating_ip_id)
if module.params['state'] == 'absent':
if state == 'detached':
module.exit_json(changed = False, result = 'detached')
if state == 'attached':
_update_floating_ip(neutron, module, None, floating_ip_id)
module.exit_json(changed = True, result = "detached")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
b-me/django
|
refs/heads/master
|
django/contrib/gis/db/models/query.py
|
224
|
import warnings
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.db.models.fields import (
GeometryField, LineStringField, PointField, get_srid_info,
)
from django.contrib.gis.db.models.lookups import GISLookup
from django.contrib.gis.db.models.sql import (
AreaField, DistanceField, GeomField, GMLField,
)
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
from django.db import connections
from django.db.models.expressions import RawSQL
from django.db.models.fields import Field
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
class GeoQuerySet(QuerySet):
"The Geographic QuerySet."
# ### GeoQuerySet Methods ###
def area(self, tolerance=0.05, **kwargs):
"""
Returns the area of the geographic field in an `area` attribute on
each element of this GeoQuerySet.
"""
# Performing setup here rather than in `_spatial_attribute` so that
# we can get the units for `AreaField`.
procedure_args, geo_field = self._spatial_setup(
'area', field_name=kwargs.get('field_name'))
s = {'procedure_args': procedure_args,
'geo_field': geo_field,
'setup': False,
}
connection = connections[self.db]
backend = connection.ops
if backend.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters.
elif backend.postgis or backend.spatialite:
if backend.geography:
# Geography fields support area calculation, returns square meters.
s['select_field'] = AreaField('sq_m')
elif not geo_field.geodetic(connection):
# Getting the area units of the geographic field.
s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection)))
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise Exception('Area on geodetic coordinate systems not supported.')
return self._spatial_attribute('area', s, **kwargs)
def centroid(self, **kwargs):
"""
Returns the centroid of the geographic field in a `centroid`
attribute on each element of this GeoQuerySet.
"""
return self._geom_attribute('centroid', **kwargs)
def collect(self, **kwargs):
"""
Performs an aggregate collect operation on the given geometry field.
This is analogous to a union operation, but much faster because
boundaries are not dissolved.
"""
warnings.warn(
"The collect GeoQuerySet method is deprecated. Use the Collect() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango110Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Collect, **kwargs)
def difference(self, geom, **kwargs):
"""
Returns the spatial difference of the geographic field in a `difference`
attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('difference', geom, **kwargs)
def distance(self, geom, **kwargs):
"""
Returns the distance from the given geographic field name to the
given geometry in a `distance` attribute on each element of the
GeoQuerySet.
Keyword Arguments:
`spheroid` => If the geometry field is geodetic and PostGIS is
the spatial database, then the more accurate
spheroid calculation will be used instead of the
quicker sphere calculation.
`tolerance` => Used only for Oracle. The tolerance is
in meters -- a default of 5 centimeters (0.05)
is used.
"""
return self._distance_attribute('distance', geom, **kwargs)
def envelope(self, **kwargs):
"""
Returns a Geometry representing the bounding box of the
Geometry field in an `envelope` attribute on each element of
the GeoQuerySet.
"""
return self._geom_attribute('envelope', **kwargs)
def extent(self, **kwargs):
"""
Returns the extent (aggregate) of the features in the GeoQuerySet. The
extent will be returned as a 4-tuple, consisting of (xmin, ymin, xmax, ymax).
"""
warnings.warn(
"The extent GeoQuerySet method is deprecated. Use the Extent() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango110Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Extent, **kwargs)
def extent3d(self, **kwargs):
"""
Returns the aggregate extent, in 3D, of the features in the
GeoQuerySet. It is returned as a 6-tuple, comprising:
(xmin, ymin, zmin, xmax, ymax, zmax).
"""
warnings.warn(
"The extent3d GeoQuerySet method is deprecated. Use the Extent3D() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango110Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Extent3D, **kwargs)
def force_rhr(self, **kwargs):
"""
Returns a modified version of the Polygon/MultiPolygon in which
all of the vertices follow the Right-Hand-Rule. By default,
this is attached as the `force_rhr` attribute on each element
of the GeoQuerySet.
"""
return self._geom_attribute('force_rhr', **kwargs)
def geojson(self, precision=8, crs=False, bbox=False, **kwargs):
"""
Returns a GeoJSON representation of the geometry field in a `geojson`
attribute on each element of the GeoQuerySet.
The `crs` and `bbox` keywords may be set to True if the user wants
the coordinate reference system and the bounding box to be included
in the GeoJSON representation of the geometry.
"""
backend = connections[self.db].ops
if not backend.geojson:
raise NotImplementedError('Only PostGIS 1.3.4+ and SpatiaLite 3.0+ '
'support GeoJSON serialization.')
if not isinstance(precision, six.integer_types):
raise TypeError('Precision keyword must be set with an integer.')
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
s = {'desc': 'GeoJSON',
'procedure_args': {'precision': precision, 'options': options},
'procedure_fmt': '%(geo_col)s,%(precision)s,%(options)s',
}
return self._spatial_attribute('geojson', s, **kwargs)
def geohash(self, precision=20, **kwargs):
"""
Returns a GeoHash representation of the given field in a `geohash`
attribute on each element of the GeoQuerySet.
        The `precision` keyword may be used to customize the number of
        _characters_ used in the output GeoHash; the default is 20.
"""
s = {'desc': 'GeoHash',
'procedure_args': {'precision': precision},
'procedure_fmt': '%(geo_col)s,%(precision)s',
}
return self._spatial_attribute('geohash', s, **kwargs)
def gml(self, precision=8, version=2, **kwargs):
"""
Returns GML representation of the given field in a `gml` attribute
on each element of the GeoQuerySet.
"""
backend = connections[self.db].ops
s = {'desc': 'GML', 'procedure_args': {'precision': precision}}
if backend.postgis:
s['procedure_fmt'] = '%(version)s,%(geo_col)s,%(precision)s'
s['procedure_args'] = {'precision': precision, 'version': version}
if backend.oracle:
s['select_field'] = GMLField()
return self._spatial_attribute('gml', s, **kwargs)
def intersection(self, geom, **kwargs):
"""
Returns the spatial intersection of the Geometry field in
an `intersection` attribute on each element of this
GeoQuerySet.
"""
return self._geomset_attribute('intersection', geom, **kwargs)
def kml(self, **kwargs):
"""
Returns KML representation of the geometry field in a `kml`
attribute on each element of this GeoQuerySet.
"""
s = {'desc': 'KML',
'procedure_fmt': '%(geo_col)s,%(precision)s',
'procedure_args': {'precision': kwargs.pop('precision', 8)},
}
return self._spatial_attribute('kml', s, **kwargs)
def length(self, **kwargs):
"""
Returns the length of the geometry field as a `Distance` object
stored in a `length` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('length', None, **kwargs)
def make_line(self, **kwargs):
"""
        Creates a linestring from all of the PointField geometries in
        this GeoQuerySet and returns it. This is a spatial aggregate
method, and thus returns a geometry rather than a GeoQuerySet.
"""
warnings.warn(
"The make_line GeoQuerySet method is deprecated. Use the MakeLine() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango110Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.MakeLine, geo_field_type=PointField, **kwargs)
def mem_size(self, **kwargs):
"""
Returns the memory size (number of bytes) that the geometry field takes
in a `mem_size` attribute on each element of this GeoQuerySet.
"""
return self._spatial_attribute('mem_size', {}, **kwargs)
def num_geom(self, **kwargs):
"""
Returns the number of geometries if the field is a
GeometryCollection or Multi* Field in a `num_geom`
attribute on each element of this GeoQuerySet; otherwise
        sets with None.
"""
return self._spatial_attribute('num_geom', {}, **kwargs)
def num_points(self, **kwargs):
"""
Returns the number of points in the first linestring in the
Geometry field in a `num_points` attribute on each element of
this GeoQuerySet; otherwise sets with None.
"""
return self._spatial_attribute('num_points', {}, **kwargs)
def perimeter(self, **kwargs):
"""
Returns the perimeter of the geometry field as a `Distance` object
stored in a `perimeter` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('perimeter', None, **kwargs)
def point_on_surface(self, **kwargs):
"""
Returns a Point geometry guaranteed to lie on the surface of the
Geometry field in a `point_on_surface` attribute on each element
of this GeoQuerySet; otherwise sets with None.
"""
return self._geom_attribute('point_on_surface', **kwargs)
def reverse_geom(self, **kwargs):
"""
Reverses the coordinate order of the geometry, and attaches as a
`reverse` attribute on each element of this GeoQuerySet.
"""
s = {'select_field': GeomField()}
kwargs.setdefault('model_att', 'reverse_geom')
if connections[self.db].ops.oracle:
s['geo_field_type'] = LineStringField
return self._spatial_attribute('reverse', s, **kwargs)
def scale(self, x, y, z=0.0, **kwargs):
"""
Scales the geometry to a new size by multiplying the ordinates
with the given x,y,z scale factors.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D scaling.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('scale', s, **kwargs)
def snap_to_grid(self, *args, **kwargs):
"""
Snap all points of the input geometry to the grid. How the
geometry is snapped to the grid depends on how many arguments
were given:
- 1 argument : A single size to snap both the X and Y grids to.
- 2 arguments: X and Y sizes to snap the grid to.
- 4 arguments: X, Y sizes and the X, Y origins.
"""
if False in [isinstance(arg, (float,) + six.integer_types) for arg in args]:
            raise TypeError('Size argument(s) for the grid must be float or integer values.')
nargs = len(args)
if nargs == 1:
size = args[0]
procedure_fmt = '%(geo_col)s,%(size)s'
procedure_args = {'size': size}
elif nargs == 2:
xsize, ysize = args
procedure_fmt = '%(geo_col)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize}
elif nargs == 4:
xsize, ysize, xorigin, yorigin = args
procedure_fmt = '%(geo_col)s,%(xorigin)s,%(yorigin)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize,
'xorigin': xorigin, 'yorigin': yorigin}
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `snap_to_grid`.')
s = {'procedure_fmt': procedure_fmt,
'procedure_args': procedure_args,
'select_field': GeomField(),
}
return self._spatial_attribute('snap_to_grid', s, **kwargs)
def svg(self, relative=False, precision=8, **kwargs):
"""
Returns SVG representation of the geographic field in a `svg`
attribute on each element of this GeoQuerySet.
Keyword Arguments:
`relative` => If set to True, this will evaluate the path in
terms of relative moves (rather than absolute).
`precision` => May be used to set the maximum number of decimal
digits used in output (defaults to 8).
"""
relative = int(bool(relative))
if not isinstance(precision, six.integer_types):
raise TypeError('SVG precision keyword argument must be an integer.')
s = {
'desc': 'SVG',
'procedure_fmt': '%(geo_col)s,%(rel)s,%(precision)s',
'procedure_args': {
'rel': relative,
'precision': precision,
}
}
return self._spatial_attribute('svg', s, **kwargs)
def sym_difference(self, geom, **kwargs):
"""
Returns the symmetric difference of the geographic field in a
`sym_difference` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('sym_difference', geom, **kwargs)
def translate(self, x, y, z=0.0, **kwargs):
"""
Translates the geometry to a new location using the given numeric
parameters as offsets.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D translation.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('translate', s, **kwargs)
def transform(self, srid=4326, **kwargs):
"""
Transforms the given geometry field to the given SRID. If no SRID is
provided, the transformation will default to using 4326 (WGS84).
"""
if not isinstance(srid, six.integer_types):
raise TypeError('An integer SRID must be provided.')
field_name = kwargs.get('field_name')
self._spatial_setup('transform', field_name=field_name)
self.query.add_context('transformed_srid', srid)
return self._clone()
def union(self, geom, **kwargs):
"""
Returns the union of the geographic field with the given
Geometry in a `union` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('union', geom, **kwargs)
def unionagg(self, **kwargs):
"""
Performs an aggregate union on the given geometry field. Returns
None if the GeoQuerySet is empty. The `tolerance` keyword is for
Oracle backends only.
"""
warnings.warn(
"The unionagg GeoQuerySet method is deprecated. Use the Union() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango110Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Union, **kwargs)
# ### Private API -- Abstracted DRY routines. ###
def _spatial_setup(self, att, desc=None, field_name=None, geo_field_type=None):
"""
Performs set up for executing the spatial function.
"""
# Does the spatial backend support this?
connection = connections[self.db]
func = getattr(connection.ops, att, False)
if desc is None:
desc = att
if not func:
raise NotImplementedError('%s stored procedure not available on '
'the %s backend.' %
(desc, connection.ops.name))
# Initializing the procedure arguments.
procedure_args = {'function': func}
# Is there a geographic field in the model to perform this
# operation on?
geo_field = self._geo_field(field_name)
if not geo_field:
raise TypeError('%s output only available on GeometryFields.' % func)
# If the `geo_field_type` keyword was used, then enforce that
# type limitation.
if geo_field_type is not None and not isinstance(geo_field, geo_field_type):
raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__))
# Setting the procedure args.
procedure_args['geo_col'] = self._geocol_select(geo_field, field_name)
return procedure_args, geo_field
def _spatial_aggregate(self, aggregate, field_name=None,
geo_field_type=None, tolerance=0.05):
"""
DRY routine for calling aggregate spatial stored procedures and
returning their result to the caller of the function.
"""
# Getting the field the geographic aggregate will be called on.
geo_field = self._geo_field(field_name)
if not geo_field:
raise TypeError('%s aggregate only available on GeometryFields.' % aggregate.name)
# Checking if there are any geo field type limitations on this
# aggregate (e.g. ST_Makeline only operates on PointFields).
if geo_field_type is not None and not isinstance(geo_field, geo_field_type):
raise TypeError('%s aggregate may only be called on %ss.' % (aggregate.name, geo_field_type.__name__))
# Getting the string expression of the field name, as this is the
# argument taken by `Aggregate` objects.
agg_col = field_name or geo_field.name
# Adding any keyword parameters for the Aggregate object. Oracle backends
# in particular need an additional `tolerance` parameter.
agg_kwargs = {}
if connections[self.db].ops.oracle:
agg_kwargs['tolerance'] = tolerance
# Calling the QuerySet.aggregate, and returning only the value of the aggregate.
return self.aggregate(geoagg=aggregate(agg_col, **agg_kwargs))['geoagg']
def _spatial_attribute(self, att, settings, field_name=None, model_att=None):
"""
DRY routine for calling a spatial stored procedure on a geometry column
and attaching its output as an attribute of the model.
Arguments:
att:
The name of the spatial attribute that holds the spatial
SQL function to call.
settings:
            Dictionary of internal settings to customize for the spatial procedure.
Public Keyword Arguments:
field_name:
The name of the geographic field to call the spatial
function on. May also be a lookup to a geometry field
as part of a foreign key relation.
model_att:
The name of the model attribute to attach the output of
the spatial function to.
"""
warnings.warn(
"The %s GeoQuerySet method is deprecated. See GeoDjango Functions "
"documentation to find the expression-based replacement." % att,
RemovedInDjango20Warning, stacklevel=2
)
# Default settings.
settings.setdefault('desc', None)
settings.setdefault('geom_args', ())
settings.setdefault('geom_field', None)
settings.setdefault('procedure_args', {})
settings.setdefault('procedure_fmt', '%(geo_col)s')
settings.setdefault('select_params', [])
connection = connections[self.db]
# Performing setup for the spatial column, unless told not to.
if settings.get('setup', True):
default_args, geo_field = self._spatial_setup(
att, desc=settings['desc'], field_name=field_name,
geo_field_type=settings.get('geo_field_type'))
for k, v in six.iteritems(default_args):
settings['procedure_args'].setdefault(k, v)
else:
geo_field = settings['geo_field']
# The attribute to attach to the model.
if not isinstance(model_att, six.string_types):
model_att = att
# Special handling for any argument that is a geometry.
for name in settings['geom_args']:
# Using the field's get_placeholder() routine to get any needed
# transformation SQL.
geom = geo_field.get_prep_value(settings['procedure_args'][name])
params = geo_field.get_db_prep_lookup('contains', geom, connection=connection)
geom_placeholder = geo_field.get_placeholder(geom, None, connection)
# Replacing the procedure format with that of any needed
# transformation SQL.
old_fmt = '%%(%s)s' % name
new_fmt = geom_placeholder % '%%s'
settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt)
settings['select_params'].extend(params)
# Getting the format for the stored procedure.
fmt = '%%(function)s(%s)' % settings['procedure_fmt']
# If the result of this function needs to be converted.
if settings.get('select_field'):
select_field = settings['select_field']
if connection.ops.oracle:
select_field.empty_strings_allowed = False
else:
select_field = Field()
# Finally, setting the extra selection attribute with
# the format string expanded with the stored procedure
# arguments.
self.query.add_annotation(
RawSQL(fmt % settings['procedure_args'], settings['select_params'], select_field),
model_att)
return self
def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs):
"""
DRY routine for GeoQuerySet distance attribute routines.
"""
# Setting up the distance procedure arguments.
procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name'))
# If geodetic defaulting distance attribute to meters (Oracle and
# PostGIS spherical distances return meters). Otherwise, use the
# units of the geometry field.
connection = connections[self.db]
geodetic = geo_field.geodetic(connection)
geography = geo_field.geography
if geodetic:
dist_att = 'm'
else:
dist_att = Distance.unit_attname(geo_field.units_name(connection))
# Shortcut booleans for what distance function we're using and
# whether the geometry field is 3D.
distance = func == 'distance'
length = func == 'length'
perimeter = func == 'perimeter'
if not (distance or length or perimeter):
raise ValueError('Unknown distance function: %s' % func)
geom_3d = geo_field.dim == 3
# The field's get_db_prep_lookup() is used to get any
# extra distance parameters. Here we set up the
# parameters that will be passed in to field's function.
lookup_params = [geom or 'POINT (0 0)', 0]
# Getting the spatial backend operations.
backend = connection.ops
# If the spheroid calculation is desired, either by the `spheroid`
# keyword or when calculating the length of geodetic field, make
# sure the 'spheroid' distance setting string is passed in so we
# get the correct spatial stored procedure.
if spheroid or (backend.postgis and geodetic and
(not geography) and length):
lookup_params.append('spheroid')
lookup_params = geo_field.get_prep_value(lookup_params)
params = geo_field.get_db_prep_lookup('distance_lte', lookup_params, connection=connection)
# The `geom_args` flag is set to true if a geometry parameter was
# passed in.
geom_args = bool(geom)
if backend.oracle:
if distance:
procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s'
elif length or perimeter:
procedure_fmt = '%(geo_col)s,%(tolerance)s'
procedure_args['tolerance'] = tolerance
else:
# Getting whether this field is in units of degrees since the field may have
# been transformed via the `transform` GeoQuerySet method.
srid = self.query.get_context('transformed_srid')
if srid:
u, unit_name, s = get_srid_info(srid, connection)
geodetic = unit_name.lower() in geo_field.geodetic_units
if geodetic and not connection.features.supports_distance_geodetic:
raise ValueError(
'This database does not support linear distance '
'calculations on geodetic coordinate systems.'
)
if distance:
if srid:
# Setting the `geom_args` flag to false because we want to handle
# transformation SQL here, rather than the way done by default
# (which will transform to the original SRID of the field rather
# than to what was transformed to).
geom_args = False
procedure_fmt = '%s(%%(geo_col)s, %s)' % (backend.transform, srid)
if geom.srid is None or geom.srid == srid:
# If the geom parameter srid is None, it is assumed the coordinates
# are in the transformed units. A placeholder is used for the
# geometry parameter. `GeomFromText` constructor is also needed
# to wrap geom placeholder for SpatiaLite.
if backend.spatialite:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.from_text, srid)
else:
procedure_fmt += ', %%s'
else:
# We need to transform the geom to the srid specified in `transform()`,
# so wrapping the geometry placeholder in transformation SQL.
# SpatiaLite also needs geometry placeholder wrapped in `GeomFromText`
# constructor.
if backend.spatialite:
procedure_fmt += (', %s(%s(%%%%s, %s), %s)' % (
backend.transform, backend.from_text,
geom.srid, srid))
else:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.transform, srid)
else:
# `transform()` was not used on this GeoQuerySet.
procedure_fmt = '%(geo_col)s,%(geom)s'
if not geography and geodetic:
# Spherical distance calculation is needed (because the geographic
# field is geodetic). However, the PostGIS ST_distance_sphere/spheroid()
                # procedures may only do queries from point columns to point geometries,
                # so some error checking is required.
if not backend.geography:
if not isinstance(geo_field, PointField):
raise ValueError('Spherical distance calculation only supported on PointFields.')
if not str(Geometry(six.memoryview(params[0].ewkb)).geom_type) == 'Point':
raise ValueError(
'Spherical distance calculation only supported with '
'Point Geometry parameters'
)
# The `function` procedure argument needs to be set differently for
# geodetic distance calculations.
if spheroid:
# Call to distance_spheroid() requires spheroid param as well.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.distance_spheroid, 'spheroid': params[1]})
else:
procedure_args.update({'function': backend.distance_sphere})
elif length or perimeter:
procedure_fmt = '%(geo_col)s'
if not geography and geodetic and length:
# There's no `length_sphere`, and `length_spheroid` also
# works on 3D geometries.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.length_spheroid, 'spheroid': params[1]})
elif geom_3d and connection.features.supports_3d_functions:
# Use 3D variants of perimeter and length routines on supported backends.
if perimeter:
procedure_args.update({'function': backend.perimeter3d})
elif length:
procedure_args.update({'function': backend.length3d})
# Setting up the settings for `_spatial_attribute`.
s = {'select_field': DistanceField(dist_att),
'setup': False,
'geo_field': geo_field,
'procedure_args': procedure_args,
'procedure_fmt': procedure_fmt,
}
if geom_args:
s['geom_args'] = ('geom',)
s['procedure_args']['geom'] = geom
elif geom:
# The geometry is passed in as a parameter because we handled
# transformation conditions in this routine.
s['select_params'] = [backend.Adapter(geom)]
return self._spatial_attribute(func, s, **kwargs)
def _geom_attribute(self, func, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
Geometry attribute (e.g., `centroid`, `point_on_surface`).
"""
s = {'select_field': GeomField()}
if connections[self.db].ops.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args'] = {'tolerance': tolerance}
return self._spatial_attribute(func, s, **kwargs)
def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
        Geometry attribute and takes a Geometry parameter. This is used
for geometry set-like operations (e.g., intersection, difference,
union, sym_difference).
"""
s = {
'geom_args': ('geom',),
'select_field': GeomField(),
'procedure_fmt': '%(geo_col)s,%(geom)s',
'procedure_args': {'geom': geom},
}
if connections[self.db].ops.oracle:
s['procedure_fmt'] += ',%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
return self._spatial_attribute(func, s, **kwargs)
def _geocol_select(self, geo_field, field_name):
"""
Helper routine for constructing the SQL to select the geographic
column. Takes into account if the geographic field is in a
ForeignKey relation to the current model.
"""
compiler = self.query.get_compiler(self.db)
opts = self.model._meta
if geo_field not in opts.fields:
# Is this operation going to be on a related geographic field?
# If so, it'll have to be added to the select related information
# (e.g., if 'location__point' was given as the field name).
# Note: the operation really is defined as "must add select related!"
self.query.add_select_related([field_name])
# Call pre_sql_setup() so that compiler.select gets populated.
compiler.pre_sql_setup()
for col, _, _ in compiler.select:
if col.output_field == geo_field:
return col.as_sql(compiler, compiler.connection)[0]
raise ValueError("%r not in compiler's related_select_cols" % geo_field)
elif geo_field not in opts.local_fields:
# This geographic field is inherited from another model, so we have to
# use the db table for the _parent_ model instead.
parent_model = geo_field.model._meta.concrete_model
return self._field_column(compiler, geo_field, parent_model._meta.db_table)
else:
return self._field_column(compiler, geo_field)
# Private API utilities, subject to change.
def _geo_field(self, field_name=None):
"""
Returns the first Geometry field encountered or the one specified via
the `field_name` keyword. The `field_name` may be a string specifying
the geometry field on this GeoQuerySet's model, or a lookup string
to a geometry field via a ForeignKey relation.
"""
if field_name is None:
# Incrementing until the first geographic field is found.
for field in self.model._meta.fields:
if isinstance(field, GeometryField):
return field
return False
else:
# Otherwise, check by the given field name -- which may be
# a lookup to a _related_ geographic field.
return GISLookup._check_geo_field(self.model._meta, field_name)
def _field_column(self, compiler, field, table_alias=None, column=None):
"""
Helper function that returns the database column for the given field.
The table and column are returned (quoted) in the proper format, e.g.,
`"geoapp_city"."point"`. If `table_alias` is not specified, the
database table associated with the model of this `GeoQuerySet` will be
used. If `column` is specified, it will be used instead of the value
in `field.column`.
"""
if table_alias is None:
table_alias = compiler.query.get_meta().db_table
return "%s.%s" % (compiler.quote_name_unless_alias(table_alias),
compiler.connection.ops.quote_name(column or field.column))
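# Illustrative (and deprecated) usage against a hypothetical model with a
# geometry field, e.g. City(point=models.PointField()):
#
#     City.objects.geojson(precision=6, bbox=True)  # adds a `geojson` attr
#     City.objects.distance(pnt)                    # adds a `distance` attr
#     City.objects.transform(3857)                  # reprojects the geometry
#
# The expression-based replacements live in
# django.contrib.gis.db.models.functions (see the deprecation warnings above).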
|
fabioz/Pydev
|
refs/heads/master
|
plugins/org.python.pydev.jython/Lib/poplib.py
|
223
|
"""A POP3 client class.
Based on the J. Myers POP3 draft, Jan. 96
"""
# Author: David Ascher <david_ascher@brown.edu>
# [heavily stealing from nntplib.py]
# Updated: Piers Lauder <piers@cs.su.oz.au> [Jul '97]
# String method conversion and test jig improvements by ESR, February 2001.
# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <urtubia@mrbook.org> Aug 2003
# Example (see the test function at the end of this file)
# Imports
import re, socket
__all__ = ["POP3","error_proto"]
# Exception raised when an error or invalid response is received:
class error_proto(Exception): pass
# Standard Port
POP3_PORT = 110
# POP SSL PORT
POP3_SSL_PORT = 995
# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
CR = '\r'
LF = '\n'
CRLF = CR+LF
class POP3:
"""This class supports both the minimal and optional command sets.
Arguments can be strings or integers (where appropriate)
    (e.g.: retr(1) and retr('1') both work equally well).
Minimal Command Set:
USER name user(name)
PASS string pass_(string)
STAT stat()
LIST [msg] list(msg = None)
RETR msg retr(msg)
DELE msg dele(msg)
NOOP noop()
RSET rset()
QUIT quit()
Optional Commands (some servers support these):
RPOP name rpop(name)
APOP name digest apop(name, digest)
TOP msg n top(msg, n)
UIDL [msg] uidl(msg = None)
Raises one exception: 'error_proto'.
Instantiate with:
POP3(hostname, port=110)
NB: the POP protocol locks the mailbox from user
authorization until QUIT, so be sure to get in, suck
the messages, and quit, each time you access the
mailbox.
POP is a line-based protocol, which means large mail
messages consume lots of python cycles reading them
line-by-line.
If it's available on your mail server, use IMAP4
instead, it doesn't suffer from the two problems
above.
"""
def __init__(self, host, port=POP3_PORT,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.host = host
self.port = port
self.sock = socket.create_connection((host, port), timeout)
self.file = self.sock.makefile('rb')
self._debugging = 0
self.welcome = self._getresp()
def _putline(self, line):
if self._debugging > 1: print '*put*', repr(line)
self.sock.sendall('%s%s' % (line, CRLF))
# Internal: send one command to the server (through _putline())
def _putcmd(self, line):
if self._debugging: print '*cmd*', repr(line)
self._putline(line)
# Internal: return one line from the server, stripping CRLF.
# This is where all the CPU time of this module is consumed.
# Raise error_proto('-ERR EOF') if the connection is closed.
def _getline(self):
line = self.file.readline()
if self._debugging > 1: print '*get*', repr(line)
if not line: raise error_proto('-ERR EOF')
octets = len(line)
# server can send any combination of CR & LF
# however, 'readline()' returns lines ending in LF
# so only possibilities are ...LF, ...CRLF, CR...LF
if line[-2:] == CRLF:
return line[:-2], octets
if line[0] == CR:
return line[1:-1], octets
return line[:-1], octets
# Internal: get a response from the server.
# Raise 'error_proto' if the response doesn't start with '+'.
def _getresp(self):
resp, o = self._getline()
if self._debugging > 1: print '*resp*', repr(resp)
c = resp[:1]
if c != '+':
raise error_proto(resp)
return resp
# Internal: get a response plus following text from the server.
def _getlongresp(self):
resp = self._getresp()
list = []; octets = 0
line, o = self._getline()
while line != '.':
if line[:2] == '..':
o = o-1
line = line[1:]
octets = octets + o
list.append(line)
line, o = self._getline()
return resp, list, octets
# Internal: send a command and get the response
def _shortcmd(self, line):
self._putcmd(line)
return self._getresp()
# Internal: send a command and get the response plus following text
def _longcmd(self, line):
self._putcmd(line)
return self._getlongresp()
# These can be useful:
def getwelcome(self):
return self.welcome
def set_debuglevel(self, level):
self._debugging = level
# Here are all the POP commands:
def user(self, user):
"""Send user name, return response
(should indicate password required).
"""
return self._shortcmd('USER %s' % user)
def pass_(self, pswd):
"""Send password, return response
(response includes message count, mailbox size).
NB: mailbox is locked by server from here to 'quit()'
"""
return self._shortcmd('PASS %s' % pswd)
def stat(self):
"""Get mailbox status.
Result is tuple of 2 ints (message count, mailbox size)
"""
retval = self._shortcmd('STAT')
rets = retval.split()
if self._debugging: print '*stat*', repr(rets)
numMessages = int(rets[1])
sizeMessages = int(rets[2])
return (numMessages, sizeMessages)
def list(self, which=None):
"""Request listing, return result.
Result without a message number argument is in form
['response', ['mesg_num octets', ...], octets].
Result when a message number argument is given is a
single response: the "scan listing" for that message.
"""
if which is not None:
return self._shortcmd('LIST %s' % which)
return self._longcmd('LIST')
def retr(self, which):
"""Retrieve whole message number 'which'.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('RETR %s' % which)
def dele(self, which):
"""Delete message number 'which'.
Result is 'response'.
"""
return self._shortcmd('DELE %s' % which)
def noop(self):
"""Does nothing.
One supposes the response indicates the server is alive.
"""
return self._shortcmd('NOOP')
def rset(self):
"""Unmark all messages marked for deletion."""
return self._shortcmd('RSET')
def quit(self):
"""Signoff: commit changes on server, unlock mailbox, close connection."""
try:
resp = self._shortcmd('QUIT')
except error_proto, val:
resp = val
self.file.close()
self.sock.close()
del self.file, self.sock
return resp
#__del__ = quit
# optional commands:
def rpop(self, user):
"""Not sure what this does."""
return self._shortcmd('RPOP %s' % user)
timestamp = re.compile(r'\+OK.*(<[^>]+>)')
def apop(self, user, secret):
"""Authorisation
- only possible if server has supplied a timestamp in initial greeting.
Args:
user - mailbox user;
secret - secret shared between client and server.
NB: mailbox is locked by server from here to 'quit()'
"""
m = self.timestamp.match(self.welcome)
if not m:
raise error_proto('-ERR APOP not supported by server')
import hashlib
digest = hashlib.md5(m.group(1)+secret).digest()
digest = ''.join(map(lambda x:'%02x'%ord(x), digest))
return self._shortcmd('APOP %s %s' % (user, digest))
def top(self, which, howmuch):
"""Retrieve message header of message number 'which'
and first 'howmuch' lines of message body.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('TOP %s %s' % (which, howmuch))
def uidl(self, which=None):
"""Return message digest (unique id) list.
If 'which', result contains unique id for that message
in the form 'response mesgnum uid', otherwise result is
the list ['response', ['mesgnum uid', ...], octets]
"""
if which is not None:
return self._shortcmd('UIDL %s' % which)
return self._longcmd('UIDL')
try:
import ssl
except ImportError:
pass
else:
class POP3_SSL(POP3):
"""POP3 client class over SSL connection
Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None)
hostname - the hostname of the pop3 over ssl server
port - port number
        keyfile - PEM formatted file that contains your private key
certfile - PEM formatted certificate chain file
See the methods of the parent class POP3 for more documentation.
"""
def __init__(self, host, port = POP3_SSL_PORT, keyfile = None, certfile = None):
self.host = host
self.port = port
self.keyfile = keyfile
self.certfile = certfile
self.buffer = ""
msg = "getaddrinfo returns an empty list"
self.sock = None
for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.connect(sa)
except socket.error, msg:
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
self.file = self.sock.makefile('rb')
self.sslobj = ssl.wrap_socket(self.sock, self.keyfile, self.certfile)
self._debugging = 0
self.welcome = self._getresp()
def _fillBuffer(self):
localbuf = self.sslobj.read()
if len(localbuf) == 0:
raise error_proto('-ERR EOF')
self.buffer += localbuf
def _getline(self):
line = ""
renewline = re.compile(r'.*?\n')
match = renewline.match(self.buffer)
while not match:
self._fillBuffer()
match = renewline.match(self.buffer)
line = match.group(0)
            self.buffer = renewline.sub('', self.buffer, 1)
if self._debugging > 1: print '*get*', repr(line)
octets = len(line)
if line[-2:] == CRLF:
return line[:-2], octets
if line[0] == CR:
return line[1:-1], octets
return line[:-1], octets
def _putline(self, line):
if self._debugging > 1: print '*put*', repr(line)
line += CRLF
bytes = len(line)
while bytes > 0:
sent = self.sslobj.write(line)
if sent == bytes:
break # avoid copy
line = line[sent:]
bytes = bytes - sent
def quit(self):
"""Signoff: commit changes on server, unlock mailbox, close connection."""
try:
resp = self._shortcmd('QUIT')
except error_proto, val:
resp = val
self.sock.close()
del self.sslobj, self.sock
return resp
__all__.append("POP3_SSL")
if __name__ == "__main__":
import sys
a = POP3(sys.argv[1])
print a.getwelcome()
a.user(sys.argv[2])
a.pass_(sys.argv[3])
a.list()
(numMsgs, totalSize) = a.stat()
for i in range(1, numMsgs + 1):
(header, msg, octets) = a.retr(i)
print "Message %d:" % i
for line in msg:
print ' ' + line
print '-----------------------'
a.quit()
|
alkalait/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
|
refs/heads/master
|
ExamplesFromChapters/Chapter1/SMS_behaviour.py
|
90
|
import pymc as pm
import numpy as np
count_data = np.loadtxt("../../Chapter1_Introduction/data/txtdata.csv")
n_count_data = len(count_data)
alpha = 1.0 / count_data.mean() # recall count_data is
# the variable that holds our txt counts
lambda_1 = pm.Exponential("lambda_1", alpha)
lambda_2 = pm.Exponential("lambda_2", alpha)
tau = pm.DiscreteUniform("tau", lower=0, upper=n_count_data)
@pm.deterministic
def lambda_(tau=tau, lambda_1=lambda_1, lambda_2=lambda_2):
out = np.zeros(n_count_data)
out[:tau] = lambda_1 # lambda before tau is lambda1
out[tau:] = lambda_2 # lambda after tau is lambda2
return out
observation = pm.Poisson("obs", lambda_, value=count_data, observed=True)
model = pm.Model([observation, lambda_1, lambda_2, tau])
mcmc = pm.MCMC(model)
mcmc.sample(100000, 50000, 1)
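# Posterior draws can then be pulled from the traces (the 50000 burn-in
# samples requested above are already discarded):
lambda_1_samples = mcmc.trace('lambda_1')[:]
lambda_2_samples = mcmc.trace('lambda_2')[:]
tau_samples = mcmc.trace('tau')[:]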
|
andreyvit/pyjamas
|
refs/heads/master
|
examples/timesheet/components/Grid.py
|
7
|
# vim: set ts=4 sw=4 expandtab:
import pyjamas.ui.Grid
class Grid(pyjamas.ui.Grid.Grid):
def __init__(self, topHeader = True, leftBorder=True):
pyjamas.ui.Grid.Grid.__init__(self)
self.selectedRow = 0
if topHeader:
self.top = 1
else:
self.top = 0
if leftBorder:
self.left = 1
else:
self.left = 0
def createGrid(self, rows, cols):
self.resize(rows+self.top, cols+self.left)
self.values = {}
self.getRowFormatter().addStyleName(0, "gwt-BorderRow")
for row in range(rows):
self.values[row] = {}
self.values[row][0] = row
self.getRowFormatter().addStyleName(row+1, "gwt-UnselectedRow")
self.getCellFormatter().addStyleName(row+1, 0, "gwt-BorderCell")
self.setHTML(row+1, 0, "<b>%s</b>" % (row+1))
for col in range(0,cols):
self.setCellValue(row, col, "")
def setColLabelValue(self, col, value):
self.setHTML(0, col+self.left, '<b>%s</b>'% value)
def setRowLabelValue(self, row, value):
self.setHTML(row+self.top, 0, '<b>%s</b>' % value)
def setCellValue(self, row, col, value):
self.values[row][col] = value
if value == "":
value = " "
self.setHTML(row+self.top, col+self.left, value)
def clearGrid(self):
for row in range(1, self.getRowCount()):
for col in range(1, self.getColumnCount()):
self.clearCell(row, col)
self.selectRow(-1)
def selectRow(self, row):
self.styleRow(self.selectedRow, False)
self.styleRow(row, True)
self.selectedRow = row
def styleRow(self, row, selected):
if row > 0 and row < self.getRowCount():
if selected:
self.getRowFormatter().addStyleName(row, "gwt-SelectedRow")
else:
self.getRowFormatter().removeStyleName(row, "gwt-SelectedRow")
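# Usage sketch (illustrative; assumes a running pyjamas UI, values are made up):
#
#     grid = Grid(topHeader=True, leftBorder=True)
#     grid.createGrid(5, 3)                  # 5 data rows, 3 data columns
#     grid.setColLabelValue(0, 'Project')    # bold header in the top row
#     grid.setCellValue(0, 0, 'Timesheet')   # header/border offsets applied
#     grid.selectRow(1)                      # moves the gwt-SelectedRow style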
|
kartoza/miniSASS
|
refs/heads/master
|
monitor/migrations/0001_initial.py
|
2
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Organisations'
db.create_table(u'organisations', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('org_name', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('org_type', self.gf('django.db.models.fields.CharField')(max_length=11, blank=True)),
))
db.send_create_signal('monitor', ['Organisations'])
# Adding model 'Sites'
db.create_table(u'sites', (
('gid', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('the_geom', self.gf('django.contrib.gis.db.models.fields.PointField')()),
('site_name', self.gf('django.db.models.fields.CharField')(max_length=15)),
('river_name', self.gf('django.db.models.fields.CharField')(max_length=15)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('river_cat', self.gf('django.db.models.fields.CharField')(max_length=5, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('time_stamp', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
))
db.send_create_signal('monitor', ['Sites'])
# Adding model 'ArchivedSites'
db.create_table(u'archived_sites', (
('gid', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('the_geom', self.gf('django.contrib.gis.db.models.fields.PointField')()),
('site_name', self.gf('django.db.models.fields.CharField')(max_length=15)),
('river_name', self.gf('django.db.models.fields.CharField')(max_length=15)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('river_cat', self.gf('django.db.models.fields.CharField')(max_length=5, blank=True)),
('user_id', self.gf('django.db.models.fields.IntegerField')(default=0)),
('time_stamp', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
))
db.send_create_signal('monitor', ['ArchivedSites'])
# Adding model 'Observations'
db.create_table(u'observations', (
('gid', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('flatworms', self.gf('django.db.models.fields.BooleanField')(default=False)),
('worms', self.gf('django.db.models.fields.BooleanField')(default=False)),
('leeches', self.gf('django.db.models.fields.BooleanField')(default=False)),
('crabs_shrimps', self.gf('django.db.models.fields.BooleanField')(default=False)),
('stoneflies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('minnow_mayflies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('other_mayflies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('damselflies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('dragonflies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('bugs_beetles', self.gf('django.db.models.fields.BooleanField')(default=False)),
('caddisflies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('true_flies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('snails', self.gf('django.db.models.fields.BooleanField')(default=False)),
('score', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=2)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(related_name='observation', db_column='site', to=orm['monitor.Sites'])),
('time_stamp', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
('comment', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('obs_date', self.gf('django.db.models.fields.DateField')()),
('flag', self.gf('django.db.models.fields.CharField')(default='dirty', max_length=5)),
('water_clarity', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=1, blank=True)),
('water_temp', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=1, blank=True)),
('ph', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=1, blank=True)),
('diss_oxygen', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2, blank=True)),
('diss_oxygen_unit', self.gf('django.db.models.fields.CharField')(default='mgl', max_length=8, blank=True)),
('elec_cond', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2, blank=True)),
('elec_cond_unit', self.gf('django.db.models.fields.CharField')(default='mSm', max_length=8, blank=True)),
))
db.send_create_signal('monitor', ['Observations'])
# Adding model 'ArchivedObservations'
db.create_table(u'archived_observations', (
('gid', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user_id', self.gf('django.db.models.fields.IntegerField')(default=0)),
('flatworms', self.gf('django.db.models.fields.BooleanField')(default=False)),
('worms', self.gf('django.db.models.fields.BooleanField')(default=False)),
('leeches', self.gf('django.db.models.fields.BooleanField')(default=False)),
('crabs_shrimps', self.gf('django.db.models.fields.BooleanField')(default=False)),
('stoneflies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('minnow_mayflies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('other_mayflies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('damselflies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('dragonflies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('bugs_beetles', self.gf('django.db.models.fields.BooleanField')(default=False)),
('caddisflies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('true_flies', self.gf('django.db.models.fields.BooleanField')(default=False)),
('snails', self.gf('django.db.models.fields.BooleanField')(default=False)),
('score', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=2)),
('site_id', self.gf('django.db.models.fields.IntegerField')(default=0)),
('time_stamp', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
('comment', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('obs_date', self.gf('django.db.models.fields.DateField')()),
('flag', self.gf('django.db.models.fields.CharField')(default='dirty', max_length=5)),
('water_clarity', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=1, blank=True)),
('water_temp', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=1, blank=True)),
('ph', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=1, blank=True)),
('diss_oxygen', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2, blank=True)),
('diss_oxygen_unit', self.gf('django.db.models.fields.CharField')(default='mgl', max_length=8, blank=True)),
('elec_cond', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2, blank=True)),
('elec_cond_unit', self.gf('django.db.models.fields.CharField')(default='mSm', max_length=8, blank=True)),
))
db.send_create_signal('monitor', ['ArchivedObservations'])
# Adding model 'ObservationPlugin'
db.create_table('cmsplugin_observationplugin', (
('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
))
db.send_create_signal('monitor', ['ObservationPlugin'])
def backwards(self, orm):
# Deleting model 'Organisations'
db.delete_table(u'organisations')
# Deleting model 'Sites'
db.delete_table(u'sites')
# Deleting model 'ArchivedSites'
db.delete_table(u'archived_sites')
# Deleting model 'Observations'
db.delete_table(u'observations')
# Deleting model 'ArchivedObservations'
db.delete_table(u'archived_observations')
# Deleting model 'ObservationPlugin'
db.delete_table('cmsplugin_observationplugin')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'monitor.archivedobservations': {
'Meta': {'object_name': 'ArchivedObservations', 'db_table': "u'archived_observations'"},
'bugs_beetles': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'caddisflies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'crabs_shrimps': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'damselflies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diss_oxygen': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'diss_oxygen_unit': ('django.db.models.fields.CharField', [], {'default': "'mgl'", 'max_length': '8', 'blank': 'True'}),
'dragonflies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'elec_cond': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'elec_cond_unit': ('django.db.models.fields.CharField', [], {'default': "'mSm'", 'max_length': '8', 'blank': 'True'}),
'flag': ('django.db.models.fields.CharField', [], {'default': "'dirty'", 'max_length': '5'}),
'flatworms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gid': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'leeches': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'minnow_mayflies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'obs_date': ('django.db.models.fields.DateField', [], {}),
'other_mayflies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ph': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1', 'blank': 'True'}),
'score': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
'site_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'snails': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'stoneflies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'true_flies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'water_clarity': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '1', 'blank': 'True'}),
'water_temp': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1', 'blank': 'True'}),
'worms': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'monitor.archivedsites': {
'Meta': {'object_name': 'ArchivedSites', 'db_table': "u'archived_sites'"},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gid': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'river_cat': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'river_name': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'site_name': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'the_geom': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'monitor.observationplugin': {
'Meta': {'object_name': 'ObservationPlugin', 'db_table': "'cmsplugin_observationplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'monitor.observations': {
'Meta': {'object_name': 'Observations', 'db_table': "u'observations'"},
'bugs_beetles': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'caddisflies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'crabs_shrimps': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'damselflies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diss_oxygen': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'diss_oxygen_unit': ('django.db.models.fields.CharField', [], {'default': "'mgl'", 'max_length': '8', 'blank': 'True'}),
'dragonflies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'elec_cond': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'elec_cond_unit': ('django.db.models.fields.CharField', [], {'default': "'mSm'", 'max_length': '8', 'blank': 'True'}),
'flag': ('django.db.models.fields.CharField', [], {'default': "'dirty'", 'max_length': '5'}),
'flatworms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gid': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'leeches': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'minnow_mayflies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'obs_date': ('django.db.models.fields.DateField', [], {}),
'other_mayflies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ph': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1', 'blank': 'True'}),
'score': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'observation'", 'db_column': "'site'", 'to': "orm['monitor.Sites']"}),
'snails': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'stoneflies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'true_flies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'water_clarity': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '1', 'blank': 'True'}),
'water_temp': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1', 'blank': 'True'}),
'worms': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'monitor.organisations': {
'Meta': {'object_name': 'Organisations', 'db_table': "u'organisations'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'org_type': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'})
},
'monitor.schools': {
'Meta': {'object_name': 'Schools', 'db_table': "u'schools'", 'managed': 'False'},
'gid': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'natemis': ('django.db.models.fields.IntegerField', [], {}),
'phase': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'province': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'school': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'the_geom': ('django.contrib.gis.db.models.fields.PointField', [], {})
},
'monitor.sites': {
'Meta': {'object_name': 'Sites', 'db_table': "u'sites'"},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gid': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'river_cat': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'river_name': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'site_name': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'the_geom': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['monitor']
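# Note: this is a South (pre-Django 1.7) schema migration; it would typically
# be applied with South's migrate command, e.g. (project layout assumed):
#   python manage.py migrate monitor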
|
nicholas-silveira/art_pipeline
|
refs/heads/master
|
maya/packages/oop_maya/core/ui_test.py
|
1
|
import oop_maya.core as oop_maya
import oop_python.core as oop_python #@UnresolvedImport
UI_FILE_PATH = 'C:\\Users\\NicholasSilveira\\Documents\\GitHub\\art_pipeline\\dcc\\packages\\oop_dcc\\tools\\developer\\ui\\batch_tool.ui'
ui_object, base_class = oop_python.get_pyside_class( UI_FILE_PATH )
class ui_test(base_class, ui_object):
    def __init__(self, parent=None, *args):
        # Resolve the Maya main window per call; putting the call in the
        # default argument would evaluate it only once, at class definition.
        if parent is None:
            parent = oop_maya.get_maya_window()
        super(ui_test, self).__init__(parent)
        self.setupUi(self)
        self.show()
ui_test()
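# Note: instantiating without keeping a reference (as above) can let the
# dialog be garbage-collected by Python/Qt; a common pattern is to hold it in
# a name (illustrative, not part of the original file):
#   test_window = ui_test()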
|
jolyonb/edx-platform
|
refs/heads/master
|
common/djangoapps/util/query.py
|
1
|
""" Utility functions related to database queries """
from __future__ import absolute_import
from django.conf import settings
def use_read_replica_if_available(queryset):
"""
If there is a database called 'read_replica', use that database for the queryset.
"""
return queryset.using("read_replica") if "read_replica" in settings.DATABASES else queryset
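# A minimal usage sketch (model and app names are hypothetical): wrap a
# queryset before it is evaluated so reads go to the replica when configured:
#   from myapp.models import Enrollment
#   rows = use_read_replica_if_available(Enrollment.objects.filter(active=True))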
|
synth3tk/the-blue-alliance
|
refs/heads/master
|
datafeeds/tba_videos_parser.py
|
7
|
import logging
import re
from BeautifulSoup import BeautifulSoup
from datafeeds.parser_base import ParserBase
class TbaVideosParser(ParserBase):
"""
Facilitates building TBAVideos store from TBA.
"""
    @classmethod
    def parse(cls, html):
        """
        Parse the directory listing on TBA to extract relevant TBAVideo
        information. Returns a dict mapping video keys to lists of
        filetypes, plus a boolean flag (always False here).
        """
soup = BeautifulSoup(html,
convertEntities=BeautifulSoup.HTML_ENTITIES)
videos = dict()
for a in soup.findAll("a", href=True):
parts = a["href"].split(".")
if len(parts) == 2:
(key, filetype) = parts
videos.setdefault(key, list())
videos[key].append(filetype)
else:
logging.info("Malformed video filename: " + a["href"])
continue
return videos, False
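# A minimal usage sketch (the HTML snippet is made up): given an Apache-style
# directory listing, parse() buckets file extensions by video key:
#   html = '<a href="2010cmp_f1m1.wmv">2010cmp_f1m1.wmv</a>'
#   videos, _ = TbaVideosParser.parse(html)
#   # videos == {'2010cmp_f1m1': ['wmv']}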
|
Ultrax5/android_kernel_sony_msm8960t
|
refs/heads/master_jb-4.3
|
scripts/build-all.py
|
1182
|
#! /usr/bin/env python
# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno  # needed by check_build() below
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = None  # replaced by the parsed OptionParser options in main()
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(path, setting):
    """Append a config setting to the given defconfig file."""
    print 'Updating %s with \'%s\'\n' % (path, setting)
    defconfig = open(path, 'a')
    defconfig.write(setting + '\n')
    defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/apq*_defconfig'):
names[os.path.basename(n)[:-10]] = n
return names
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
        # Read from the raw fd instead of iterating proc.stdout, so output
        # appears promptly (file-object iteration would buffer it read-ahead).
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
build = Builder(log_name)
result = build.run(['make', 'O=%s' % dest_dir] + make_command)
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" % (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
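# Example invocations (a sketch; 'msm8960' is an illustrative target name,
# real names come from scan_configs() above):
#   ./build-all.py --list
#   ./build-all.py -j 8 msm8960
#   ./build-all.py --oldconfig all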
|
mohsenSy/zulip
|
refs/heads/master
|
api/integrations/perforce/zulip_change-commit.py
|
20
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2012-2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''Zulip notification change-commit hook.
In Perforce, the "change-commit" trigger is fired after the metadata has been
created, files have been transferred, and the changelist committed to the depot
database.
This specific trigger expects command-line arguments in the form:
%change% %changeroot%
For example:
1234 //depot/security/src/
'''
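# A hypothetical Perforce trigger table entry installing this hook (trigger
# name and script path are illustrative, not from this repo):
#   notify_zulip change-commit //depot/... "python /p4/triggers/zulip_change-commit.py %change% %changeroot%"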
from __future__ import print_function
import os
import sys
import os.path
import git_p4
__version__ = "0.1"
sys.path.insert(0, os.path.dirname(__file__))
import zulip_perforce_config as config
if config.ZULIP_API_PATH is not None:
sys.path.append(config.ZULIP_API_PATH)
import zulip
client = zulip.Client(
email=config.ZULIP_USER,
site=config.ZULIP_SITE,
api_key=config.ZULIP_API_KEY,
client="ZulipPerforce/" + __version__)
try:
changelist = int(sys.argv[1])
changeroot = sys.argv[2]
except IndexError:
print("Wrong number of arguments.\n\n", end=' ', file=sys.stderr)
print(__doc__, file=sys.stderr)
sys.exit(-1)
except ValueError:
print("First argument must be an integer.\n\n", end=' ', file=sys.stderr)
print(__doc__, file=sys.stderr)
sys.exit(-1)
metadata = git_p4.p4_describe(changelist)
destination = config.commit_notice_destination(changeroot, changelist)
if destination is None:
# Don't forward the notice anywhere
sys.exit(0)
message = """**{0}** committed revision @{1} to `{2}`.
> {3}
""".format(metadata["user"], metadata["change"], changeroot, metadata["desc"])
message_data = {
"type": "stream",
"to": destination["stream"],
"subject": destination["subject"],
"content": message,
}
client.send_message(message_data)
|
tobbad/micropython
|
refs/heads/master
|
tests/basics/list_sort.py
|
63
|
l = [1, 3, 2, 5]
print(l)
print(sorted(l))
l.sort()
print(l)
print(l == sorted(l))
print(sorted(l, key=lambda x: -x))
l.sort(key=lambda x: -x)
print(l)
print(l == sorted(l, key=lambda x: -x))
print(sorted(l, key=lambda x: -x, reverse=True))
l.sort(key=lambda x: -x, reverse=True)
print(l)
print(l == sorted(l, key=lambda x: -x, reverse=True))
print(sorted(l, reverse=True))
l.sort(reverse=True)
print(l)
print(l == sorted(l, reverse=True))
print(sorted(l, reverse=False))
l.sort(reverse=False)
print(l)
print(l == sorted(l, reverse=False))
# test large lists (should not stack overflow)
l = list(range(200))
l.sort()
print(l[0], l[-1])
l.sort(reverse=True)
print(l[0], l[-1])
# test user-defined ordering
class A:
def __init__(self, x):
self.x = x
def __lt__(self, other):
return self.x > other.x
def __repr__(self):
return str(self.x)
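# Because A.__lt__ inverts the usual comparison (returns self.x > other.x),
# a plain sort() below yields descending x values and reverse=True ascending.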
l = [A(5), A(2), A(1), A(3), A(4)]
print(l)
l.sort()
print(l)
l.sort(reverse=True)
print(l)
|