repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
karrrt/scale | lib/python2.7/site-packages/pip/_vendor/progress/spinner.py | 404 | 1341 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
from . import Infinite
from .helpers import WriteMixin
class Spinner(WriteMixin, Infinite):
    """Infinite progress indicator that cycles through spinner glyphs in place."""
    message = ''
    # Glyphs cycled through on each update; subclasses override this.
    phases = ('-', '\\', '|', '/')
    hide_cursor = True

    def update(self):
        # Pick the phase for the current step and rewrite it in place.
        i = self.index % len(self.phases)
        self.write(self.phases[i])
class PieSpinner(Spinner):
    # Quarter-circle (pie) glyphs, rotated through on each update.
    phases = ['◷', '◶', '◵', '◴']
class MoonSpinner(Spinner):
    # Moon-phase glyphs, rotated through on each update.
    phases = ['◑', '◒', '◐', '◓']
class LineSpinner(Spinner):
    # Horizontal-bar glyphs sweeping down then back up (note the repeats).
    phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻']
| mit |
ifwe/tasr | test/pyunit/test_client_methods.py | 1 | 21180 | '''
Created on May 7, 2014
@author: cmills
'''
from client_test import TestTASRAppClient
import unittest
import tasr.client
import tasr.client_legacy
import httmock
class TestTASRClientMethods(TestTASRAppClient):
    """Tests for the tasr.client module, routed to the test app via httmock.

    Each test clears the backing redis db before and after it runs, so these
    must only ever be pointed at a disposable test instance.
    """

    def setUp(self):
        super(TestTASRClientMethods, self).setUp()
        self.event_type = "gold"
        fix_rel_path = "schemas/%s.avsc" % (self.event_type)
        self.avsc_file = self.get_fixture_file(fix_rel_path, "r")
        self.schema_str = self.avsc_file.read()
        # client settings
        self.host = self.app.config.host  # should match netloc below
        self.port = self.app.config.port  # should match netloc below
        # clear out all the keys before beginning -- careful!
        self.app.ASR.redis.flushdb()

    def tearDown(self):
        # this clears out redis after each test -- careful!
        self.app.ASR.redis.flushdb()

    def bare_register_subject_skeleton(self, config_dict=None):
        '''register_subject() - skeleton test'''
        with httmock.HTTMock(self.route_to_testapp):
            meta = tasr.client.register_subject(self.event_type,
                                                config_dict,
                                                self.host, self.port)
            self.assertIn(self.event_type, meta.name, 'Bad subject name.')
            return meta

    def bare_register_schema_skeleton(self, schema_str):
        '''register_schema_for_topic() - skeleton test'''
        # whitespace gets normalized, so do that locally to the submitted
        # schema string so we have an accurate target for comparison
        # NOTE(review): tasr.registered_schema is used here but not imported
        # at the top of this file -- presumably made available by importing
        # tasr.client; confirm.
        ras = tasr.registered_schema.RegisteredAvroSchema()
        ras.schema_str = schema_str
        canonical_schema_str = ras.canonical_schema_str
        with httmock.HTTMock(self.route_to_testapp):
            func = tasr.client.register_schema
            rs = func(self.event_type, schema_str, self.host, self.port)
            self.assertEqual(canonical_schema_str, rs.schema_str,
                             'Schema string modified!')
            self.assertIn(self.event_type, rs.group_names,
                          'Topic not in registered schema object.')
            self.assertIn(self.event_type, rs.ts_dict.keys(),
                          'Topic not in registration timestamps.')
            return rs

    ########################################################################
    # subject tests for S+V API
    ########################################################################
    def test_bare_register_subject(self):
        '''register_group() - as expected'''
        self.bare_register_subject_skeleton()

    def test_bare_lookup_subject(self):
        '''lookup_subject() - as expected, should return True'''
        self.bare_register_subject_skeleton()
        with httmock.HTTMock(self.route_to_testapp):
            self.assertTrue(tasr.client.lookup_subject(self.event_type,
                                                       self.host,
                                                       self.port))

    def test_bare_lookup_missing_subject(self):
        '''lookup_subject() - no such subject, should return False'''
        with httmock.HTTMock(self.route_to_testapp):
            self.assertFalse(tasr.client.lookup_subject(self.event_type,
                                                        self.host,
                                                        self.port))

    def test_bare_get_subject_config(self):
        '''get_subject_config() - as expected'''
        test_config = {'bob': 'alice'}
        self.bare_register_subject_skeleton(test_config)
        with httmock.HTTMock(self.route_to_testapp):
            config_dict = tasr.client.get_subject_config(self.event_type,
                                                         self.host,
                                                         self.port)
            self.assertDictEqual(test_config, config_dict, 'bad config dict')

    def test_bare_update_subject_config(self):
        '''update_subject_config() - as expected'''
        test_config = {'bob': 'alice'}
        self.bare_register_subject_skeleton(test_config)
        with httmock.HTTMock(self.route_to_testapp):
            update_config = {'bob': 'cynthia', 'doris': 'eve'}
            config_dict = tasr.client.update_subject_config(self.event_type,
                                                            update_config,
                                                            self.host,
                                                            self.port)
            self.assertDictEqual(update_config, config_dict, 'bad config dict')

    def test_bare_is_subject_integral(self):
        '''is_subject_integral() - as expected'''
        self.bare_register_subject_skeleton()
        with httmock.HTTMock(self.route_to_testapp):
            is_int = tasr.client.is_subject_integral(self.event_type,
                                                     self.host,
                                                     self.port)
            self.assertFalse(is_int)

    def test_bare_get_get_active_subject_names_with_none_and_one_present(self):
        '''get_active_subject_names() - as expected'''
        self.bare_register_subject_skeleton()
        # without a schema, the subject is not active
        with httmock.HTTMock(self.route_to_testapp):
            subject_names = tasr.client.get_active_subject_names(self.host,
                                                                 self.port)
            self.assertEqual(0, len(subject_names), 'expected no subjects')
        # now reg a schema and try again
        self.bare_register_schema_skeleton(self.schema_str)
        with httmock.HTTMock(self.route_to_testapp):
            subject_names = tasr.client.get_active_subject_names(self.host,
                                                                 self.port)
            self.assertListEqual(subject_names, [self.event_type, ],
                                 'unexpected groups: %s' % subject_names)

    def test_bare_get_get_all_subject_names_with_one_present(self):
        '''get_all_subject_names() - as expected'''
        self.bare_register_subject_skeleton()
        with httmock.HTTMock(self.route_to_testapp):
            subject_names = tasr.client.get_all_subject_names(self.host,
                                                              self.port)
            self.assertListEqual(subject_names, [self.event_type, ],
                                 'unexpected groups: %s' % subject_names)

    def test_bare_get_get_all_subject_names_with_none_present(self):
        '''get_all_subject_names() - checking an empty list doesn't blow up'''
        with httmock.HTTMock(self.route_to_testapp):
            subject_names = tasr.client.get_all_subject_names(self.host,
                                                              self.port)
            self.assertEqual(0, len(subject_names), 'expected no subjects')

    def test_bare_get_all_subject_schema_ids(self):
        '''get_all_subject_schema_ids() - as expected'''
        with httmock.HTTMock(self.route_to_testapp):
            schemas = []
            sha256_ids = []
            for v in range(1, 50):
                ver_schema_str = self.get_schema_permutation(self.schema_str,
                                                             "fn_%s" % v)
                # whitespace gets normalized, so do that locally to the
                # submitted schema string so we have an accurate target for
                # comparison
                ras = tasr.registered_schema.RegisteredAvroSchema()
                ras.schema_str = ver_schema_str
                canonical_ver_schema_str = ras.canonical_schema_str
                schemas.append(canonical_ver_schema_str)
                # reg with the non-canonicalized schema string
                rs = self.bare_register_schema_skeleton(ver_schema_str)
                self.assertEqual(canonical_ver_schema_str, rs.schema_str,
                                 'Schema string modified!')
                self.assertIn(self.event_type, rs.group_names,
                              'Subject not in registered schema object.')
                self.assertEqual(rs.sha256_id, ras.sha256_id,
                                 'SHA256 ID mismatch')
                sha256_ids.append(rs.sha256_id)
            # now pull the ID list and check it matches
            ids = tasr.client.get_all_subject_schema_ids(self.event_type,
                                                         self.host,
                                                         self.port)
            self.assertListEqual(sha256_ids, ids, 'ID list mismatch')

    def test_bare_get_all_subject_schemas(self):
        '''get_all_subject_schemas() - as expected'''
        with httmock.HTTMock(self.route_to_testapp):
            test_schema_strs = []
            for v in range(1, 50):
                ver_schema_str = self.get_schema_permutation(self.schema_str,
                                                             "fn_%s" % v)
                # whitespace gets normalized, so do that locally to the
                # submitted schema string so we have an accurate target for
                # comparison
                ras = tasr.registered_schema.RegisteredAvroSchema()
                ras.schema_str = ver_schema_str
                canonical_ver_schema_str = ras.canonical_schema_str
                test_schema_strs.append(canonical_ver_schema_str)
                # reg with the non-canonicalized schema string
                rs = self.bare_register_schema_skeleton(ver_schema_str)
                self.assertEqual(canonical_ver_schema_str, rs.schema_str,
                                 'Schema string modified!')
                self.assertIn(self.event_type, rs.group_names,
                              'Subject not in registered schema object.')
                self.assertEqual(rs.sha256_id, ras.sha256_id,
                                 'SHA256 ID mismatch')
            # now pull the schema list and check it matches
            schemas = tasr.client.get_all_subject_schemas(self.event_type,
                                                          self.host,
                                                          self.port)
            for v in range(1, 50):
                reg_schema = schemas[v - 1]
                test_schema_str = test_schema_strs[v - 1]
                self.assertEqual(reg_schema.canonical_schema_str,
                                 test_schema_str, 'schema string mismatch')

    ########################################################################
    # subject schema registration tests
    ########################################################################
    def test_bare_register_schema(self):
        '''register_schema_for_topic() - as expected'''
        self.bare_register_schema_skeleton(self.schema_str)

    def test_bare_reg_fail_on_empty_schema(self):
        '''register_schema_for_topic() - fail on empty schema'''
        try:
            self.bare_register_schema_skeleton(None)
            self.fail('should have thrown a TASRError')
        except tasr.client.TASRError as te:
            self.assertTrue(te, 'Missing TASRError')

    def test_bare_reg_fail_on_invalid_schema(self):
        '''register_schema_for_topic() - fail on invalid schema'''
        try:
            bad_schema = '%s }' % self.schema_str
            self.bare_register_schema_skeleton(bad_schema)
            self.fail('should have thrown a ValueError')
        except tasr.client.TASRError as te:
            # a TASRError here means the wrong failure mode was hit
            self.fail('should have thrown a ValueError')
        except ValueError:
            pass

    def test_bare_reg_and_rereg(self):
        '''register_schema_for_topic() - multi calls, same schema'''
        rs1 = self.bare_register_schema_skeleton(self.schema_str)
        rs2 = self.bare_register_schema_skeleton(self.schema_str)
        self.assertEqual(rs1, rs2, 'reg and rereg schemas unequal!')

    def test_bare_register_schema_if_latest(self):
        '''register_schema_if_latest() - as expected'''
        self.bare_register_schema_skeleton(self.schema_str)
        with httmock.HTTMock(self.route_to_testapp):
            alt_schema_str = self.get_schema_permutation(self.schema_str)
            cur_latest_ver = 1
            rs = tasr.client.register_schema_if_latest(self.event_type,
                                                       cur_latest_ver,
                                                       alt_schema_str,
                                                       self.host,
                                                       self.port)
            self.assertEqual(rs.current_version(self.event_type), 2, 'bad ver')

    def test_bare_fail_register_schema_if_latest_stale_version(self):
        '''register_schema_if_latest() - stale version must be rejected'''
        self.bare_register_schema_skeleton(self.schema_str)
        alt_schema_str = self.get_schema_permutation(self.schema_str)
        self.bare_register_schema_skeleton(alt_schema_str)
        # so cur ver is now 2
        with httmock.HTTMock(self.route_to_testapp):
            old_ver = 1
            try:
                tasr.client.register_schema_if_latest(self.event_type,
                                                      old_ver,
                                                      self.schema_str,
                                                      self.host,
                                                      self.port)
                self.fail('expected a TASRError')
            except tasr.client.TASRError as te:
                self.assertTrue(te, 'Missing TASRError')

    def test_bare_fail_register_schema_if_latest_bad_version(self):
        '''register_schema_if_latest() - nonexistent version must be rejected'''
        self.bare_register_schema_skeleton(self.schema_str)
        # so cur ver is now 1
        with httmock.HTTMock(self.route_to_testapp):
            alt_schema_str = self.get_schema_permutation(self.schema_str)
            bad_ver = 2
            try:
                tasr.client.register_schema_if_latest(self.event_type,
                                                      bad_ver,
                                                      alt_schema_str,
                                                      self.host,
                                                      self.port)
                self.fail('expected a TASRError')
            except tasr.client.TASRError as te:
                self.assertTrue(te, 'Missing TASRError')

    ########################################################################
    # schema retrieval tests for TASR S+V API
    ########################################################################
    def test_bare_lookup_by_schema_str(self):
        '''lookup_by_schema_str() - as expected'''
        reg_rs = self.bare_register_schema_skeleton(self.schema_str)
        with httmock.HTTMock(self.route_to_testapp):
            func = tasr.client.lookup_by_schema_str
            ret_rs = func(self.event_type,
                          reg_rs.canonical_schema_str,
                          self.host, self.port)
            self.assertEqual(reg_rs.sha256_id, ret_rs.sha256_id, 'ID mismatch')

    def bare_get_for_subject_skeleton(self, subject_name, version):
        '''lookup_by_version() - util method'''
        with httmock.HTTMock(self.route_to_testapp):
            func = tasr.client.lookup_by_version
            return func(subject_name, version, self.host, self.port)

    def test_bare_fail_lookup_by_version_bad_version(self):
        '''lookup_by_version() - bad version'''
        reg_rs = self.bare_register_schema_skeleton(self.schema_str)
        bad_ver = reg_rs.current_version(self.event_type) + 1
        try:
            # NOTE(review): self.schema_str is passed where other callers pass
            # a subject name; the lookup still fails as the test expects, but
            # self.event_type looks like the intended argument -- confirm.
            self.bare_get_for_subject_skeleton(self.schema_str, bad_ver)
            self.fail('Should have thrown an TASRError')
        except tasr.client_legacy.TASRError as te:
            self.assertTrue(te, 'Missing TASRError')

    def test_bare_lookup_by_version(self):
        '''lookup_by_version() - multiple versions, as expected'''
        schemas = []
        for v in range(1, 50):
            ver_schema_str = self.get_schema_permutation(self.schema_str,
                                                         "f_%s" % v)
            # whitespace gets normalized, so do that locally to the submitted
            # schema string so we have an accurate target for comparison
            ras = tasr.registered_schema.RegisteredAvroSchema()
            ras.schema_str = ver_schema_str
            canonical_ver_schema_str = ras.canonical_schema_str
            schemas.append(canonical_ver_schema_str)
            # reg with the non-canonicalized schema string
            rs = self.bare_register_schema_skeleton(ver_schema_str)
            self.assertEqual(canonical_ver_schema_str, rs.schema_str,
                             'Schema string modified!')
            self.assertIn(self.event_type, rs.group_names,
                          'Topic not in registered schema object.')
        # now pull them by version and check they match what we sent originally
        for v in range(1, 50):
            rs = self.bare_get_for_subject_skeleton(self.event_type, v)
            self.assertEqual(schemas[v - 1], rs.canonical_schema_str,
                             'Unexpected version.')

    def test_bare_lookup_by_version_old_version(self):
        '''get_schema_for_topic() - non-sequential re-reg'''
        alt_schema_str = self.get_schema_permutation(self.schema_str)
        rs1 = self.bare_register_schema_skeleton(self.schema_str)
        self.bare_register_schema_skeleton(alt_schema_str)
        rs3 = self.bare_register_schema_skeleton(self.schema_str)
        self.assertEqual(3, rs3.current_version(self.event_type),
                         'unexpected version')
        # now get version 1 -- should be same schema, and should list
        # requested version as "current"
        rs = self.bare_get_for_subject_skeleton(self.event_type, 1)
        self.assertEqual(rs1.canonical_schema_str, rs.canonical_schema_str,
                         'Unexpected schema string change between v1 and v3.')
        self.assertEqual(1, rs.current_version(self.event_type),
                         'Expected different current version value.')

    def test_bare_lookup_by_sha256_id_str(self):
        '''lookup_by_id_str() - multiple versions, as expected'''
        sha256_ids = []
        schemas = []
        for v in range(1, 50):
            ver_schema_str = self.get_schema_permutation(self.schema_str,
                                                         "fn_%s" % v)
            # whitespace gets normalized, so do that locally to the submitted
            # schema string so we have an accurate target for comparison
            ras = tasr.registered_schema.RegisteredAvroSchema()
            ras.schema_str = ver_schema_str
            canonical_ver_schema_str = ras.canonical_schema_str
            schemas.append(canonical_ver_schema_str)
            # reg with the non-canonicalized schema string
            rs = self.bare_register_schema_skeleton(ver_schema_str)
            self.assertEqual(canonical_ver_schema_str, rs.schema_str,
                             'Schema string modified!')
            self.assertIn(self.event_type, rs.group_names,
                          'Topic not in registered schema object.')
            self.assertEqual(ras.sha256_id, rs.sha256_id, 'ID mismatch')
            sha256_ids.append(rs.sha256_id)
        # now pull them by sha256_id and check they match
        with httmock.HTTMock(self.route_to_testapp):
            for v in range(1, 50):
                sha256_id = sha256_ids[v - 1]
                schema_str = schemas[v - 1]
                try:
                    rs = tasr.client.lookup_by_id_str(self.event_type,
                                                      sha256_id,
                                                      self.host,
                                                      self.port)
                    self.assertEqual(schema_str, rs.canonical_schema_str,
                                     'schema string mismatch')
                except tasr.client.TASRError as terr:
                    # Python 2 print statement; a lookup failure is only
                    # logged here -- the assert above is the real check.
                    print terr

    def test_bare_lookup_latest(self):
        '''lookup_latest() - latest of two registered versions'''
        self.bare_register_schema_skeleton(self.schema_str)
        alt_schema_str = self.get_schema_permutation(self.schema_str)
        self.bare_register_schema_skeleton(alt_schema_str)
        # so cur ver is now 2
        with httmock.HTTMock(self.route_to_testapp):
            rs = tasr.client.lookup_latest(self.event_type,
                                           self.host,
                                           self.port)
            self.assertEqual(2, rs.current_version(self.event_type), 'bad ver')
if __name__ == "__main__":
    # Allow running this module directly (outside a test runner).
    SUITE = unittest.TestLoader().loadTestsFromTestCase(TestTASRClientMethods)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
| apache-2.0 |
fastflo/emma | emmalib/widgets/querytab/QueryTabResultPopup.py | 1 | 12153 | """
QueryTabResultPopup
"""
# -*- coding: utf-8 -*-
# emma
#
# Copyright (C) 2006 Florian Schmidt (flo@fastflo.de)
# 2014 Nickolay Karnaukhov (mr.electronick@gmail.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gtk
class QueryTabResultPopup(gtk.Menu):
    """Context menu shown over a query result grid.

    @param query: QueryTab
    @param is_single_row: Bool -- whether exactly one result row is selected;
        controls which items appear and their singular/plural labels.
    """

    def __init__(self, query, is_single_row):
        super(QueryTabResultPopup, self).__init__()
        self.query = query
        if is_single_row:
            # field-level copy only makes sense for a single selected row
            self.copy_field_value = self.add_imi(
                gtk.STOCK_COPY,
                'copy_field_value',
                'Copy field value'
            )
            self.add_sep()
        self.copy_record_as_csv = self.add_imi(
            gtk.STOCK_COPY,
            'copy_record_as_csv',
            'Copy Record As CSV' if is_single_row else 'Copy Records As CSV'
        )
        self.copy_record_as_quoted_csv = self.add_imi(
            gtk.STOCK_COPY,
            'copy_record_as_quoted_csv',
            'Copy Record As Quoted CSV' if is_single_row else 'Copy Records As Quoted CSV'
        )
        self.add_sep()
        self.copy_column_as_csv = self.add_imi(
            gtk.STOCK_COPY,
            'copy_column_as_csv',
            'Copy Column As CSV'
        )
        self.copy_column_as_quoted_csv = self.add_imi(
            gtk.STOCK_COPY,
            'copy_column_as_quoted_csv',
            'Copy Column As Quoted CSV'
        )
        self.copy_column_names = self.add_imi(
            gtk.STOCK_COPY,
            'copy_column_names',
            'Copy Column Names'
        )
        # self.set_field_value = self.add_imi(
        #     gtk.STOCK_COPY, 'set_field_value', 'Set Field Value to...')
        self.add_sep()
        self.add_record = self.add_imi(
            gtk.STOCK_ADD,
            'add_record',
            'Add record'
        )
        self.delete_record = self.add_imi(
            gtk.STOCK_DELETE,
            'delete_record',
            'Delete record' if is_single_row else 'Delete records'
        )
        self.show_all()

    def add_sep(self):
        """
        Append a separator item to the menu.
        """
        sep = gtk.SeparatorMenuItem()
        self.append(sep)

    def add_imi(self, stock, name, title):
        """
        Append an ImageMenuItem wired to self.activated.

        @param stock: gtk stock id used for the item's image
        @param name: widget name, used by activated() to dispatch
        @param title: visible label text
        @return: the created gtk.ImageMenuItem
        """
        item = gtk.ImageMenuItem(stock)
        item.set_name(name)
        item.set_label(title)
        item.set_always_show_image(True)
        item.connect('activate', self.activated)
        self.append(item)
        return item

    def activated(self, item):
        """
        Dispatch a menu activation by the activated item's widget name.

        NOTE(review): several handled names (set_value_null, set_value_now,
        set_value_unix_timestamp, set_value_as_password, set_value_to_sha)
        have no corresponding item created in __init__ here -- presumably
        created elsewhere or legacy; confirm.

        @param item: the gtk.ImageMenuItem that was activated
        @return: None
        """
        q = self.query
        path, column = q.treeview.get_cursor()
        _iter = q.model.get_iter(path)
        if item.name == "copy_field_value":
            # find the model column index matching the cursor column
            col_max = q.model.get_n_columns()
            for col_num in range(col_max):
                if column == q.treeview.get_column(col_num):
                    break
            else:
                return
            value = q.model.get_value(_iter, col_num)
            self.query.emma.clipboard.set_text(value)
            self.query.emma.pri_clipboard.set_text(value)
        elif item.name == "copy_record_as_csv":
            col_max = q.model.get_n_columns()
            value = ""
            for col_num in range(col_max):
                if value:
                    value += self.query.emma.config.get("copy_record_as_csv_delim")
                v = q.model.get_value(_iter, col_num)
                if v is not None:
                    value += v
            self.query.emma.clipboard.set_text(value)
            self.query.emma.pri_clipboard.set_text(value)
        elif item.name == "copy_record_as_quoted_csv":
            col_max = q.model.get_n_columns()
            value = ""
            for col_num in range(col_max):
                if value:
                    value += self.query.emma.config.get("copy_record_as_csv_delim")
                v = q.model.get_value(_iter, col_num)
                if v is not None:
                    # escape embedded double quotes before wrapping
                    v = v.replace("\"", "\\\"")
                    value += '"%s"' % v
            self.query.emma.clipboard.set_text(value)
            self.query.emma.pri_clipboard.set_text(value)
        elif item.name == "copy_column_as_csv":
            col_max = q.model.get_n_columns()
            for col_num in range(col_max):
                if column == q.treeview.get_column(col_num):
                    break
            else:
                # print "column not found!"
                return
            value = ""
            _iter = q.model.get_iter_first()
            while _iter:
                if value:
                    value += self.query.emma.config.get("copy_record_as_csv_delim")
                v = q.model.get_value(_iter, col_num)
                if v is not None:
                    value += v
                _iter = q.model.iter_next(_iter)
            self.query.emma.clipboard.set_text(value)
            self.query.emma.pri_clipboard.set_text(value)
        elif item.name == "copy_column_as_quoted_csv":
            col_max = q.model.get_n_columns()
            for col_num in range(col_max):
                if column == q.treeview.get_column(col_num):
                    break
            else:
                # print "column not found!"
                return
            value = ""
            _iter = q.model.get_iter_first()
            while _iter:
                if value:
                    value += self.query.emma.config.get("copy_record_as_csv_delim")
                v = q.model.get_value(_iter, col_num)
                if v is not None:
                    v = v.replace("\"", "\\\"")
                    value += '"%s"' % v
                _iter = q.model.iter_next(_iter)
            self.query.emma.clipboard.set_text(value)
            self.query.emma.pri_clipboard.set_text(value)
        elif item.name == "copy_column_names":
            value = ""
            for col in q.treeview.get_columns():
                if value:
                    value += self.query.emma.config.get("copy_record_as_csv_delim")
                # gtk doubles underscores for mnemonics; undo that here
                value += col.get_title().replace("__", "_")
            self.query.emma.clipboard.set_text(value)
            self.query.emma.pri_clipboard.set_text(value)
        elif item.name == "set_value_null":
            col_max = q.model.get_n_columns()
            for col_num in range(col_max):
                if column == q.treeview.get_column(col_num):
                    break
            else:
                # print "column not found!"
                return
            table, where, field, value, row_iter = q.get_unique_where(q.last_source, path, col_num)
            update_query = "update `%s` set `%s`=NULL where %s limit 1" % (table, field, where)
            # NOTE(review): this branch calls self.query.query() while the
            # other set_value_* branches use self.query.current_host.query();
            # confirm both paths are equivalent.
            if self.query.query(update_query, encoding=q.encoding):
                q.model.set_value(row_iter, col_num, None)
        elif item.name == "set_value_now":
            col_max = q.model.get_n_columns()
            for col_num in range(col_max):
                if column == q.treeview.get_column(col_num):
                    break
            else:
                # print "column not found!"
                return
            table, where, field, value, row_iter = q.get_unique_where(q.last_source, path, col_num)
            update_query = "update `%s` set `%s`=now() where %s limit 1" % (table, field, where)
            if not self.query.current_host.query(update_query, encoding=q.encoding):
                return
            # re-read the value the server actually stored
            self.query.current_host.query(
                "select `%s` from `%s` where %s limit 1" % (field, table, where))
            result = self.query.current_host.handle.store_result().fetch_row(0)
            if len(result) < 1:
                # print "error: can't find modfied row!?"
                return
            q.model.set_value(row_iter, col_num, result[0][0])
        elif item.name == "set_value_unix_timestamp":
            col_max = q.model.get_n_columns()
            for col_num in range(col_max):
                if column == q.treeview.get_column(col_num):
                    break
            else:
                # print "column not found!"
                return
            table, where, field, value, row_iter = q.get_unique_where(q.last_source, path, col_num)
            update_query = "update `%s` set `%s`=unix_timestamp(now()) where %s limit 1" \
                % (table, field, where)
            if not self.query.current_host.query(update_query, encoding=q.encoding):
                return
            self.query.current_host.query(
                "select `%s` from `%s` where %s limit 1" % (field, table, where))
            result = self.query.current_host.handle.store_result().fetch_row(0)
            if len(result) < 1:
                # print "error: can't find modfied row!?"
                return
            q.model.set_value(row_iter, col_num, result[0][0])
        elif item.name == "set_value_as_password":
            col_max = q.model.get_n_columns()
            for col_num in range(col_max):
                if column == q.treeview.get_column(col_num):
                    break
            else:
                # print "column not found!"
                return
            table, where, field, value, row_iter = q.get_unique_where(q.last_source, path, col_num)
            update_query = "update `%s` set `%s`=password('%s') where %s limit 1" % \
                (
                    table,
                    field,
                    self.query.current_host.escape(value),
                    where
                )
            if not self.query.current_host.query(update_query, encoding=q.encoding):
                return
            self.query.current_host.query(
                "select `%s` from `%s` where %s limit 1" % (field, table, where))
            result = self.query.current_host.handle.store_result().fetch_row(0)
            if len(result) < 1:
                # print "error: can't find modfied row!?"
                return
            q.model.set_value(row_iter, col_num, result[0][0])
        elif item.name == "set_value_to_sha":
            col_max = q.model.get_n_columns()
            for col_num in range(col_max):
                if column == q.treeview.get_column(col_num):
                    break
            else:
                # print "column not found!"
                return
            table, where, field, value, row_iter = q.get_unique_where(q.last_source, path, col_num)
            update_query = "update `%s` set `%s`=sha1('%s') where %s limit 1" % \
                (
                    table,
                    field,
                    self.query.current_host.escape(value),
                    where
                )
            if not self.query.current_host.query(update_query, encoding=q.encoding):
                return
            self.query.current_host.query(
                "select `%s` from `%s` where %s limit 1" % (field, table, where)
            )
            result = self.query.current_host.handle.store_result().fetch_row(0)
            if len(result) < 1:
                # print "error: can't find modfied row!?"
                return
            q.model.set_value(row_iter, col_num, result[0][0])
| gpl-2.0 |
robozman/pymumblegui | pymumble/pymumble_py3/commands.py | 1 | 1298 | # -*- coding: utf-8 -*-
from threading import Lock
from collections import deque
class Commands:
    """Queue of commands to be sent to the murmur server, from any thread.

    Each command carries its own lock, used as a semaphore to signal when
    an answer has been received.
    """

    def __init__(self):
        self.id = 0           # last message id handed out
        self.queue = deque()  # pending commands; newest pushed on the left
        self.lock = Lock()    # guards id and queue

    def new_cmd(self, cmd):
        """Add a command to the queue.

        Assigns the command the next message id, acquires the command's own
        lock (released later by answer()), and returns that lock so the
        caller can block on it until the command completes.
        """
        self.lock.acquire()
        self.id += 1
        cmd.msg_id = self.id
        self.queue.appendleft(cmd)
        cmd.lock.acquire()
        self.lock.release()
        return cmd.lock

    def is_cmd(self):
        """Check if there is a command waiting in the queue."""
        # Simplified from an if/else returning True/False literals.
        return len(self.queue) > 0

    def pop_cmd(self):
        """Return the next (oldest) command and remove it from the queue.

        Returns None when the queue is empty.
        """
        # "with" guarantees the lock is released even if pop() raises.
        with self.lock:
            if self.queue:
                return self.queue.pop()
            return None

    def answer(self, cmd):
        """Unlock the command to signal its completion."""
        cmd.lock.release()
| gpl-3.0 |
Azure/azure-sdk-for-python | sdk/servicebus/azure-servicebus/samples/sync_samples/schedule_topic_messages_and_cancellation.py | 1 | 2330 | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
Example to show scheduling messages to and cancelling messages from a Service Bus Queue.
"""
# pylint: disable=C0111
import os
import datetime
from azure.servicebus import ServiceBusClient, ServiceBusMessage
# Required configuration comes from the environment; a missing variable
# raises KeyError at import time so the sample fails fast.
CONNECTION_STR = os.environ["SERVICE_BUS_CONNECTION_STR"]
TOPIC_NAME = os.environ["SERVICE_BUS_TOPIC_NAME"]
def schedule_single_message(sender):
    """Schedule one message for delivery 30 seconds from now.

    Returns the result of sender.schedule_messages() for the single message
    (used later to cancel the scheduled delivery).
    """
    message = ServiceBusMessage("Message to be scheduled")
    scheduled_time_utc = datetime.datetime.utcnow() + datetime.timedelta(seconds=30)
    sequence_number = sender.schedule_messages(message, scheduled_time_utc)
    return sequence_number
def schedule_multiple_messages(sender):
    """Schedule a batch of ten messages for delivery 30 seconds from now.

    Returns the result of sender.schedule_messages() for the batch
    (used later to cancel the scheduled deliveries).
    """
    batch = [ServiceBusMessage("Message to be scheduled") for _ in range(10)]
    delivery_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=30)
    return sender.schedule_messages(batch, delivery_time)
def main():
    """Connect to Service Bus, schedule single and batched messages on the
    topic, print their sequence numbers, then cancel all of them."""
    servicebus_client = ServiceBusClient.from_connection_string(
        conn_str=CONNECTION_STR, logging_enable=True
    )
    # Context managers close the client and sender cleanly.
    with servicebus_client:
        sender = servicebus_client.get_topic_sender(topic_name=TOPIC_NAME)
        with sender:
            sequence_number = schedule_single_message(sender)
            print(
                "Single message is scheduled and sequence number is {}".format(
                    sequence_number
                )
            )
            sequence_numbers = schedule_multiple_messages(sender)
            print(
                "Multiple messages are scheduled and sequence numbers are {}".format(
                    sequence_numbers
                )
            )
            # Cancellation uses the sequence numbers returned at scheduling.
            sender.cancel_scheduled_messages(sequence_number)
            sender.cancel_scheduled_messages(sequence_numbers)
            print("All scheduled messages are cancelled.")


if __name__ == "__main__":
    main()
| mit |
platformio/platformio | platformio/commands/home/rpc/handlers/os.py | 1 | 4850 | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import io
import os
import shutil
from functools import cmp_to_key
import click
from twisted.internet import defer # pylint: disable=import-error
from platformio import app, fs, util
from platformio.commands.home import helpers
from platformio.compat import PY2, get_filesystem_encoding, glob_recursive
class OSRPC(object):
    """RPC handlers exposing OS/filesystem helpers to the PIO Home frontend."""

    @staticmethod
    @defer.inlineCallbacks
    def fetch_content(uri, data=None, headers=None, cache_valid=None):
        """Fetch a remote URI (GET, or POST when data is given), optionally
        serving/storing the response text via the local content cache."""
        if not headers:
            # default to a browser-like User-Agent
            headers = {
                "User-Agent": (
                    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) "
                    "AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 "
                    "Safari/603.3.8"
                )
            }
        cache_key = app.ContentCache.key_from_args(uri, data) if cache_valid else None
        with app.ContentCache() as cc:
            if cache_key:
                result = cc.get(cache_key)
                if result is not None:
                    defer.returnValue(result)
        # check internet before and resolve issue with 60 seconds timeout
        util.internet_on(raise_exception=True)
        session = helpers.requests_session()
        if data:
            r = yield session.post(uri, data=data, headers=headers)
        else:
            r = yield session.get(uri, headers=headers)
        r.raise_for_status()
        result = r.text
        if cache_valid:
            with app.ContentCache() as cc:
                cc.set(cache_key, result, cache_valid)
        defer.returnValue(result)

    def request_content(self, uri, data=None, headers=None, cache_valid=None):
        """Return content for an http(s) URI (deferred) or a local file path
        (read as UTF-8 text); None if the path does not exist."""
        if uri.startswith("http"):
            return self.fetch_content(uri, data, headers, cache_valid)
        if os.path.isfile(uri):
            with io.open(uri, encoding="utf-8") as fp:
                return fp.read()
        return None

    @staticmethod
    def open_url(url):
        # Open the URL in the user's default browser.
        return click.launch(url)

    @staticmethod
    def reveal_file(path):
        # locate=True opens the containing folder with the file selected.
        return click.launch(
            path.encode(get_filesystem_encoding()) if PY2 else path, locate=True
        )

    @staticmethod
    def open_file(path):
        # Open the file with the OS-default application.
        return click.launch(path.encode(get_filesystem_encoding()) if PY2 else path)

    @staticmethod
    def is_file(path):
        return os.path.isfile(path)

    @staticmethod
    def is_dir(path):
        return os.path.isdir(path)

    @staticmethod
    def make_dirs(path):
        return os.makedirs(path)

    @staticmethod
    def get_file_mtime(path):
        return os.path.getmtime(path)

    @staticmethod
    def rename(src, dst):
        return os.rename(src, dst)

    @staticmethod
    def copy(src, dst):
        # symlinks=True preserves symlinks instead of copying their targets.
        return shutil.copytree(src, dst, symlinks=True)

    @staticmethod
    def glob(pathnames, root=None):
        """Recursively glob one pattern or a list of patterns, optionally
        rooted at *root*; returns a de-duplicated list of matches."""
        if not isinstance(pathnames, list):
            pathnames = [pathnames]
        result = set()
        for pathname in pathnames:
            result |= set(
                glob_recursive(os.path.join(root, pathname) if root else pathname)
            )
        return list(result)

    @staticmethod
    def list_dir(path):
        """List (name, is_dir) entries of *path*, directories first, then
        case-insensitively by name; unreadable entries are skipped."""
        def _cmp(x, y):
            # directories sort before files
            if x[1] and not y[1]:
                return -1
            if not x[1] and y[1]:
                return 1
            # then case-insensitive name order
            if x[0].lower() > y[0].lower():
                return 1
            if x[0].lower() < y[0].lower():
                return -1
            return 0

        items = []
        if path.startswith("~"):
            path = fs.expanduser(path)
        if not os.path.isdir(path):
            return items
        for item in os.listdir(path):
            try:
                item_is_dir = os.path.isdir(os.path.join(path, item))
                if item_is_dir:
                    # probe readability; raises OSError if not accessible
                    os.listdir(os.path.join(path, item))
                items.append((item, item_is_dir))
            except OSError:
                pass
        return sorted(items, key=cmp_to_key(_cmp))

    @staticmethod
    def get_logical_devices():
        """Return the logical devices reported by util.get_logical_devices()."""
        items = []
        for item in util.get_logical_devices():
            if item["name"]:
                # NOTE(review): self-assignment is a no-op -- presumably a
                # leftover from a PY2 decode/normalization step; confirm.
                item["name"] = item["name"]
            items.append(item)
        return items
| apache-2.0 |
DdragonEver/flasky | app/api_1_0/authentication.py | 35 | 1258 | from flask import g, jsonify
from flask.ext.httpauth import HTTPBasicAuth
from ..models import User, AnonymousUser
from . import api
from .errors import unauthorized, forbidden
# Blueprint-local HTTP Basic Auth handler; the decorated callbacks in this
# module hook into it.
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(email_or_token, password):
    """Flask-HTTPAuth credential callback supporting three styles.

    - empty username: anonymous access
    - token + empty password: token authentication
    - email + password: regular login

    Stores the resolved user on flask.g (plus g.token_used for the two
    authenticated paths) and returns whether authentication succeeded.
    """
    if email_or_token == '':
        # No credentials at all: proceed anonymously.
        g.current_user = AnonymousUser()
        return True
    if password == '':
        # Treat the "username" field as an auth token.
        g.current_user = User.verify_auth_token(email_or_token)
        g.token_used = True
        return g.current_user is not None
    # Regular email/password login.
    account = User.query.filter_by(email=email_or_token).first()
    if not account:
        return False
    g.current_user = account
    g.token_used = False
    return account.verify_password(password)
@auth.error_handler
def auth_error():
    # Respond to failed authentication with the API's shared 401 helper.
    return unauthorized('Invalid credentials')
@api.before_request
@auth.login_required
def before_request():
    """Block every API request from an authenticated but unconfirmed account."""
    current = g.current_user
    if not current.is_anonymous() and not current.confirmed:
        return forbidden('Unconfirmed account')
@api.route('/token')
def get_token():
    """Issue a one-hour auth token.

    Anonymous callers and callers who authenticated with a token (rather
    than a password) are rejected, so tokens cannot mint new tokens.
    """
    if g.current_user.is_anonymous() or g.token_used:
        return unauthorized('Invalid credentials')
    token = g.current_user.generate_auth_token(expiration=3600)
    return jsonify({'token': token, 'expiration': 3600})
| mit |
smartboyathome/Wonderland-Engine | tests/CheshireCatTests/test_team_checks_attacks.py | 1 | 3636 | '''
Copyright (c) 2012 Alexander Abbott
This file is part of the Cheshire Cyber Defense Scoring Engine (henceforth
referred to as Cheshire).
Cheshire is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
Cheshire is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
more details.
You should have received a copy of the GNU Affero General Public License
along with Cheshire. If not, see <http://www.gnu.org/licenses/>.
'''
import json
from CheshireCat.utils import convert_datetime_to_timestamp, convert_all_datetime_to_timestamp
from tests import show_difference_between_dicts
from tests.CheshireCatTests import FlaskTestCase
class TestRestTeamChecksServicesInterface(FlaskTestCase):
    """REST tests for the per-team attack-check endpoints
    (/teams/<id>/checks/attacks[...]).  Fixtures come from self.data."""
    def test_get_all_attack_checks_for_specific_team(self):
        # Happy path: admin lists every completed attacker check for team 1.
        self.login_user('admin', 'admin')
        rest_result = self.app.get('/teams/1/checks/attacks')
        assert rest_result.status_code == 200
        # Expected payload: completed checks for team '1' of type 'attacker'.
        expected_result = [obj for obj in self.data['completed_checks'] if obj['team_id'] == '1' and obj['type']=='attacker']
        json_result = json.loads(rest_result.data)
        print rest_result.data
        print expected_result
        assert len(json_result) == len(expected_result)
        for i in expected_result:
            # The REST payload omits team_id/type and serialises datetimes
            # as timestamps, so normalise the fixtures the same way.
            del i['team_id'], i['type']
            convert_all_datetime_to_timestamp(i, ['timestamp', 'time_to_check'])
        assert json_result == expected_result
    def test_get_all_attack_checks_for_specific_team_with_params(self):
        # The collection endpoint rejects any request parameters with a 403.
        self.login_user('admin', 'admin')
        query_data = {
            "failure": "assured"
        }
        result_data = {
            "type": "IllegalParameter",
            "reason": "Parameters are not allowed for this interface."
        }
        result = self.app.get('/teams/1/checks/attacks', data=json.dumps(query_data))
        print result.data
        assert result.status_code == 403
        assert json.loads(result.data) == result_data
    def test_get_specific_attack_check_for_specific_team(self):
        # Single-check lookup: filter fixtures down to one check id.
        self.login_user('admin', 'admin')
        rest_result = self.app.get('/teams/1/checks/attacks/MySecurityHole')
        print rest_result.status_code, rest_result.data
        assert rest_result.status_code == 200
        expected_result = [obj for obj in self.data['completed_checks'] if obj['team_id'] == '1' and obj['type']=='attacker' and obj['id'] == 'MySecurityHole']
        json_result = json.loads(rest_result.data)
        assert len(json_result) == len(expected_result)
        for i in expected_result:
            # Single-check responses additionally omit the id field.
            del i['team_id'], i['type'], i['id']
            convert_all_datetime_to_timestamp(i, ['timestamp', 'time_to_check'])
        assert json_result == expected_result
    def test_get_specific_attack_check_for_specific_team_with_params(self):
        # The single-check endpoint rejects parameters the same way.
        self.login_user('admin', 'admin')
        query_data = {
            "failure": "assured"
        }
        result_data = {
            "type": "IllegalParameter",
            "reason": "Parameters are not allowed for this interface."
        }
        result = self.app.get('/teams/1/checks/attacks/MySecurityHole', data=json.dumps(query_data))
        print result.data
        assert result.status_code == 403
        assert json.loads(result.data) == result_data | agpl-3.0 |
aeischeid/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_memorizingfile.py | 496 | 4252 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for memorizingfile module."""
import StringIO
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import memorizingfile
class UtilTest(unittest.TestCase):
    """A unittest for memorizingfile module."""
    def check(self, memorizing_file, num_read, expected_list):
        # Read num_read lines, then verify exactly expected_list was memorized.
        for unused in range(num_read):
            memorizing_file.readline()
        actual_list = memorizing_file.get_memorized_lines()
        self.assertEqual(len(expected_list), len(actual_list))
        for expected, actual in zip(expected_list, actual_list):
            self.assertEqual(expected, actual)
    def check_with_size(self, memorizing_file, read_size, expected_list):
        # Drive readline() with a byte limit, reassembling the partial reads
        # back into whole lines, then verify both the reassembled lines and
        # the memorized lines match expected_list.
        read_list = []
        read_line = ''
        while True:
            line = memorizing_file.readline(read_size)
            line_length = len(line)
            # readline(read_size) must never return more than read_size bytes.
            self.assertTrue(line_length <= read_size)
            if line_length == 0:
                # EOF: flush a trailing partial line without a newline.
                if read_line != '':
                    read_list.append(read_line)
                break
            read_line += line
            if line[line_length - 1] == '\n':
                # Completed a full line; start accumulating the next one.
                read_list.append(read_line)
                read_line = ''
        actual_list = memorizing_file.get_memorized_lines()
        self.assertEqual(len(expected_list), len(actual_list))
        self.assertEqual(len(expected_list), len(read_list))
        for expected, actual, read in zip(expected_list, actual_list,
                                          read_list):
            self.assertEqual(expected, actual)
            self.assertEqual(expected, read)
    def test_get_memorized_lines(self):
        memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
            'Hello\nWorld\nWelcome'))
        self.check(memorizing_file, 3, ['Hello\n', 'World\n', 'Welcome'])
    def test_get_memorized_lines_limit_memorized_lines(self):
        # Second constructor argument caps memorization at two lines
        # (per the expected list below).
        memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
            'Hello\nWorld\nWelcome'), 2)
        self.check(memorizing_file, 3, ['Hello\n', 'World\n'])
    def test_get_memorized_lines_empty_file(self):
        # Over-reading an empty file memorizes nothing.
        memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
            ''))
        self.check(memorizing_file, 10, [])
    def test_get_memorized_lines_with_size(self):
        # Exercise every read size from 1 to 9 over the same three lines.
        for size in range(1, 10):
            memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
                'Hello\nWorld\nWelcome'))
            self.check_with_size(memorizing_file, size,
                                 ['Hello\n', 'World\n', 'Welcome'])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
auduny/home-assistant | homeassistant/components/lifx_cloud/scene.py | 8 | 2787 | """Support for LIFX Cloud scenes."""
import asyncio
import logging
import aiohttp
from aiohttp.hdrs import AUTHORIZATION
import async_timeout
import voluptuous as vol
from homeassistant.components.scene import Scene
from homeassistant.const import CONF_TOKEN, CONF_TIMEOUT, CONF_PLATFORM
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Base endpoint of the LIFX HTTP API; formatted with the resource path.
LIFX_API_URL = 'https://api.lifx.com/v1/{0}'
# Default seconds to wait for the cloud before giving up.
DEFAULT_TIMEOUT = 10
# Platform configuration: a mandatory API token plus an optional timeout.
PLATFORM_SCHEMA = vol.Schema({
    vol.Required(CONF_PLATFORM): 'lifx_cloud',
    vol.Required(CONF_TOKEN): cv.string,
    vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
})
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the scenes stored in the LIFX Cloud.

    Fetches /scenes with the configured bearer token and adds one
    LifxCloudScene entity per scene.  Returns False on timeout, network
    failure, or a non-200 response.
    """
    token = config.get(CONF_TOKEN)
    timeout = config.get(CONF_TIMEOUT)
    headers = {AUTHORIZATION: "Bearer {}".format(token)}
    url = LIFX_API_URL.format('scenes')
    try:
        session = async_get_clientsession(hass)
        with async_timeout.timeout(timeout, loop=hass.loop):
            scenes_resp = await session.get(url, headers=headers)
    except (asyncio.TimeoutError, aiohttp.ClientError):
        _LOGGER.exception("Error on %s", url)
        return False
    if scenes_resp.status == 200:
        data = await scenes_resp.json()
        devices = [LifxCloudScene(hass, headers, timeout, scene)
                   for scene in data]
        async_add_entities(devices)
        return True
    if scenes_resp.status == 401:
        _LOGGER.error("Unauthorized (bad token?) on %s", url)
        return False
    _LOGGER.error("HTTP error %d on %s", scenes_resp.status, url)
    return False
class LifxCloudScene(Scene):
    """Representation of a LIFX Cloud scene."""

    def __init__(self, hass, headers, timeout, scene_data):
        """Initialize from one entry of the /scenes payload."""
        self.hass = hass
        self._headers = headers
        self._timeout = timeout
        self._name = scene_data["name"]
        self._uuid = scene_data["uuid"]

    @property
    def name(self):
        """Return the name of the scene."""
        return self._name

    async def async_activate(self):
        """Activate the scene via a PUT to the LIFX cloud API."""
        resource = 'scenes/scene_id:%s/activate' % self._uuid
        url = LIFX_API_URL.format(resource)
        try:
            session = async_get_clientsession(self.hass)
            with async_timeout.timeout(self._timeout, loop=self.hass.loop):
                await session.put(url, headers=self._headers)
        except (asyncio.TimeoutError, aiohttp.ClientError):
            _LOGGER.exception("Error on %s", url)
| apache-2.0 |
normtown/SickRage | lib/guessit/transfo/expected_series.py | 33 | 2545 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from guessit.containers import PropertiesContainer
from guessit.matcher import GuessFinder
from guessit.plugins.transformers import Transformer
class ExpectedSeries(Transformer):
    """Transformer matching user-supplied series names inside filenames."""

    def __init__(self):
        # 230 is this transformer's priority in the guessit pipeline.
        Transformer.__init__(self, 230)

    def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options):
        naming_opts.add_argument('-S', '--expected-series', action='append', dest='expected_series',
                                 help='Expected series to parse (can be used multiple times)')

    def should_process(self, mtree, options=None):
        # Only worthwhile when at least one expected series was supplied.
        return options and options.get('expected_series')

    @staticmethod
    def expected_series(string, node=None, options=None):
        container = PropertiesContainer(enhance=True, canonical_from_pattern=False)
        for candidate in options.get('expected_series'):
            if candidate.startswith('re:'):
                # 're:' prefix marks the value as a ready-made regex.
                # NOTE(review): spaces are normalised to '-' — presumably to
                # line up with guessit's separator handling; confirm.
                pattern = candidate[3:].replace(' ', '-')
                container.register_property('series', pattern, enhance=True)
            else:
                # Literal name: escape regex metacharacters so they match
                # verbatim.
                container.register_property('series', re.escape(candidate), enhance=False)
        found = container.find_properties(string, node, options)
        return container.as_guess(found, string)

    def supported_properties(self):
        return ['series']

    def process(self, mtree, options=None):
        GuessFinder(self.expected_series, None, self.log, options).process_nodes(mtree.unidentified_leaves())
| gpl-3.0 |
Alex-Diez/python-tdd-katas | old-katas/string-compress-kata/day-4.py | 1 | 1339 | # -*- codeing: utf-8 -*-
class StringCompressor(object):
    """Run-length encoder: "aabbcc" -> "2a2b2c"."""

    def compress(self, toCompress):
        """Return the run-length encoding of *toCompress*.

        Each run of repeated characters becomes "<count><char>".
        None and the empty string both compress to "".
        """
        # Local import keeps the module's import section unchanged.
        from itertools import groupby
        if toCompress is None:
            return ""
        # groupby yields one (char, run-iterator) pair per run, replacing
        # the manual index/counter bookkeeping with the stdlib equivalent.
        return ''.join('%d%s' % (sum(1 for _ in run), char)
                       for char, run in groupby(toCompress))
import unittest
class StringComperssorTest(unittest.TestCase):
    """Behavioural tests for StringCompressor.compress."""
    def setUp(self):
        # Fresh compressor per test.
        self.compressor = StringCompressor()
    def test_none_compressed_to_an_empty_line(self):
        self.assertEqual("", self.compressor.compress(None))
    def test_an_empty_string_is_compressed_to_another_empty_string(self):
        self.assertEqual("", self.compressor.compress(""))
    def test_one_char_string(self):
        self.assertEqual("1a", self.compressor.compress("a"))
    def test_string_of_unique_chars(self):
        # Singleton runs still carry an explicit count of 1.
        self.assertEqual("1a1b1c", self.compressor.compress("abc"))
    def test_compress_a_string_of_doubled_characters(self):
        self.assertEqual("2a2b2c", self.compressor.compress("aabbcc"))
| mit |
b-dollery/testing | plugins/inventory/ec2.py | 15 | 26335 | #!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import route53
import ConfigParser
from collections import defaultdict
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
    def __init__(self):
        ''' Main execution path '''
        # Inventory grouped by instance IDs, tags, security groups, regions,
        # and availability zones
        self.inventory = self._empty_inventory()
        # Index of hostname (address) to instance ID
        self.index = {}
        # Read settings and parse CLI arguments
        self.read_settings()
        self.parse_cli_args()
        # Cache: hit AWS only when forced or when the cache files are stale.
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()
        # Data to print
        if self.args.host:
            data_to_print = self.get_host_info()
        elif self.args.list:
            # Display list of instances for inventory
            if self.inventory == self._empty_inventory():
                # Nothing fetched this run: fall back to the cached JSON.
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)
        # NOTE(review): data_to_print would be unbound if neither branch ran;
        # parse_cli_args defaults --list to True, so this holds in practice.
        print data_to_print
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
    def read_settings(self):
        ''' Reads the settings from the ec2.ini file '''
        config = ConfigParser.SafeConfigParser()
        # ec2.ini lives next to this script unless EC2_INI_PATH overrides it.
        ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
        ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
        config.read(ec2_ini_path)
        # is eucalyptus?
        self.eucalyptus_host = None
        self.eucalyptus = False
        if config.has_option('ec2', 'eucalyptus'):
            self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
        if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
            self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
        # Regions
        self.regions = []
        configRegions = config.get('ec2', 'regions')
        configRegions_exclude = config.get('ec2', 'regions_exclude')
        if (configRegions == 'all'):
            if self.eucalyptus_host:
                self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
            else:
                # NOTE(review): configRegions_exclude is a raw string, so
                # "not in" is a substring test here, not a list membership
                # test — works for comma-separated values but is fragile.
                for regionInfo in ec2.regions():
                    if regionInfo.name not in configRegions_exclude:
                        self.regions.append(regionInfo.name)
        else:
            self.regions = configRegions.split(",")
        # Destination addresses
        self.destination_variable = config.get('ec2', 'destination_variable')
        self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
        # Route53
        self.route53_enabled = config.getboolean('ec2', 'route53')
        self.route53_excluded_zones = []
        if config.has_option('ec2', 'route53_excluded_zones'):
            self.route53_excluded_zones.extend(
                config.get('ec2', 'route53_excluded_zones', '').split(','))
        # Include RDS instances?
        self.rds_enabled = True
        if config.has_option('ec2', 'rds'):
            self.rds_enabled = config.getboolean('ec2', 'rds')
        # Return all EC2 and RDS instances (if RDS is enabled)
        if config.has_option('ec2', 'all_instances'):
            self.all_instances = config.getboolean('ec2', 'all_instances')
        else:
            self.all_instances = False
        if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
            self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
        else:
            self.all_rds_instances = False
        # Cache related
        cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
        self.cache_path_index = cache_dir + "/ansible-ec2.index"
        self.cache_max_age = config.getint('ec2', 'cache_max_age')
        # Configure nested groups instead of flat namespace.
        if config.has_option('ec2', 'nested_groups'):
            self.nested_groups = config.getboolean('ec2', 'nested_groups')
        else:
            self.nested_groups = False
        # Do we need to just include hosts that match a pattern?
        try:
            pattern_include = config.get('ec2', 'pattern_include')
            if pattern_include and len(pattern_include) > 0:
                self.pattern_include = re.compile(pattern_include)
            else:
                self.pattern_include = None
        except ConfigParser.NoOptionError, e:
            # Option absent: no include filter.
            self.pattern_include = None
        # Do we need to exclude hosts that match a pattern?
        try:
            pattern_exclude = config.get('ec2', 'pattern_exclude');
            if pattern_exclude and len(pattern_exclude) > 0:
                self.pattern_exclude = re.compile(pattern_exclude)
            else:
                self.pattern_exclude = None
        except ConfigParser.NoOptionError, e:
            # Option absent: no exclude filter.
            self.pattern_exclude = None
        # Instance filters (see boto and EC2 API docs)
        self.ec2_instance_filters = defaultdict(list)
        if config.has_option('ec2', 'instance_filters'):
            # Comma-separated key=value pairs, grouped by key.
            for x in config.get('ec2', 'instance_filters', '').split(','):
                filter_key, filter_value = x.split('=')
                self.ec2_instance_filters[filter_key].append(filter_value)
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
self.args = parser.parse_args()
    def do_api_calls_update_cache(self):
        ''' Do API calls to each region, and save data in cache files '''
        # Route53 names are fetched once up front so add_instance can
        # consult self.route53_records for every region.
        if self.route53_enabled:
            self.get_route53_records()
        for region in self.regions:
            self.get_instances_by_region(region)
            if self.rds_enabled:
                self.get_rds_instances_by_region(region)
        # Persist both the inventory and the address->instance index.
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)
    def get_instances_by_region(self, region):
        ''' Makes an AWS EC2 API call to the list of instances in a particular
        region '''
        try:
            if self.eucalyptus:
                conn = boto.connect_euca(host=self.eucalyptus_host)
                # Eucalyptus speaks an older EC2 API revision.
                conn.APIVersion = '2010-08-31'
            else:
                conn = ec2.connect_to_region(region)
            # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
            if conn is None:
                print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
                sys.exit(1)
            reservations = []
            if self.ec2_instance_filters:
                # One API call per filter key; results are concatenated.
                for filter_key, filter_values in self.ec2_instance_filters.iteritems():
                    reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
            else:
                reservations = conn.get_all_instances()
            for reservation in reservations:
                for instance in reservation.instances:
                    self.add_instance(instance, region)
        except boto.exception.BotoServerError, e:
            # Server errors are tolerated for eucalyptus; fatal otherwise.
            if not self.eucalyptus:
                print "Looks like AWS is down again:"
                print e
            sys.exit(1)
    def get_rds_instances_by_region(self, region):
        ''' Makes an AWS API call to the list of RDS instances in a particular
        region '''
        try:
            conn = rds.connect_to_region(region)
            if conn:
                instances = conn.get_all_dbinstances()
                for instance in instances:
                    self.add_rds_instance(instance, region)
        except boto.exception.BotoServerError, e:
            # "Forbidden" is tolerated (presumably the account simply lacks
            # RDS access); any other server error aborts.
            if not e.reason == "Forbidden":
                print "Looks like AWS RDS is down: "
                print e
                sys.exit(1)
def get_instance(self, region, instance_id):
''' Gets details about a specific instance '''
if self.eucalyptus:
conn = boto.connect_euca(self.eucalyptus_host)
conn.APIVersion = '2010-08-31'
else:
conn = ec2.connect_to_region(region)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
sys.exit(1)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
return instance
    def add_instance(self, instance, region):
        ''' Adds an instance to the inventory and index, as long as it is
        addressable '''
        # Only want running instances unless all_instances is True
        if not self.all_instances and instance.state != 'running':
            return
        # Select the best destination address: VPC instances use the
        # configured vpc_destination_variable, others destination_variable.
        if instance.subnet_id:
            dest = getattr(instance, self.vpc_destination_variable)
        else:
            dest = getattr(instance, self.destination_variable)
        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return
        # if we only want to include hosts that match a pattern, skip those that don't
        if self.pattern_include and not self.pattern_include.match(dest):
            return
        # if we need to exclude hosts that match a pattern, skip those
        if self.pattern_exclude and self.pattern_exclude.match(dest):
            return
        # Add to index
        self.index[dest] = [region, instance.id]
        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[instance.id] = [dest]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', instance.id)
        # Inventory: Group by region
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)
        else:
            self.push(self.inventory, region, dest)
        # Inventory: Group by availability zone
        self.push(self.inventory, instance.placement, dest)
        if self.nested_groups:
            self.push_group(self.inventory, region, instance.placement)
        # Inventory: Group by instance type
        type_name = self.to_safe('type_' + instance.instance_type)
        self.push(self.inventory, type_name, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'types', type_name)
        # Inventory: Group by key pair
        if instance.key_name:
            key_name = self.to_safe('key_' + instance.key_name)
            self.push(self.inventory, key_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'keys', key_name)
        # Inventory: Group by security group
        try:
            for group in instance.groups:
                key = self.to_safe("security_group_" + group.name)
                self.push(self.inventory, key, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'security_groups', key)
        except AttributeError:
            # Instances lack .groups on boto < 2.3.0.
            print 'Package boto seems a bit older.'
            print 'Please upgrade boto >= 2.3.0.'
            sys.exit(1)
        # Inventory: Group by tag keys (one group per key and per key=value)
        for k, v in instance.tags.iteritems():
            key = self.to_safe("tag_" + k + "=" + v)
            self.push(self.inventory, key, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
                self.push_group(self.inventory, self.to_safe("tag_" + k), key)
        # Inventory: Group by Route53 domain names if enabled
        if self.route53_enabled:
            route53_names = self.get_instance_route53_names(instance)
            for name in route53_names:
                self.push(self.inventory, name, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'route53', name)
        # Global Tag: tag all EC2 instances
        self.push(self.inventory, 'ec2', dest)
        self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
    def add_rds_instance(self, instance, region):
        ''' Adds an RDS instance to the inventory and index, as long as it is
        addressable '''
        # Only want available instances unless all_rds_instances is True
        if not self.all_rds_instances and instance.status != 'available':
            return
        # Select the best destination address
        #if instance.subnet_id:
            #dest = getattr(instance, self.vpc_destination_variable)
        #else:
            #dest = getattr(instance, self.destination_variable)
        # RDS endpoint is presumably a (host, port) pair; index 0 is the host.
        dest = instance.endpoint[0]
        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return
        # Add to index
        self.index[dest] = [region, instance.id]
        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[instance.id] = [dest]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', instance.id)
        # Inventory: Group by region
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)
        else:
            self.push(self.inventory, region, dest)
        # Inventory: Group by availability zone
        self.push(self.inventory, instance.availability_zone, dest)
        if self.nested_groups:
            self.push_group(self.inventory, region, instance.availability_zone)
        # Inventory: Group by instance type
        type_name = self.to_safe('type_' + instance.instance_class)
        self.push(self.inventory, type_name, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'types', type_name)
        # Inventory: Group by security group
        try:
            if instance.security_group:
                key = self.to_safe("security_group_" + instance.security_group.name)
                self.push(self.inventory, key, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'security_groups', key)
        except AttributeError:
            # .security_group is missing on boto < 2.3.0.
            print 'Package boto seems a bit older.'
            print 'Please upgrade boto >= 2.3.0.'
            sys.exit(1)
        # Inventory: Group by engine
        self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
        # Inventory: Group by parameter group
        self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
        # Global Tag: all RDS instances
        self.push(self.inventory, 'rds', dest)
        self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ]
name_list = set()
for attrib in instance_attributes:
try:
value = getattr(instance, attrib)
except AttributeError:
continue
if value in self.route53_records:
name_list.update(self.route53_records[value])
return list(name_list)
    def get_host_info_dict_from_instance(self, instance):
        # Flatten a boto instance object into a dict of 'ec2_'-prefixed host
        # variables for Ansible.  Scalars are copied through; a handful of
        # complex attributes are special-cased below, and anything
        # unrecognized is silently skipped.
        # NOTE: Python 2 only (`unicode`, `dict.iteritems`).
        instance_vars = {}
        for key in vars(instance):
            value = getattr(instance, key)
            key = self.to_safe('ec2_' + key)
            # Handle complex types
            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
            if key == 'ec2__state':
                instance_vars['ec2_state'] = instance.state or ''
                instance_vars['ec2_state_code'] = instance.state_code
            elif key == 'ec2__previous_state':
                instance_vars['ec2_previous_state'] = instance.previous_state or ''
                instance_vars['ec2_previous_state_code'] = instance.previous_state_code
            elif type(value) in [int, bool]:
                instance_vars[key] = value
            elif type(value) in [str, unicode]:
                instance_vars[key] = value.strip()
            elif type(value) == type(None):
                # None becomes the empty string so templating never sees None.
                instance_vars[key] = ''
            elif key == 'ec2_region':
                instance_vars[key] = value.name
            elif key == 'ec2__placement':
                instance_vars['ec2_placement'] = value.zone
            elif key == 'ec2_tags':
                # Each tag becomes its own ec2_tag_<name> variable.
                for k, v in value.iteritems():
                    key = self.to_safe('ec2_tag_' + k)
                    instance_vars[key] = v
            elif key == 'ec2_groups':
                group_ids = []
                group_names = []
                for group in value:
                    group_ids.append(group.id)
                    group_names.append(group.name)
                instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
                instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
            else:
                pass
            # TODO Product codes if someone finds them useful
            #print key
            #print type(value)
            #print value
        return instance_vars
    def get_host_info(self):
        ''' Get variables about a specific host '''
        if len(self.index) == 0:
            # Need to load index from cache
            self.load_index_from_cache()
        if not self.args.host in self.index:
            # Not in the cached index: try updating the cache once.
            self.do_api_calls_update_cache()
            if not self.args.host in self.index:
                # Host might not exist anymore; report no variables.
                return self.json_format_dict({}, True)
        (region, instance_id) = self.index[self.args.host]
        # Re-fetch the live instance and flatten it into host variables.
        instance = self.get_instance(region, instance_id)
        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index '''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be
used as Ansible groups '''
return re.sub("[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
# Instantiating Ec2Inventory drives the whole run from its __init__
# (argument parsing, API calls or cache read, and printing the JSON output).
Ec2Inventory()
| gpl-3.0 |
googleapis/googleapis-gen | google/appengine/v1/google-cloud-appengine-v1-py/google/cloud/appengine_admin_v1/services/versions/transports/base.py | 1 | 9187 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.appengine_admin_v1.types import appengine
from google.cloud.appengine_admin_v1.types import version
from google.longrunning import operations_pb2 # type: ignore
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-appengine-admin',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from a source checkout);
    # fall back to a ClientInfo without a gapic_version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
    # google.auth.__version__ was added in 1.26.0
    _GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
    try:  # try pkg_resources if it is available
        _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
    except pkg_resources.DistributionNotFound:  # pragma: NO COVER
        _GOOGLE_AUTH_VERSION = None
class VersionsTransport(abc.ABC):
    """Abstract transport class for Versions."""
    # OAuth scopes any concrete transport may request by default.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/appengine.admin',
        'https://www.googleapis.com/auth/cloud-platform',
        'https://www.googleapis.com/auth/cloud-platform.read-only',
    )
    DEFAULT_HOST: str = 'appengine.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host
        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
        # Save the scopes.
        self._scopes = scopes
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                                credentials_file,
                                **scopes_kwargs,
                                quota_project_id=quota_project_id
                            )
        elif credentials is None:
            # Application Default Credentials lookup.
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
        # If the credentials is service account credentials, then always try to use self signed JWT.
        # (hasattr guards against google-auth versions that predate the API.)
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)
        # Save the credentials.
        self._credentials = credentials
    # TODO(busunkim): This method is in the base transport
    # to avoid duplicating code across the transport classes. These functions
    # should be deleted once the minimum required versions of google-auth is increased.
    # TODO: Remove this function once google-auth >= 1.25.0 is required
    @classmethod
    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
        scopes_kwargs = {}
        if _GOOGLE_AUTH_VERSION and (
            packaging.version.parse(_GOOGLE_AUTH_VERSION)
            >= packaging.version.parse("1.25.0")
        ):
            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
        else:
            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
        return scopes_kwargs
    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.list_versions: gapic_v1.method.wrap_method(
                self.list_versions,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_version: gapic_v1.method.wrap_method(
                self.get_version,
                default_timeout=None,
                client_info=client_info,
            ),
            self.create_version: gapic_v1.method.wrap_method(
                self.create_version,
                default_timeout=None,
                client_info=client_info,
            ),
            self.update_version: gapic_v1.method.wrap_method(
                self.update_version,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete_version: gapic_v1.method.wrap_method(
                self.delete_version,
                default_timeout=None,
                client_info=client_info,
            ),
        }
    # Each property below declares one RPC; concrete (grpc / grpc-asyncio)
    # transports override them with real callables.
    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()
    @property
    def list_versions(self) -> Callable[
            [appengine.ListVersionsRequest],
            Union[
                appengine.ListVersionsResponse,
                Awaitable[appengine.ListVersionsResponse]
            ]]:
        raise NotImplementedError()
    @property
    def get_version(self) -> Callable[
            [appengine.GetVersionRequest],
            Union[
                version.Version,
                Awaitable[version.Version]
            ]]:
        raise NotImplementedError()
    @property
    def create_version(self) -> Callable[
            [appengine.CreateVersionRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()
    @property
    def update_version(self) -> Callable[
            [appengine.UpdateVersionRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()
    @property
    def delete_version(self) -> Callable[
            [appengine.DeleteVersionRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()
# Public re-export surface of this module.
__all__ = (
    'VersionsTransport',
)
| apache-2.0 |
CamelBackNotation/CarnotKE | jyhton/lib-python/2.7/distutils/cygwinccompiler.py | 63 | 17732 | """distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate a import library for its dll
# - create a def-file for python??.dll
# - create a import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
# --export-all-symbols because it doesn't worked reliable in some
# tested configurations. And because other windows compilers also
# need their symbols specified this no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead dllwrap doesn't work without -static because
# it tries to link against dlls instead their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
# this is windows standard and there are normally not the necessary symbols
# in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
def get_msvcr():
    """Include the appropriate MSVC runtime library if Python was built
    with MSVC 7.0 or later.
    """
    msc_pos = sys.version.find('MSC v.')
    if msc_pos == -1:
        # Not an MSVC build of Python (e.g. gcc); nothing extra to link.
        return None
    msc_ver = sys.version[msc_pos+6:msc_pos+10]
    # Map the internal MSC version number to the matching runtime DLL name.
    runtime_by_version = {
        '1300': ['msvcr70'],   # MSVC 7.0
        '1310': ['msvcr71'],   # MSVC 7.1
        '1400': ['msvcr80'],   # VS2005 / MSVC 8.0
        '1500': ['msvcr90'],   # VS2008 / MSVC 9.0
    }
    if msc_ver not in runtime_by_version:
        raise ValueError("Unknown MS Compiler version %s " % msc_ver)
    return runtime_by_version[msc_ver]
class CygwinCCompiler (UnixCCompiler):
    """distutils CCompiler for the Cygwin port of GCC on Windows.

    Extends UnixCCompiler with Windows-specific file extensions, windres
    handling for .rc/.res resources, and .def-file generation for
    exported symbols.  NOTE: this module is Python-2-only syntax
    ("except E, msg").
    """
    compiler_type = 'cygwin'
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".dll"
    static_lib_format = "lib%s%s"
    shared_lib_format = "%s%s"
    exe_extension = ".exe"
    def __init__ (self, verbose=0, dry_run=0, force=0):
        UnixCCompiler.__init__ (self, verbose, dry_run, force)
        # Warn early if pyconfig.h looks incompatible with GCC.
        (status, details) = check_config_h()
        self.debug_print("Python's GCC status: %s (details: %s)" %
                         (status, details))
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. "
                "Reason: %s. "
                "Compiling may fail because of undefined preprocessor macros."
                % details)
        self.gcc_version, self.ld_version, self.dllwrap_version = \
            get_versions()
        self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
                         (self.gcc_version,
                          self.ld_version,
                          self.dllwrap_version) )
        # ld_version >= "2.10.90" and < "2.13" should also be able to use
        # gcc -mdll instead of dllwrap
        # Older dllwraps had own version numbers, newer ones use the
        # same as the rest of binutils ( also ld )
        # dllwrap 2.10.90 is buggy
        if self.ld_version >= "2.10.90":
            self.linker_dll = "gcc"
        else:
            self.linker_dll = "dllwrap"
        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"
        # Hard-code GCC because that's what this is all about.
        # XXX optimization, warnings etc. should be customizable.
        self.set_executables(compiler='gcc -mcygwin -O -Wall',
                             compiler_so='gcc -mcygwin -mdll -O -Wall',
                             compiler_cxx='g++ -mcygwin -O -Wall',
                             linker_exe='gcc -mcygwin',
                             linker_so=('%s -mcygwin %s' %
                                        (self.linker_dll, shared_option)))
        # cygwin and mingw32 need different sets of libraries
        if self.gcc_version == "2.91.57":
            # cygwin shouldn't need msvcrt, but without the dlls will crash
            # (gcc version 2.91.57) -- perhaps something about initialization
            self.dll_libraries=["msvcrt"]
            self.warn(
                "Consider upgrading to a newer version of gcc")
        else:
            # Include the appropriate MSVC runtime library if Python was built
            # with MSVC 7.0 or later.
            self.dll_libraries = get_msvcr()
    # __init__ ()
    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        # Compile one source file to an object file; resource files go
        # through windres instead of the C compiler.
        if ext == '.rc' or ext == '.res':
            # gcc needs '.res' and '.rc' compiled to object files !!!
            try:
                self.spawn(["windres", "-i", src, "-o", obj])
            except DistutilsExecError, msg:
                raise CompileError, msg
        else: # for other files use the C-compiler
            try:
                self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                           extra_postargs)
            except DistutilsExecError, msg:
                raise CompileError, msg
    def link (self,
              target_desc,
              objects,
              output_filename,
              output_dir=None,
              libraries=None,
              library_dirs=None,
              runtime_library_dirs=None,
              export_symbols=None,
              debug=0,
              extra_preargs=None,
              extra_postargs=None,
              build_temp=None,
              target_lang=None):
        # Link object files, generating a .def file for exported symbols
        # and stripping the output unless a debug build was requested.
        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])
        # Additional libraries
        libraries.extend(self.dll_libraries)
        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)
            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))
            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")
            lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
            # Generate .def file
            contents = [
                "LIBRARY %s" % os.path.basename(output_filename),
                "EXPORTS"]
            for sym in export_symbols:
                contents.append(sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)
            # next add options for def-file and to creating import libraries
            # dllwrap uses different options than gcc/ld
            if self.linker_dll == "dllwrap":
                extra_preargs.extend(["--output-lib", lib_file])
                # for dllwrap we have to use a special option
                extra_preargs.extend(["--def", def_file])
            # we use gcc/ld here and can be sure ld is >= 2.9.10
            else:
                # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
                #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
                # for gcc/ld the def-file is specified as any object files
                objects.append(def_file)
        #end: if ((export_symbols is not None) and
        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let dllwrap/ld strip the output file
        # (On my machine: 10KB < stripped_file < ??100KB
        #   unstripped_file = stripped_file + XXX KB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")
        UnixCCompiler.link(self,
                           target_desc,
                           objects,
                           output_filename,
                           output_dir,
                           libraries,
                           library_dirs,
                           runtime_library_dirs,
                           None, # export_symbols, we do this in our def-file
                           debug,
                           extra_preargs,
                           extra_postargs,
                           build_temp,
                           target_lang)
    # link ()
    # -- Miscellaneous methods -----------------------------------------
    # overwrite the one from CCompiler to support rc and res-files
    def object_filenames (self,
                          source_filenames,
                          strip_dir=0,
                          output_dir=''):
        # Map source filenames to object filenames; .rc/.res keep their
        # extension in the object name (e.g. foo.rc -> foo.rc.o).
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            (base, ext) = os.path.splitext (os.path.normcase(src_name))
            if ext not in (self.src_extensions + ['.rc','.res']):
                raise UnknownFileError, \
                      "unknown file type '%s' (from '%s')" % \
                      (ext, src_name)
            if strip_dir:
                base = os.path.basename (base)
            if ext == '.res' or ext == '.rc':
                # these need to be compiled to object files
                obj_names.append (os.path.join (output_dir,
                                                base + ext + self.obj_extension))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names
    # object_filenames ()
# class CygwinCCompiler
# the same as cygwin plus some additional parameters
class Mingw32CCompiler (CygwinCCompiler):
    """CygwinCCompiler variant that drives gcc in mingw32 (no-cygwin) mode."""
    compiler_type = 'mingw32'
    def __init__ (self,
                  verbose=0,
                  dry_run=0,
                  force=0):
        CygwinCCompiler.__init__ (self, verbose, dry_run, force)
        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"
        # A real mingw32 doesn't need to specify a different entry point,
        # but cygwin 2.91.57 in no-cygwin-mode needs it.
        if self.gcc_version <= "2.91.57":
            entry_point = '--entry _DllMain@12'
        else:
            entry_point = ''
        # gcc 4.x dropped -mno-cygwin except in cygwin's own gcc, which
        # still accepts (and needs) it for mingw-style builds.
        if self.gcc_version < '4' or is_cygwingcc():
            no_cygwin = ' -mno-cygwin'
        else:
            no_cygwin = ''
        self.set_executables(compiler='gcc%s -O -Wall' % no_cygwin,
                             compiler_so='gcc%s -mdll -O -Wall' % no_cygwin,
                             compiler_cxx='g++%s -O -Wall' % no_cygwin,
                             linker_exe='gcc%s' % no_cygwin,
                             linker_so='%s%s %s %s'
                                        % (self.linker_dll, no_cygwin,
                                           shared_option, entry_point))
        # Maybe we should also append -mthreads, but then the finished
        # dlls need another dll (mingwm10.dll see Mingw32 docs)
        # (-mthreads: Support thread-safe exception handling on `Mingw32')
        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC 7.0 or later.  (A dead `self.dll_libraries=[]` store
        # that was immediately overwritten here has been removed.)
        self.dll_libraries = get_msvcr()
    # __init__ ()
# class Mingw32CCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using a unmodified
# version.
# Status codes returned by check_config_h(); see its docstring for meaning.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
    """Check if the current Python installation (specifically, pyconfig.h)
    appears amenable to building extensions with GCC. Returns a tuple
    (status, details), where 'status' is one of the following constants:
    CONFIG_H_OK
      all is well, go ahead and compile
    CONFIG_H_NOTOK
      doesn't look good
    CONFIG_H_UNCERTAIN
      not sure -- unable to read pyconfig.h
    'details' is a human-readable string explaining the situation.
    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """
    # NOTE: Python-2-only idioms throughout (string.find, "except E, exc").
    # XXX since this function also checks sys.version, it's not strictly a
    # "pyconfig.h" check -- should probably be renamed...
    from distutils import sysconfig
    import string
    # if sys.version contains GCC then python was compiled with
    # GCC, and the pyconfig.h file should be OK
    if string.find(sys.version,"GCC") >= 0:
        return (CONFIG_H_OK, "sys.version mentions 'GCC'")
    fn = sysconfig.get_config_h_filename()
    try:
        # It would probably better to read single lines to search.
        # But we do this only once, and it is fast enough
        f = open(fn)
        try:
            s = f.read()
        finally:
            f.close()
    except IOError, exc:
        # if we can't read this file, we cannot say it is wrong
        # the compiler will complain later about this file as missing
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))
    else:
        # "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
        if string.find(s,"__GNUC__") >= 0:
            return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
        else:
            return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
    """ Try to find out the versions of gcc, ld and dllwrap.
    If not possible it returns None for it.
    """
    from distutils.version import LooseVersion
    from distutils.spawn import find_executable
    import re
    def _probe(tool, flag, pattern):
        # Run `<tool> <flag>` and parse a dotted version out of its output;
        # returns None when the tool is missing or prints no version.
        exe = find_executable(tool)
        if not exe:
            return None
        pipe = os.popen(exe + ' ' + flag, 'r')
        output = pipe.read()
        pipe.close()
        match = re.search(pattern, output)
        if match:
            return LooseVersion(match.group(1))
        return None
    gcc_version = _probe('gcc', '-dumpversion', r'(\d+\.\d+(\.\d+)*)')
    ld_version = _probe('ld', '-v', r'(\d+\.\d+(\.\d+)*)')
    # dllwrap prints the version after a leading space.
    dllwrap_version = _probe('dllwrap', '--version', r' (\d+\.\d+(\.\d+)*)')
    return (gcc_version, ld_version, dllwrap_version)
def is_cygwingcc():
    '''Try to determine if the gcc that would be used is from cygwin.'''
    pipe = os.popen('gcc -dumpmachine', 'r')
    try:
        target_triplet = pipe.read()
    finally:
        pipe.close()
    # The output is the target triplet cpu-vendor-os;
    # Cygwin's gcc sets the os field to 'cygwin'.
    return target_triplet.strip().endswith('cygwin')
| apache-2.0 |
raccoongang/edx-platform | lms/djangoapps/mobile_api/migrations/0001_initial.py | 86 | 1229 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    # Initial schema migration: creates the MobileApiConfig table, a
    # ConfigurationModel-style record (enabled flag + audit fields) plus the
    # comma-separated list of video profiles exposed by the mobile API.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='MobileApiConfig',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
                ('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
                ('video_profiles', models.TextField(help_text=b'A comma-separated list of names of profiles to include for videos returned from the mobile API.', blank=True)),
                ('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
            ],
            options={
                # Newest configuration row first, mirroring ConfigurationModel.
                'ordering': ('-change_date',),
                'abstract': False,
            },
        ),
    ]
| agpl-3.0 |
apanda/phantomjs-intercept | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py | 118 | 11747 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
_log = logging.getLogger(__name__)
class TestRunResults(object):
    """Accumulates per-test results and counters for one layout-test run."""
    def __init__(self, expectations, num_tests):
        self.total = num_tests
        self.remaining = num_tests
        self.expectations = expectations
        # Aggregate counters, updated by add().
        self.expected = 0
        self.unexpected = 0
        self.unexpected_failures = 0
        self.unexpected_crashes = 0
        self.unexpected_timeouts = 0
        # One bucket per expectation type; add() files each test by its
        # actual result type.
        self.tests_by_expectation = {
            expectation: set()
            for expectation in test_expectations.TestExpectations.EXPECTATIONS.values()}
        self.tests_by_timeline = {
            timeline: expectations.get_tests_with_timeline(timeline)
            for timeline in test_expectations.TestExpectations.TIMELINES.values()}
        self.results_by_name = {}  # Map of test name to the last result for the test.
        self.all_results = []  # All results from a run, including every iteration of every test.
        self.unexpected_results_by_name = {}
        self.failures_by_name = {}
        self.total_failures = 0
        self.expected_skips = 0
        self.slow_tests = set()
        self.interrupted = False
    def add(self, test_result, expected, test_is_slow):
        """Record one result; `expected` says whether it matched expectations."""
        name = test_result.test_name
        self.tests_by_expectation[test_result.type].add(name)
        self.results_by_name[name] = test_result
        if test_result.type != test_expectations.SKIP:
            self.all_results.append(test_result)
        self.remaining -= 1
        if test_result.failures:
            self.total_failures += 1
            self.failures_by_name[name] = test_result.failures
        if expected:
            self.expected += 1
            if test_result.type == test_expectations.SKIP:
                self.expected_skips += 1
        else:
            self.unexpected_results_by_name[name] = test_result
            self.unexpected += 1
            if test_result.failures:
                self.unexpected_failures += 1
            if test_result.type == test_expectations.CRASH:
                self.unexpected_crashes += 1
            elif test_result.type == test_expectations.TIMEOUT:
                self.unexpected_timeouts += 1
        if test_is_slow:
            self.slow_tests.add(name)
class RunDetails(object):
    """Value object bundling everything a test run produced for the caller."""
    def __init__(self, exit_code, summarized_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False):
        # Plain data holder: stash every argument as a same-named attribute.
        self.__dict__.update(
            exit_code=exit_code,
            summarized_results=summarized_results,
            initial_results=initial_results,
            retry_results=retry_results,
            enabled_pixel_tests_in_retry=enabled_pixel_tests_in_retry)
def _interpret_test_failures(failures):
    """Translate a list of TestFailure objects into result-dict flags."""
    test_dict = {}
    # Exact-type membership (not isinstance) mirrors the original 1:1
    # mapping between failure classes and flags.
    failure_types = set(type(failure) for failure in failures)
    # FIXME: get rid of all this is_* values once there is a 1:1 map between
    # TestFailure type and test_expectations.EXPECTATION.
    if test_failures.FailureMissingAudio in failure_types:
        test_dict['is_missing_audio'] = True
    if test_failures.FailureMissingResult in failure_types:
        test_dict['is_missing_text'] = True
    if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
        test_dict['is_missing_image'] = True
    # Vacuously true on a fresh dict; kept as a guard against reordering.
    if 'image_diff_percent' not in test_dict:
        for failure in failures:
            if isinstance(failure, (test_failures.FailureImageHashMismatch,
                                    test_failures.FailureReftestMismatch)):
                test_dict['image_diff_percent'] = failure.diff_percent
    return test_dict
def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry):
    """Returns a dictionary containing a summary of the test runs, with the following fields:
        'version': a version indicator
        'fixable': The number of fixable tests (NOW - PASS)
        'skipped': The number of skipped tests (NOW & SKIPPED)
        'num_regressions': The number of non-flaky failures
        'num_flaky': The number of flaky failures
        'num_missing': The number of tests with missing results
        'num_passes': The number of unexpected passes
        'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
    """
    results = {}
    results['version'] = 3

    tbe = initial_results.tests_by_expectation
    tbt = initial_results.tests_by_timeline
    results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
    results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])

    num_passes = 0
    num_flaky = 0
    num_missing = 0
    num_regressions = 0
    keywords = {}
    # Build a reverse map from expectation/modifier enum values to their
    # upper-cased string names, used to render the 'actual' field below.
    for expecation_string, expectation_enum in test_expectations.TestExpectations.EXPECTATIONS.iteritems():
        keywords[expectation_enum] = expecation_string.upper()

    for modifier_string, modifier_enum in test_expectations.TestExpectations.MODIFIERS.iteritems():
        keywords[modifier_enum] = modifier_string.upper()

    tests = {}

    for test_name, result in initial_results.results_by_name.iteritems():
        # Note that if a test crashed in the original run, we ignore
        # whether or not it crashed when we retried it (if we retried it),
        # and always consider the result not flaky.
        expected = expectations.get_expectations_string(test_name)
        result_type = result.type
        actual = [keywords[result_type]]

        if result_type == test_expectations.SKIP:
            continue

        test_dict = {}
        if result.has_stderr:
            test_dict['has_stderr'] = True

        if result.reftest_type:
            test_dict.update(reftest_type=list(result.reftest_type))

        if expectations.has_modifier(test_name, test_expectations.WONTFIX):
            test_dict['wontfix'] = True

        # Classify the result: unexpected pass, regression, missing result,
        # or (by comparing against the retry run) a flaky failure.
        if result_type == test_expectations.PASS:
            num_passes += 1
            # FIXME: include passing tests that have stderr output.
            if expected == 'PASS':
                continue
        elif result_type == test_expectations.CRASH:
            if test_name in initial_results.unexpected_results_by_name:
                num_regressions += 1
        elif result_type == test_expectations.MISSING:
            if test_name in initial_results.unexpected_results_by_name:
                num_missing += 1
        elif test_name in initial_results.unexpected_results_by_name:
            if retry_results and test_name not in retry_results.unexpected_results_by_name:
                # Failed initially but behaved as expected on retry: flaky.
                actual.extend(expectations.get_expectations_string(test_name).split(" "))
                num_flaky += 1
            elif retry_results:
                retry_result_type = retry_results.unexpected_results_by_name[test_name].type
                if result_type != retry_result_type:
                    # TEXT -> IMAGE+TEXT when pixel tests were enabled on the
                    # retry is still the same failure, not flakiness.
                    if enabled_pixel_tests_in_retry and result_type == test_expectations.TEXT and retry_result_type == test_expectations.IMAGE_PLUS_TEXT:
                        num_regressions += 1
                    else:
                        num_flaky += 1
                    actual.append(keywords[retry_result_type])
                else:
                    num_regressions += 1
            else:
                num_regressions += 1

        test_dict['expected'] = expected
        test_dict['actual'] = " ".join(actual)
        test_dict.update(_interpret_test_failures(result.failures))

        if retry_results:
            retry_result = retry_results.unexpected_results_by_name.get(test_name)
            if retry_result:
                test_dict.update(_interpret_test_failures(retry_result.failures))

        # Store test hierarchically by directory. e.g.
        # foo/bar/baz.html: test_dict
        # foo/bar/baz1.html: test_dict
        #
        # becomes
        # foo: {
        #     bar: {
        #         baz.html: test_dict,
        #         baz1.html: test_dict
        #     }
        # }
        parts = test_name.split('/')
        current_map = tests
        for i, part in enumerate(parts):
            if i == (len(parts) - 1):
                current_map[part] = test_dict
                break
            if part not in current_map:
                current_map[part] = {}
            current_map = current_map[part]

    results['tests'] = tests
    results['num_passes'] = num_passes
    results['num_flaky'] = num_flaky
    results['num_missing'] = num_missing
    results['num_regressions'] = num_regressions
    results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
    results['interrupted'] = initial_results.interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
    results['layout_tests_dir'] = port_obj.layout_tests_dir()
    results['has_wdiff'] = port_obj.wdiff_available()
    results['has_pretty_patch'] = port_obj.pretty_patch_available()
    results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')

    try:
        # We only use the svn revision for using trac links in the results.html file,
        # Don't do this by default since it takes >100ms.
        # FIXME: Do we really need to populate this both here and in the json_results_generator?
        if port_obj.get_option("builder_name"):
            port_obj.host.initialize_scm()
            results['revision'] = port_obj.host.scm().head_svn_revision()
    except Exception, e:
        _log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
        # Handle cases where we're running outside of version control.
        import traceback
        _log.debug('Failed to learn head svn revision:')
        _log.debug(traceback.format_exc())
        results['revision'] = ""

    return results
| bsd-3-clause |
n0max/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/browsers/sauce.py | 8 | 6465 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import glob
import os
import shutil
import subprocess
import tarfile
import tempfile
import time
from cStringIO import StringIO as CStringIO
import requests
from .base import Browser, ExecutorBrowser, require_arg
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorselenium import (SeleniumTestharnessExecutor,
SeleniumRefTestExecutor)
# Directory containing this file; used below to locate the prerun scripts
# under sauce_setup/.
here = os.path.split(__file__)[0]

# Registration table telling wptrunner which hook functions this product
# module provides and which executor class handles each test type.
__wptrunner__ = {"product": "sauce",
                 "check_args": "check_args",
                 "browser": "SauceBrowser",
                 "executor": {"testharness": "SeleniumTestharnessExecutor",
                              "reftest": "SeleniumRefTestExecutor"},
                 "browser_kwargs": "browser_kwargs",
                 "executor_kwargs": "executor_kwargs",
                 "env_extras": "env_extras",
                 "env_options": "env_options"}
def get_capabilities(**kwargs):
    """Build the Sauce Labs desired-capabilities dict from the sauce_* kwargs.

    The returned dict names the session, pins the Selenium version, wires
    the tunnel identifier and attaches the browser-specific prerun script
    (Edge and Safari only; other browsers get prerun=None).
    """
    browser = kwargs["sauce_browser"]
    platform = kwargs["sauce_platform"]
    version = kwargs["sauce_version"]

    prerun_scripts = {
        "MicrosoftEdge": {
            "executable": "sauce-storage:edge-prerun.bat",
            "background": False,
        },
        "safari": {
            "executable": "sauce-storage:safari-prerun.sh",
            "background": False,
        },
    }

    capabilities = {
        "browserName": browser,
        "build": kwargs["sauce_build"],
        "disablePopupHandler": True,
        "name": "%s %s on %s" % (browser, version, platform),
        "platform": platform,
        "public": "public",
        "selenium-version": "3.3.1",
        "tags": kwargs["sauce_tags"],
        "tunnel-identifier": kwargs["sauce_tunnel_id"],
        "version": version,
        "prerun": prerun_scripts.get(browser),
    }

    # Edge needs an older Selenium than the default pinned above.
    if browser == 'MicrosoftEdge':
        capabilities['selenium-version'] = '2.4.8'

    return capabilities
def get_sauce_config(**kwargs):
    """Assemble the sauce config: local tunnel hub URL plus capabilities.

    The URL embeds the user's credentials and points at the local Sauce
    Connect endpoint on port 4445.
    """
    credentials = "%s:%s" % (kwargs["sauce_user"], kwargs["sauce_key"])
    return {
        "url": "http://%s@localhost:4445/wd/hub" % credentials,
        "browserName": kwargs["sauce_browser"],
        "capabilities": get_capabilities(**kwargs),
    }
def check_args(**kwargs):
    """Fail fast if any of the required sauce_* options is missing."""
    for required in ("sauce_browser",
                     "sauce_platform",
                     "sauce_version",
                     "sauce_user",
                     "sauce_key"):
        require_arg(kwargs, required)
def browser_kwargs(test_type, run_info_data, **kwargs):
    """Keyword arguments for SauceBrowser: just the assembled sauce config."""
    return {"sauce_config": get_sauce_config(**kwargs)}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                    **kwargs):
    """Extend the base executor kwargs with the Sauce desired capabilities."""
    kwargs_out = base_executor_kwargs(test_type, server_config,
                                      cache_manager, **kwargs)
    kwargs_out["capabilities"] = get_capabilities(**kwargs)
    return kwargs_out
def env_extras(**kwargs):
    """Extra environment helpers to run alongside the tests: the SC tunnel."""
    tunnel = SauceConnect(**kwargs)
    return [tunnel]
def env_options():
    """Static wptrunner environment options for running against Sauce."""
    return dict(host="web-platform.test",
                bind_hostname="true",
                supports_debugger=False)
def get_tar(url, dest):
    """Download the tarball at *url* and unpack it into *dest*.

    NOTE(review): extractall() trusts archive member paths; acceptable for
    the pinned saucelabs.com URL used by the caller, but unsafe for
    untrusted archives.
    """
    response = requests.get(url, stream=True)
    response.raise_for_status()
    payload = CStringIO(response.raw.read())
    with tarfile.open(fileobj=payload) as archive:
        archive.extractall(path=dest)
class SauceConnect():
    """Starts and tears down a Sauce Connect tunnel for a test run.

    On entry, the ``sc`` binary is downloaded if no path was supplied, the
    browser prerun scripts are uploaded to Sauce storage, and the tunnel
    process is launched; on exit the tunnel is terminated and any temporary
    download directory removed.
    """

    def __init__(self, **kwargs):
        self.sauce_user = kwargs["sauce_user"]
        self.sauce_key = kwargs["sauce_key"]
        self.sauce_tunnel_id = kwargs["sauce_tunnel_id"]
        # Optional path to an existing sc binary; downloaded when absent.
        self.sauce_connect_binary = kwargs.get("sauce_connect_binary")
        self.sc_process = None
        self.temp_dir = None

    def __enter__(self, options):
        # NOTE(review): the extra `options` parameter deviates from the
        # standard context-manager protocol, so this is presumably invoked
        # directly rather than via a `with` statement — confirm at call site.
        if not self.sauce_connect_binary:
            self.temp_dir = tempfile.mkdtemp()
            get_tar("https://saucelabs.com/downloads/sc-4.4.9-linux.tar.gz", self.temp_dir)
            self.sauce_connect_binary = glob.glob(os.path.join(self.temp_dir, "sc-*-linux/bin/sc"))[0]
        self.upload_prerun_exec('edge-prerun.bat')
        self.upload_prerun_exec('safari-prerun.sh')

        self.sc_process = subprocess.Popen([
            self.sauce_connect_binary,
            "--user=%s" % self.sauce_user,
            "--api-key=%s" % self.sauce_key,
            "--no-remove-colliding-tunnels",
            "--tunnel-identifier=%s" % self.sauce_tunnel_id,
            "--metrics-address=0.0.0.0:9876",
            "--readyfile=./sauce_is_ready",
            "--tunnel-domains",
            "web-platform.test",
            "*.web-platform.test"
        ])

        # Wait until sc writes its readyfile, or the process dies. poll()
        # returns None while the process is still running; the previous
        # `not self.sc_process.poll()` check looped forever when sc exited
        # cleanly (status 0) without ever creating the readyfile.
        while not os.path.exists('./sauce_is_ready') and self.sc_process.poll() is None:
            time.sleep(5)
        if self.sc_process.returncode is not None and self.sc_process.returncode > 0:
            # Interpolate the exit code into the message; previously it was
            # passed as a second exception argument and never formatted.
            raise SauceException("Unable to start Sauce Connect Proxy. Process exited with code %s"
                                 % self.sc_process.returncode)

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Terminate the tunnel process and delete the download directory."""
        self.sc_process.terminate()
        if self.temp_dir and os.path.exists(self.temp_dir):
            try:
                shutil.rmtree(self.temp_dir)
            except OSError:
                # Best-effort cleanup; a busy or locked tree is not fatal.
                pass

    def upload_prerun_exec(self, file_name):
        """Upload a prerun script from sauce_setup/ to Sauce storage."""
        auth = (self.sauce_user, self.sauce_key)
        url = "https://saucelabs.com/rest/v1/storage/%s/%s?overwrite=true" % (self.sauce_user, file_name)

        with open(os.path.join(here, 'sauce_setup', file_name), 'rb') as f:
            requests.post(url, data=f, auth=auth)
class SauceException(Exception):
    """Raised when the Sauce Connect tunnel cannot be started."""
class SauceBrowser(Browser):
    """Browser implementation for a remote Sauce Labs session.

    Nothing runs locally, so the process-lifecycle hooks are no-ops; the
    executor connects straight to the remote WebDriver hub using the URL
    from ``sauce_config``.
    """

    # Generous startup allowance for the remote session (presumably
    # seconds — TODO confirm units against the Browser base class).
    init_timeout = 300

    def __init__(self, logger, sauce_config):
        Browser.__init__(self, logger)
        # Config dict produced by get_sauce_config(); "url" is the hub URL.
        self.sauce_config = sauce_config

    def start(self):
        # No local process to start.
        pass

    def stop(self, force=False):
        # No local process to stop.
        pass

    def pid(self):
        # There is no local browser process, hence no pid.
        return None

    def is_alive(self):
        # TODO: Should this check something about the connection?
        return True

    def cleanup(self):
        # Nothing held locally that needs cleaning up.
        pass

    def executor_browser(self):
        # Hand the executor the class to instantiate plus the remote hub URL.
        return ExecutorBrowser, {"webdriver_url": self.sauce_config["url"]}
| mpl-2.0 |
xupit3r/askpgh | askbot/tests/utils_tests.py | 3 | 5591 | from django.test import TestCase
from askbot.tests.utils import with_settings
from askbot.utils.url_utils import urls_equal
from askbot.utils.html import absolutize_urls
from askbot.utils.html import replace_links_with_text
from askbot.utils.html import get_text_from_html
from askbot.conf import settings as askbot_settings
class UrlUtilsTests(TestCase):
    """Tests for askbot.utils.url_utils.urls_equal."""

    def tests_urls_equal(self):
        """Covers scheme, port, path and the loose trailing-slash mode."""
        self.assertTrue(urls_equal('', ''))
        self.assertTrue(urls_equal('', '/', True))
        self.assertTrue(urls_equal('http://cnn.com', 'http://cnn.com/', True))

        self.assertFalse(urls_equal('https://cnn.com', 'http://cnn.com'))
        self.assertFalse(urls_equal('http://cnn.com:80', 'http://cnn.com:8000'))

        self.assertTrue(urls_equal('http://cnn.com/path', 'http://cnn.com/path/', True))
        self.assertFalse(urls_equal('http://cnn.com/path', 'http://cnn.com/path/'))
class ReplaceLinksWithTextTests(TestCase):
    """testing correctness of `askbot.utils.html.replace_links_with_text"""

    def test_local_link_not_replaced(self):
        """Anchors with relative (local) urls pass through unchanged."""
        text = '<a href="/some-link">some link</a>'
        self.assertEqual(replace_links_with_text(text), text)

    def test_link_without_url_replaced(self):
        """An anchor with no href collapses to its inner text."""
        text = '<a>some link</a>'
        self.assertEqual(replace_links_with_text(text), 'some link')

    def test_external_link_without_text_replaced(self):
        """An external link with empty text is removed entirely."""
        text = '<a href="https://example.com/"></a>'
        #in this case we delete the link
        self.assertEqual(replace_links_with_text(text), '')

    def test_external_link_with_text_replaced(self):
        """An external link becomes 'url (text)'."""
        text = '<a href="https://example.com/">some link</a>'
        self.assertEqual(
            replace_links_with_text(text),
            'https://example.com/ (some link)'
        )

    def test_local_image_not_replaced(self):
        """Images with relative src pass through unchanged."""
        text = '<img src="/some-image.gif"/>'
        self.assertEqual(replace_links_with_text(text), text)

    def test_local_url_with_hotlinked_image_replaced(self):
        """Inside a local link, a hotlinked image becomes 'url (alt)'."""
        text = '<a href="/some-link"><img src="http://example.com/img.png" alt="picture""> some text</a>'
        self.assertEqual(
            replace_links_with_text(text),
            '<a href="/some-link">http://example.com/img.png (picture) some text</a>'
        )

    def test_hotlinked_image_without_alt_replaced(self):
        """A hotlinked image without alt text becomes just its url."""
        text = '<img src="https://example.com/some-image.gif"/>'
        self.assertEqual(
            replace_links_with_text(text),
            'https://example.com/some-image.gif'
        )

    def test_hotlinked_image_with_alt_replaced(self):
        """A hotlinked image with alt text becomes 'url (alt)'."""
        text = '<img src="https://example.com/some-image.gif" alt="picture"/>'
        self.assertEqual(
            replace_links_with_text(text),
            'https://example.com/some-image.gif (picture)'
        )
class HTMLUtilsTests(TestCase):
    """tests for :mod:`askbot.utils.html` module"""

    @with_settings(APP_URL='http://example.com')
    def test_absolutize_urls(self):
        """Relative src/href attributes are rewritten against APP_URL;
        already-absolute urls are left untouched."""
        text = """<img class="junk" src="/some.gif"> <img class="junk" src="/cat.gif"> <IMG SRC='/some.png'>"""
        #jinja register.filter decorator works in a weird way
        self.assertEqual(
            absolutize_urls(text),
            '<img class="junk" src="http://example.com/some.gif"> <img class="junk" src="http://example.com/cat.gif"> <IMG SRC="http://example.com/some.png">'
        )

        text = """<a class="junk" href="/something">link</a> <A HREF='/something'>link</A>"""
        #jinja register.filter decorator works in a weird way
        self.assertEqual(
            absolutize_urls(text),
            '<a class="junk" href="http://example.com/something">link</a> <A HREF="http://example.com/something">link</A>'
        )

        text = '<img src="/upfiles/13487900323638005.png" alt="" />'
        self.assertEqual(
            absolutize_urls(text),
            '<img src="http://example.com/upfiles/13487900323638005.png" alt="" />'
        )

        # mixed content: only the relative upfiles path is rewritten
        text = 'ohaouhaosthoanstoahuaou<br /><img src="/upfiles/13487906221942257.png" alt="" /><img class="gravatar" title="Evgeny4" src="http://kp-dev.askbot.com/avatar/render_primary/5/32/" alt="Evgeny4 gravatar image" width="32" height="32" />'
        self.assertEqual(
            absolutize_urls(text),
            'ohaouhaosthoanstoahuaou<br /><img src="http://example.com/upfiles/13487906221942257.png" alt="" /><img class="gravatar" title="Evgeny4" src="http://kp-dev.askbot.com/avatar/render_primary/5/32/" alt="Evgeny4 gravatar image" width="32" height="32" />'
        )

        # nested link+image plus an external image and trailing text
        text = '<a href="/upfiles/13487909784287052.png"><img src="/upfiles/13487909942351405.png" alt="" /></a><img src="http://i2.cdn.turner.com/cnn/dam/assets/120927033530-ryder-cup-captains-wall-4-tease.jpg" alt="" width="160" height="90" border="0" />and some text<br />aouaosutoaehut'
        self.assertEqual(
            absolutize_urls(text),
            '<a href="http://example.com/upfiles/13487909784287052.png"><img src="http://example.com/upfiles/13487909942351405.png" alt="" /></a><img src="http://i2.cdn.turner.com/cnn/dam/assets/120927033530-ryder-cup-captains-wall-4-tease.jpg" alt="" width="160" height="90" border="0" />and some text<br />aouaosutoaehut'
        )

    def test_get_text_from_html(self):
        """Links/images flatten to 'url (text)' plain text."""
        self.assertEqual(
            get_text_from_html('ataoesa uau <a>link</a>aueaotuosu ao <a href="http://cnn.com">CNN!</a>\nnaouaouuau<img> <img src="http://cnn.com/1.png"/> <img src="http://cnn.com/2.png" alt="sometext">'),
            u'ataoesa uau linkaueaotuosu ao http://cnn.com (CNN!)\n\nnaouaouuau http://cnn.com/1.png http://cnn.com/2.png (sometext)'
        )
| gpl-3.0 |
CharlesGarrocho/JUMP | detector/detector_movimento.py | 2 | 18706 | #!/usr/bin/env python
# coding:utf-8
import cv2
import numpy as np
import sys
import json
import time
from optparse import OptionParser
from SimpleWebSocketServer import WebSocket, SimpleWebSocketServer
from multiprocessing import Process
import threading
# Module-level process handle; not referenced anywhere in this module —
# presumably legacy from an earlier multiprocessing design (TODO confirm).
processo = None
class Movimentos(object):
    '''
    Enum-like holder for the possible player movement values
    '''
    AGACHADO = -2
    DESCENDO = -1
    EM_PE = 0
    SUBINDO = 1
class GerenciadorEstadoJogador(object):
    '''
    Manages the player's state, persisting it either to a JSON file on
    disk or, when a websocket connection is supplied, pushing it to the
    connected client.
    '''
    # Constants: paths of the JSON files shared with the game process.
    ARQUIVO_ESTADO_JOGADOR = './file/estado_jogador.json'
    ARQUIVO_ESTADO_VIDA_JOGADOR = './file/estado_jogo_cliente.json'

    class EstadosJogador(object):
        '''
        Enum-like holder for the player states as seen by the game client
        '''
        EM_PE = 0
        PULANDO = 1
        AGACHADO = -1

    def __init__(self, conexao=None):
        # conexao: optional websocket; when None, state goes to the file.
        self.conexao = conexao
        self.atualizar_estado(Movimentos.EM_PE, False)
        self._set_vivo(True)

    def atualizar_estado(self, movimento, calibrado):
        '''
        Updates the player state (written to the file or sent over the
        websocket)

        :param movimento: the player's current movement
        :param calibrado: whether the camera has been calibrated with the player
        '''
        # Map the detector's movement value onto the client-facing enum.
        novo_estado = 0
        if movimento == Movimentos.EM_PE:
            novo_estado = self.EstadosJogador.EM_PE
        elif movimento == Movimentos.SUBINDO:
            novo_estado = self.EstadosJogador.PULANDO
        elif movimento == Movimentos.AGACHADO:
            novo_estado = self.EstadosJogador.AGACHADO

        estado_jogador = {"movimento": novo_estado, "calibrado": calibrado}
        str_json = json.dumps(estado_jogador)

        if self.conexao is None:
            # Recreate the file and write the new player state into it.
            print 'escreveu no arquivo: ', str_json
            with open(self.ARQUIVO_ESTADO_JOGADOR, 'w') as arq:
                arq.write(str_json)
        else:
            try:
                print self.conexao.address
                print 'Enviou: ', str_json
                self.conexao.sendMessage(str_json)
            except:
                print 'Não foi possível enviar a mensagem ao cliente'

    def _set_vivo(self, vivo):
        '''
        Sets the player's "alive" flag in the game-state file

        :param vivo: whether the player is alive
        '''
        try:
            with open(self.ARQUIVO_ESTADO_VIDA_JOGADOR, 'r') as arq:
                estado_jogo = json.loads(arq.read())
            estado_jogo['jogador_vivo'] = vivo
            with open(self.ARQUIVO_ESTADO_VIDA_JOGADOR, 'w') as arq:
                arq.write(json.dumps(estado_jogo))
        except ValueError as e:
            # Malformed JSON in the state file; skip the update.
            print e

    def is_vivo(self):
        '''
        Checks whether the player is still alive

        :returns: True if the player is alive and False if not
        '''
        try:
            with open(self.ARQUIVO_ESTADO_VIDA_JOGADOR) as arq:
                vivo_str = arq.read()
            print vivo_str
            vivo = json.loads(vivo_str)
            return vivo['jogador_vivo']
        except ValueError as e:
            print e
        # Treat an unreadable state file as "not alive".
        return False

    def tela_atual(self):
        '''
        Returns the game's current screen

        :returns: the current screen identifier from the game-state file
        '''
        with open(self.ARQUIVO_ESTADO_VIDA_JOGADOR) as arq:
            str_estado_jogo = arq.read()
        estado_jogo = json.loads(str_estado_jogo)
        tela = estado_jogo['tela']
        return tela

    def finish(self):
        '''
        Resets the manager's state (player standing, uncalibrated, alive)
        '''
        self.atualizar_estado(Movimentos.EM_PE, False)
        self._set_vivo(True)
class DetectorMovimento(threading.Thread):
    '''
    Thread that watches the camera feed, detects the player's movement
    (jump, crouch, standing) by tracking the y coordinate of the largest
    color-matched blob, and forwards state changes to the
    GerenciadorEstadoJogador.
    '''
    # Constants (pixel sizes of the calibration box and detection margins)
    ALTURA_QUADRADO_CENTRO = 80
    LARGURA_QUADRADO_CENTRO = 200
    MARGEM_ERRO_CALIBRACAO = 20
    # prevents a small increase in the person's height from being
    # considered a jump
    MARGEM_TOLERANCIA = 70
    NUM_Y_ANALIZADOS = 5
    NUM_Y_GUARDADOS = 5
    ALTURA_AGACHAMENTO = 340

    class VariacoesMovimento(object):
        '''
        Enum-like holder for the possible frame-to-frame movement variations
        '''
        PARA_CIMA = 1
        PARA_BAIXO = -1
        SEM_MOVIMENTO = 0

    def __init__(self, id_camera=0, agachar_desabilitado=False, conexao=None):
        '''
        Class constructor

        :param id_camera: id of the camera that will be used, defaults to 0
        :param agachar_desabilitado: when True, crouch detection is disabled
        :param conexao: optional websocket client whose camera/connection
            are reused; state updates are then sent over the socket
        '''
        threading.Thread.__init__(self)
        self.conexao = conexao
        self.movimento = Movimentos.EM_PE
        self.id_camera = id_camera
        self.agachar_desabilitado = agachar_desabilitado
        if conexao is None:
            self.camera = cv2.VideoCapture(self.id_camera)
        else:
            self.camera = conexao.camera
        if not self.camera.isOpened():
            raise IOError('Não foi possivel ter acesso a camera')
        if self.NUM_Y_ANALIZADOS > self.NUM_Y_GUARDADOS:
            raise ValueError(
                "Número de Y analisados deve ser igual ou menor que o número de Y guardados")
        # Camera properties 3 and 4 are the frame width and height.
        self.width, self.height = self.camera.get(3), self.camera.get(4)
        print 'Resolução da camera {0} x {1}'.format(self.width, self.height)
        self.ys = []
        self.desenhar_linhas = False
        self.calibrado = False
        self.gerenciador_estado_jogador = GerenciadorEstadoJogador(
            conexao=self.conexao)

    def return_name(self):
        '''
        Returns the name of the process

        :returns: the process name
        '''
        return 'Processo de detecção de movimentos'

    def run(self):
        '''
        Thread entry point: starts the detection loop
        '''
        return self.iniciar()

    def get_thresholded_image(self, hsv):
        '''
        Builds a binary mask selecting pixels inside a fixed HSV color range

        :param hsv: image in the HSV color space
        :returns: the binary color mask
        '''
        # Fixed window with hue 110-140 — appears to target a blue-ish
        # marker worn by the player (TODO confirm the tracked color).
        min_cor = np.array((110, 100, 80), np.uint8)
        max_cor = np.array((140, 190, 190), np.uint8)
        faixa_cor = cv2.inRange(hsv, min_cor, max_cor)
        return faixa_cor

    def verificar_movimento(self):
        '''
        Checks whether there was movement and whether it was downward or
        upward

        :returns: 0 for no movement, 1 for upward movement and -1 for
            downward movement
        '''
        ultimos_valores_y = [0]
        if len(self.ys) >= self.NUM_Y_ANALIZADOS:
            ultimos_valores_y = self.ys[
                len(self.ys) - self.NUM_Y_ANALIZADOS:len(self.ys)]
        # there was a difference larger than the margin between two Y
        # points within the analyzed window
        if max(ultimos_valores_y) - min(ultimos_valores_y) > self.MARGEM_TOLERANCIA:
            ultimo_y = self.ys[len(self.ys) - 1]
            primeiro_y = self.ys[0]
            # Note: image y grows downwards, so a larger y means lower.
            if primeiro_y < ultimo_y:  # going down
                return self.VariacoesMovimento.PARA_BAIXO
            else:  # going up
                return self.VariacoesMovimento.PARA_CIMA
        else:
            return self.VariacoesMovimento.SEM_MOVIMENTO

    def iniciar(self):
        '''
        Starts the detection loop
        '''
        # only start detection once the game is sitting at the menu
        while self.gerenciador_estado_jogador.tela_atual() != 'menu':
            print 'Jogo não está na tela de menu'
            time.sleep(0.5)

        momento_pulo = {}
        momento_agachar = {}
        centro_x, centro_y = (int)(self.width / 2), (int)(self.height / 2)
        # print 'Numero de frames:
        # {0}'.format(self.camera.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
        contador = 0
        while(self.camera.isOpened()):
            contador = contador + 1
            # every N loops, check whether the player is still alive
            if contador % 50 == 0:
                if not self.gerenciador_estado_jogador.is_vivo():
                    print 'Jogador perdeu'
                    break

            _, frame = self.camera.read()
            frame = cv2.flip(frame, 1)
            blur = cv2.medianBlur(frame, 5)
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            faixa_cor = self.get_thresholded_image(hsv)
            # Clean up the mask: erode noise, then dilate the blob back.
            erode = cv2.erode(faixa_cor, None, iterations=3)
            dilate = cv2.dilate(erode, None, iterations=10)
            contours, hierarchy = cv2.findContours(
                dilate, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

            # draw the square at the center, used for calibration
            cv2.rectangle(
                frame, (centro_x - (self.LARGURA_QUADRADO_CENTRO / 2),
                        centro_y - (self.ALTURA_QUADRADO_CENTRO / 2)),
                (centro_x + (self.LARGURA_QUADRADO_CENTRO / 2), centro_y + (self.ALTURA_QUADRADO_CENTRO / 2)), [0, 255, 0], 2)

            if not self.calibrado:
                # Not calibrated: throw away any accumulated history.
                self.ys = []
                momento_pulo['y'] = None
                momento_agachar['y'] = None

            if contours:
                # Pick the contour with the largest bounding-box area.
                maior_area = 0
                maior_contorno = contours[0]
                for cont in contours:
                    cx, cy, cw, ch = cv2.boundingRect(cont)
                    area = cw * ch
                    if area > maior_area:
                        maior_area = area
                        maior_contorno = cont
                x, y, w, h = cv2.boundingRect(maior_contorno)
                cx, cy = x + w / 2, y + h / 2

                # check whether the blob is inside the calibration square
                if y > centro_y - (self.ALTURA_QUADRADO_CENTRO / 2) - self.MARGEM_ERRO_CALIBRACAO and \
                   y < centro_y - (self.ALTURA_QUADRADO_CENTRO / 2) + self.MARGEM_ERRO_CALIBRACAO and \
                   y + h > centro_y + (self.ALTURA_QUADRADO_CENTRO / 2) - self.MARGEM_ERRO_CALIBRACAO and \
                   y + h < centro_y + (self.ALTURA_QUADRADO_CENTRO / 2) + self.MARGEM_ERRO_CALIBRACAO:
                    if not self.calibrado:
                        print 'Calibrou'
                        self.calibrado = True
                        self.gerenciador_estado_jogador.atualizar_estado(
                            self.movimento, self.calibrado)
                    # inside the square: redraw it in red as feedback
                    cv2.rectangle(
                        frame, (centro_x - (self.LARGURA_QUADRADO_CENTRO / 2),
                                centro_y - (self.ALTURA_QUADRADO_CENTRO / 2)),
                        (centro_x + (self.LARGURA_QUADRADO_CENTRO / 2), centro_y + (self.ALTURA_QUADRADO_CENTRO / 2)), [0, 0, 255], 2)

                # print hsv.item(cy, cx, 0), hsv.item(cy, cx, 1), hsv.item(cy, cx, 2)
                # if 100 < hsv.item(cy, cx, 0) < 120:
                cv2.rectangle(frame, (x, y), (x + w, y + h), [255, 0, 0], 2)

                # keep only the last NUM_Y_GUARDADOS y values
                if len(self.ys) >= self.NUM_Y_GUARDADOS:
                    self.ys = self.ys[1:self.NUM_Y_GUARDADOS]
                self.ys.append(y)

                if self.calibrado:
                    # movement type: 1 went up, -1 went down, 0 no movement
                    variacao_movimento = self.verificar_movimento()
                    mudou_movimento = False
                    if variacao_movimento:
                        # keep the previous movement (currently unused)
                        movimento_antigo = self.movimento
                        # went up — but what happened?
                        if variacao_movimento == self.VariacoesMovimento.PARA_CIMA:
                            # jumped
                            if self.movimento == Movimentos.EM_PE:
                                self.movimento = Movimentos.SUBINDO
                                momento_pulo['y'] = y
                                mudou_movimento = True
                            # stood up from a crouch
                            elif self.movimento == Movimentos.AGACHADO:
                                # and y > momento_agachar['y'] -
                                # self.MARGEM_TOLERANCIA
                                if momento_agachar['y'] != None and y < momento_agachar['y'] + self.MARGEM_TOLERANCIA:
                                    self.movimento = Movimentos.EM_PE
                                    mudou_movimento = True
                        # went down — but what happened?
                        elif variacao_movimento == self.VariacoesMovimento.PARA_BAIXO:
                            # crouched
                            if self.movimento == Movimentos.EM_PE and not self.agachar_desabilitado and y > self.ALTURA_AGACHAMENTO:
                                momento_agachar['y'] = y
                                self.movimento = Movimentos.AGACHADO
                                mudou_movimento = True
                            # falling back down from a jump
                            elif self.movimento == Movimentos.SUBINDO:
                                self.movimento = Movimentos.DESCENDO
                                mudou_movimento = True

                        if self.movimento == Movimentos.DESCENDO:
                            # returned to the ground
                            # and y < momento_pulo['y'] +
                            # self.MARGEM_TOLERANCIA:
                            if momento_pulo['y'] != None and y > momento_pulo['y'] - self.MARGEM_TOLERANCIA:
                                self.movimento = Movimentos.EM_PE
                                momento_pulo['y'] = None
                                mudou_movimento = True

                        # print 'mov:{0} mov_ant: {1} mov_var:
                        # {2}'.format(self.movimento, movimento_antigo,
                        # variacao_movimento)
                        if mudou_movimento:
                            if self.movimento == Movimentos.SUBINDO:
                                print 'Pulou em px: {0}'.format(momento_pulo['y'])
                            elif self.movimento == Movimentos.AGACHADO:
                                print 'Agachou em px: {0}'.format(momento_agachar['y'])
                            elif self.movimento == Movimentos.EM_PE:
                                print 'De pé em px: {0}'.format(y)
                            self.gerenciador_estado_jogador.atualizar_estado(
                                self.movimento, self.calibrado)
                        # print self.ys
                    # there was no large variation between the points
                    else:
                        # and y < momento_pulo['y'] + self.MARGEM_TOLERANCIA:
                        if momento_pulo['y'] != None and y > momento_pulo['y'] - self.MARGEM_TOLERANCIA:
                            if self.movimento == Movimentos.DESCENDO:
                                print 'De pé em px: {0}'.format(y)
                                self.movimento = Movimentos.EM_PE
                                momento_pulo['y'] = None
                                self.gerenciador_estado_jogador.atualizar_estado(
                                    self.movimento, self.calibrado)
                                mudou_movimento = True
                        # and y > momento_agachar['y'] - self.MARGEM_TOLERANCIA:
                        # the tolerance margin is not applied here, because
                        # right after crouching the player may already be
                        # standing up. Ideally a second, smaller margin
                        # would be used
                        if momento_agachar['y'] != None and y < momento_agachar['y'] - self.MARGEM_TOLERANCIA:
                            if self.movimento == Movimentos.AGACHADO:
                                print 'De pé em px: {0}'.format(y)
                                self.movimento = Movimentos.EM_PE
                                momento_agachar['y'] = None
                                self.gerenciador_estado_jogador.atualizar_estado(
                                    self.movimento, self.calibrado)
                                mudou_movimento = True

                    # back to standing: reset the y history
                    if self.movimento == Movimentos.EM_PE and mudou_movimento:
                        # if momento_agachar['y']:
                        #     for i in self.ys:
                        #         if i < momento_agachar['y']:
                        #             self.ys.remove(i)
                        # else:
                        self.ys = []

            if self.desenhar_linhas:
                # top line (640 x 50)
                cv2.line(frame, (0, 50), (int(self.width), 50),
                         (0, 255, 255), 2)
                # bottom line (640 x 430)
                cv2.line(frame, (0, int(self.height - 50)),
                         (int(self.width), int(self.height - 50)), (0, 255, 255), 2)
                # line that defines whether the user crouched (640 x 330)
                cv2.line(frame, (0, int(self.height - 150)),
                         (int(self.width), int(self.height - 150)), (0, 0, 255), 2)

            cv2.imshow('JUMP! Detecção', frame)
            key = cv2.waitKey(25)
            if key == 27:  # esc
                break

        self.reiniciar()
        '''if self.conexao is None:
            self.reiniciar()
        else:
            self.finalizar()'''

    def reiniciar(self):
        '''
        Restarts the detection and its resources
        '''
        print 'reiniciando captura...'
        self.ys = []
        self.desenhar_linhas = False
        self.calibrado = False
        self.gerenciador_estado_jogador.finish()
        if self.conexao is None:
            self.gerenciador_estado_jogador = GerenciadorEstadoJogador()
        else:
            self.gerenciador_estado_jogador = GerenciadorEstadoJogador(conexao=self.conexao)
        self.iniciar()

    def finalizar(self):
        '''
        Finalizes the detection and releases its resources
        '''
        self.calibrado = False
        self.movimento = Movimentos.EM_PE
        self.gerenciador_estado_jogador.finish()
        self.camera.release()
        cv2.destroyAllWindows()
if __name__ == "__main__":
    # Command-line entry point: parse camera/behaviour flags, then run the
    # detector until the window is closed or the player loses.
    parser = OptionParser()
    parser.add_option("-c", "--camera", dest="id_camera", type="int",
                      default=0, help="id da camera")
    parser.add_option("-a", "--desagachar", dest="agachar_desabilitado",
                      default=False, action="store_true",
                      help="Desabilitar agachar")
    parser.add_option("-q", "--quiet", dest="verbose",
                      action="store_false", default=True)
    options, args = parser.parse_args()

    detector = DetectorMovimento(options.id_camera,
                                 options.agachar_desabilitado)
    detector.iniciar()
    detector.finalizar()
| gpl-3.0 |
SebastianMerz/calalert | Server/venv/lib/python2.7/site-packages/openid/extensions/sreg.py | 143 | 17848 | """Simple registration request and response parsing and object representation
This module contains objects representing simple registration requests
and responses that can be used with both OpenID relying parties and
OpenID providers.
1. The relying party creates a request object and adds it to the
C{L{AuthRequest<openid.consumer.consumer.AuthRequest>}} object
before making the C{checkid_} request to the OpenID provider::
auth_request.addExtension(SRegRequest(required=['email']))
2. The OpenID provider extracts the simple registration request from
the OpenID request using C{L{SRegRequest.fromOpenIDRequest}},
gets the user's approval and data, creates a C{L{SRegResponse}}
object and adds it to the C{id_res} response::
sreg_req = SRegRequest.fromOpenIDRequest(checkid_request)
# [ get the user's approval and data, informing the user that
# the fields in sreg_response were requested ]
sreg_resp = SRegResponse.extractResponse(sreg_req, user_data)
sreg_resp.toMessage(openid_response.fields)
3. The relying party uses C{L{SRegResponse.fromSuccessResponse}} to
extract the data from the OpenID response::
sreg_resp = SRegResponse.fromSuccessResponse(success_response)
@since: 2.0
@var sreg_data_fields: The names of the data fields that are listed in
the sreg spec, and a description of them in English
@var sreg_uri: The preferred URI to use for the simple registration
namespace and XRD Type value
"""
from openid.message import registerNamespaceAlias, \
NamespaceAliasRegistrationError
from openid.extension import Extension
from openid import oidutil
# Compatibility shim: define `basestring` as (str, unicode) on ancient
# interpreters (Python 2.2) that lack it, so isinstance checks work.
try:
    basestring #pylint:disable-msg=W0104
except NameError:
    # For Python 2.2
    basestring = (str, unicode) #pylint:disable-msg=W0622
# Public API of this module.
__all__ = [
    'SRegRequest',
    'SRegResponse',
    'data_fields',
    'ns_uri',
    'ns_uri_1_0',
    'ns_uri_1_1',
    'supportsSReg',
]

# The data fields that are listed in the sreg spec, mapped to an English
# human-readable description of each field.
data_fields = {
    'fullname':'Full Name',
    'nickname':'Nickname',
    'dob':'Date of Birth',
    'email':'E-mail Address',
    'gender':'Gender',
    'postcode':'Postal Code',
    'country':'Country',
    'language':'Language',
    'timezone':'Time Zone',
    }
def checkFieldName(field_name):
    """Check to see that the given value is a valid simple
    registration data field name.

    @raise ValueError: if the field name is not a valid simple
        registration data field name
    """
    if field_name in data_fields:
        return
    raise ValueError('%r is not a defined simple registration field' %
                     (field_name,))
# URI used in the wild for Yadis documents advertising simple
# registration support
ns_uri_1_0 = 'http://openid.net/sreg/1.0'

# URI in the draft specification for simple registration 1.1
# <http://openid.net/specs/openid-simple-registration-extension-1_1-01.html>
ns_uri_1_1 = 'http://openid.net/extensions/sreg/1.1'

# This attribute will always hold the preferred URI to use when adding
# sreg support to an XRDS file or in an OpenID namespace declaration.
ns_uri = ns_uri_1_1

# Register the 'sreg' alias for the 1.1 namespace at import time; if the
# alias is already taken the failure is logged rather than raised.
try:
    registerNamespaceAlias(ns_uri_1_1, 'sreg')
except NamespaceAliasRegistrationError, e:
    oidutil.log('registerNamespaceAlias(%r, %r) failed: %s' % (ns_uri_1_1,
                                                               'sreg', str(e),))
def supportsSReg(endpoint):
    """Does the given endpoint advertise support for simple
    registration?

    @param endpoint: The endpoint object as returned by OpenID discovery
    @type endpoint: openid.consumer.discover.OpenIDEndpoint

    @returns: Whether an sreg type was advertised by the endpoint
    @rtype: bool
    """
    # Check the modern (1.1) type first, falling back to the 1.0 type.
    advertises_1_1 = endpoint.usesExtension(ns_uri_1_1)
    return advertises_1_1 or endpoint.usesExtension(ns_uri_1_0)
# Raised by getSRegNS() below when the 'sreg' alias cannot be claimed.
class SRegNamespaceError(ValueError):
    """The simple registration namespace was not found and could not
    be created using the expected name (there's another extension
    using the name 'sreg')
    This is not I{illegal}, for OpenID 2, although it probably
    indicates a problem, since it's not expected that other extensions
    will re-use the alias that is in use for OpenID 1.
    If this is an OpenID 1 request, then there is no recourse. This
    should not happen unless some code has modified the namespaces for
    the message that is being processed.
    """
def getSRegNS(message):
    """Extract the simple registration namespace URI from the given
    OpenID message. Handles OpenID 1 and 2, as well as both sreg
    namespace URIs found in the wild, as well as missing namespace
    definitions (for OpenID 1)
    @param message: The OpenID message from which to parse simple
        registration fields. This may be a request or response message.
    @type message: C{L{openid.message.Message}}
    @returns: the sreg namespace URI for the supplied message. The
        message may be modified to define a simple registration
        namespace.
    @rtype: C{str}
    @raise ValueError: when using OpenID 1 if the message defines
        the 'sreg' alias to be something other than a simple
        registration type.
    """
    # See if there exists an alias for one of the two defined simple
    # registration types.
    for sreg_ns_uri in [ns_uri_1_1, ns_uri_1_0]:
        alias = message.namespaces.getAlias(sreg_ns_uri)
        if alias is not None:
            break
    else:
        # for/else: this branch only runs when the loop found no alias
        # for either type, so try to add one. We default to using the
        # modern value (1.1)
        sreg_ns_uri = ns_uri_1_1
        try:
            message.namespaces.addAlias(ns_uri_1_1, 'sreg')
        except KeyError, why:
            # An alias for the string 'sreg' already exists, but it's
            # defined for something other than simple registration
            raise SRegNamespaceError(why[0])
    # we know that sreg_ns_uri is defined, because it's also assigned in
    # the else clause of the loop, so disable the pylint warning
    return sreg_ns_uri #pylint:disable-msg=W0631
class SRegRequest(Extension):
    """An object to hold the state of a simple registration request.
    @ivar required: A list of the required fields in this simple
        registration request
    @type required: [str]
    @ivar optional: A list of the optional fields in this simple
        registration request
    @type optional: [str]
    @ivar policy_url: The policy URL that was provided with the request
    @type policy_url: str or NoneType
    @group Consumer: requestField, requestFields, getExtensionArgs, addToOpenIDRequest
    @group Server: fromOpenIDRequest, parseExtensionArgs
    """
    ns_alias = 'sreg'
    def __init__(self, required=None, optional=None, policy_url=None,
                 sreg_ns_uri=ns_uri):
        """Initialize an empty simple registration request"""
        Extension.__init__(self)
        self.required = []
        self.optional = []
        self.policy_url = policy_url
        self.ns_uri = sreg_ns_uri
        # strict=True so that a field appearing in both ``required`` and
        # ``optional`` raises immediately instead of being merged silently.
        if required:
            self.requestFields(required, required=True, strict=True)
        if optional:
            self.requestFields(optional, required=False, strict=True)
    # Assign getSRegNS to a static method so that it can be
    # overridden for testing.
    _getSRegNS = staticmethod(getSRegNS)
    def fromOpenIDRequest(cls, request):
        """Create a simple registration request that contains the
        fields that were requested in the OpenID request with the
        given arguments
        @param request: The OpenID request
        @type request: openid.server.CheckIDRequest
        @returns: The newly created simple registration request
        @rtype: C{L{SRegRequest}}
        """
        self = cls()
        # Since we're going to mess with namespace URI mapping, don't
        # mutate the object that was passed in.
        message = request.message.copy()
        self.ns_uri = self._getSRegNS(message)
        args = message.getArgs(self.ns_uri)
        self.parseExtensionArgs(args)
        return self
    fromOpenIDRequest = classmethod(fromOpenIDRequest)
    def parseExtensionArgs(self, args, strict=False):
        """Parse the unqualified simple registration request
        parameters and add them to this object.
        This method is essentially the inverse of
        C{L{getExtensionArgs}}. This method restores the serialized simple
        registration request fields.
        If you are extracting arguments from a standard OpenID
        checkid_* request, you probably want to use C{L{fromOpenIDRequest}},
        which will extract the sreg namespace and arguments from the
        OpenID request. This method is intended for cases where the
        OpenID server needs more control over how the arguments are
        parsed than that method provides.
        >>> args = message.getArgs(ns_uri)
        >>> request.parseExtensionArgs(args)
        @param args: The unqualified simple registration arguments
        @type args: {str:str}
        @param strict: Whether requests with fields that are not
            defined in the simple registration specification should be
            tolerated (and ignored)
        @type strict: bool
        @returns: None; updates this object
        """
        for list_name in ['required', 'optional']:
            required = (list_name == 'required')
            items = args.get(list_name)
            if items:
                for field_name in items.split(','):
                    try:
                        self.requestField(field_name, required, strict)
                    except ValueError:
                        # Unknown or duplicated fields are silently
                        # ignored unless the caller asked for strictness.
                        if strict:
                            raise
        self.policy_url = args.get('policy_url')
    def allRequestedFields(self):
        """A list of all of the simple registration fields that were
        requested, whether they were required or optional.
        @rtype: [str]
        """
        return self.required + self.optional
    def wereFieldsRequested(self):
        """Have any simple registration fields been requested?
        @rtype: bool
        """
        return bool(self.allRequestedFields())
    def __contains__(self, field_name):
        """Was this field in the request?"""
        return (field_name in self.required or
                field_name in self.optional)
    def requestField(self, field_name, required=False, strict=False):
        """Request the specified field from the OpenID user
        @param field_name: the unqualified simple registration field name
        @type field_name: str
        @param required: whether the given field should be presented
            to the user as being a required to successfully complete
            the request
        @param strict: whether to raise an exception when a field is
            added to a request more than once
        @raise ValueError: when the field requested is not a simple
            registration field or strict is set and the field was
            requested more than once
        """
        checkFieldName(field_name)
        if strict:
            if field_name in self.required or field_name in self.optional:
                raise ValueError('That field has already been requested')
        else:
            if field_name in self.required:
                return
            if field_name in self.optional:
                if required:
                    # Upgrade the field from optional to required.
                    self.optional.remove(field_name)
                else:
                    return
        if required:
            self.required.append(field_name)
        else:
            self.optional.append(field_name)
    def requestFields(self, field_names, required=False, strict=False):
        """Add the given list of fields to the request
        @param field_names: The simple registration data fields to request
        @type field_names: [str]
        @param required: Whether these values should be presented to
            the user as required
        @param strict: whether to raise an exception when a field is
            added to a request more than once
        @raise ValueError: when a field requested is not a simple
            registration field or strict is set and a field was
            requested more than once
        """
        # A bare string would otherwise iterate character by character.
        if isinstance(field_names, basestring):
            raise TypeError('Fields should be passed as a list of '
                            'strings (not %r)' % (type(field_names),))
        for field_name in field_names:
            self.requestField(field_name, required, strict=strict)
    def getExtensionArgs(self):
        """Get a dictionary of unqualified simple registration
        arguments representing this request.
        This method is essentially the inverse of
        C{L{parseExtensionArgs}}. This method serializes the simple
        registration request fields.
        @rtype: {str:str}
        """
        args = {}
        if self.required:
            args['required'] = ','.join(self.required)
        if self.optional:
            args['optional'] = ','.join(self.optional)
        if self.policy_url:
            args['policy_url'] = self.policy_url
        return args
class SRegResponse(Extension):
    """Represents the data returned in a simple registration response
    inside of an OpenID C{id_res} response. This object will be
    created by the OpenID server, added to the C{id_res} response
    object, and then extracted from the C{id_res} message by the
    Consumer.
    @ivar data: The simple registration data, keyed by the unqualified
        simple registration name of the field (i.e. nickname is keyed
        by C{'nickname'})
    @ivar ns_uri: The URI under which the simple registration data was
        stored in the response message.
    @group Server: extractResponse
    @group Consumer: fromSuccessResponse
    @group Read-only dictionary interface: keys, iterkeys, items, iteritems,
        __iter__, get, __getitem__, keys, has_key
    """
    ns_alias = 'sreg'
    def __init__(self, data=None, sreg_ns_uri=ns_uri):
        Extension.__init__(self)
        # Avoid a shared mutable default: only create a fresh dict when
        # no data mapping was supplied.
        if data is None:
            self.data = {}
        else:
            self.data = data
        self.ns_uri = sreg_ns_uri
    def extractResponse(cls, request, data):
        """Take a C{L{SRegRequest}} and a dictionary of simple
        registration values and create a C{L{SRegResponse}}
        object containing that data.
        @param request: The simple registration request object
        @type request: SRegRequest
        @param data: The simple registration data for this
            response, as a dictionary from unqualified simple
            registration field name to string (unicode) value. For
            instance, the nickname should be stored under the key
            'nickname'.
        @type data: {str:str}
        @returns: a simple registration response object
        @rtype: SRegResponse
        """
        self = cls()
        self.ns_uri = request.ns_uri
        # Only copy values for fields that were actually requested.
        for field in request.allRequestedFields():
            value = data.get(field)
            if value is not None:
                self.data[field] = value
        return self
    extractResponse = classmethod(extractResponse)
    # Assign getSRegArgs to a static method so that it can be
    # overridden for testing
    _getSRegNS = staticmethod(getSRegNS)
    def fromSuccessResponse(cls, success_response, signed_only=True):
        """Create a C{L{SRegResponse}} object from a successful OpenID
        library response
        (C{L{openid.consumer.consumer.SuccessResponse}}) response
        message
        @param success_response: A SuccessResponse from consumer.complete()
        @type success_response: C{L{openid.consumer.consumer.SuccessResponse}}
        @param signed_only: Whether to process only data that was
            signed in the id_res message from the server.
        @type signed_only: bool
        @rtype: SRegResponse
        @returns: A simple registration response containing the data
            that was supplied with the C{id_res} response, or None if
            no sreg arguments were present (or signed, when
            signed_only is set).
        """
        self = cls()
        self.ns_uri = self._getSRegNS(success_response.message)
        if signed_only:
            args = success_response.getSignedNS(self.ns_uri)
        else:
            args = success_response.message.getArgs(self.ns_uri)
        if not args:
            return None
        # Only accept fields defined by the sreg specification.
        for field_name in data_fields:
            if field_name in args:
                self.data[field_name] = args[field_name]
        return self
    fromSuccessResponse = classmethod(fromSuccessResponse)
    def getExtensionArgs(self):
        """Get the fields to put in the simple registration namespace
        when adding them to an id_res message.
        @see: openid.extension
        """
        return self.data
    # Read-only dictionary interface.  NOTE: every accessor that takes a
    # field name first runs checkFieldName(), so querying a name that is
    # not a defined sreg field raises ValueError instead of behaving
    # like a plain dict.
    def get(self, field_name, default=None):
        """Like dict.get, except that it checks that the field name is
        defined by the simple registration specification"""
        checkFieldName(field_name)
        return self.data.get(field_name, default)
    def items(self):
        """All of the data values in this simple registration response
        """
        return self.data.items()
    def iteritems(self):
        return self.data.iteritems()
    def keys(self):
        return self.data.keys()
    def iterkeys(self):
        return self.data.iterkeys()
    def has_key(self, key):
        # Delegates to __contains__, so this also validates the name.
        return key in self
    def __contains__(self, field_name):
        checkFieldName(field_name)
        return field_name in self.data
    def __iter__(self):
        return iter(self.data)
    def __getitem__(self, field_name):
        checkFieldName(field_name)
        return self.data[field_name]
    def __nonzero__(self):
        # Python 2 truth value: a response is truthy iff it carries data.
        return bool(self.data)
| gpl-2.0 |
razvanphp/arangodb | 3rdParty/V8-3.31.74.1/third_party/python_26/Lib/encodings/cp1140.py | 593 | 13361 | """ Python Character Mapping Codec cp1140 generated from 'python-mappings/CP1140.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless encoder/decoder pair built on the module-level charmap
    # tables.  This file is generated by gencodec.py; the code shape is
    # kept byte-identical so regeneration diffs stay clean.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap codecs are stateless, so each incremental call just maps
    # the chunk through the table; [0] drops the consumed-length count.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Mirror of IncrementalEncoder for the decode direction.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # All behaviour comes from the mixed-in Codec and StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # All behaviour comes from the mixed-in Codec and StreamReader.
    pass
### encodings module API
def getregentry():
    # Entry point called by the ``encodings`` package / codecs registry
    # to obtain this codec's CodecInfo record.
    return codecs.CodecInfo(
        name='cp1140',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xa2' # 0x4A -> CENT SIGN
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'|' # 0x4F -> VERTICAL LINE
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'!' # 0x5A -> EXCLAMATION MARK
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'\xac' # 0x5F -> NOT SIGN
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\u20ac' # 0x9F -> EURO SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'^' # 0xB0 -> CIRCUMFLEX ACCENT
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'[' # 0xBA -> LEFT SQUARE BRACKET
u']' # 0xBB -> RIGHT SQUARE BRACKET
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
# Inverse mapping (char -> byte), derived mechanically from the
# decoding table above.
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
mikekestemont/ruzicka | code/ruzicka/art.py | 1 | 45257 | #!/usr/bin/env python
__doc__ = '''art.py -- Approximate Randomization Test
This script carries out a significance test on the output of an
instance-based machine learner based on the theory of
approximate randomization tests:
Eric W. Noreen, Computer-intensive Methods for Testing Hypotheses: An Introduction, John Wiley & Sons, New York, NY, USA, 1989.
No assumptions are made on the distribution of the variables. The only assumption made is that there are
no inter-instance dependencies, i.e. knowing the class label of 1 instance should not help
knowing the class label of another instance. This assumption is violated in the output from the MBT (memory-based tagger).
A nice example of why no inter-instance dependencies should be present is in:
Alexander Yeh, More accurate tests for the statistical significance of result differences,
in: Proceedings of the 18th International Conference on Computational Linguistics, Volume 2,
pages 947-953, 2000.
TEST STATISTICS
At the moment, the test statitics tested are differences in macro-recall, macro-precision, macro-f-score, micro-f-score, and accuracy.
This can be changed by changing the getscores() function.
DEPENDENCIES
This script depends on confusionmatrix.py and combinations.py (www.clips.ua.ac.be/~vincent/software.html)
and optionally scipy (www.scipy.org).
Copyright (c) 2013 CLiPS. All rights reserved.
# License: GNU General Public License, see http://www.clips.ua.ac.be/~vincent/scripts/LICENSE.txt
'''
__author__="Vincent Van Asch"
__date__="September 2013"
__version__="3.0.3"
__url__ = 'http://www.clips.ua.ac.be/~vincent/software.html'
import sys, os, time
import random
import getopt
from math import pow
try:
from scipy.stats import binom_test
except ImportError:
print >>sys.stderr, 'INFO: Could not import scipy (www.scipy.org): signtest is not available.'
try:
import confusionmatrix
except ImportError:
raise ImportError('''This script depends on confusionmatrix.py (www.clips.ua.ac.be/~vincent/software.html).
Place the script in the same folder as the art.py script.''')
try:
import combinations
except ImportError:
raise ImportError('''This script depends on combinations.py (www.clips.ua.ac.be/~vincent/software.html).
Place the script in the same folder as the art.py script.''')
def loginfo(s):
    '''Write *s* to stderr, prefixed with a dd/mm/YYYY HH:MM:SS timestamp.'''
    print >>sys.stderr, '%s: %s' %(time.strftime('%d/%m/%Y %H:%M:%S'), s)
def fread(fname, index=None, sep=None, encoding='utf8'):
    '''Read a text file and return its non-empty lines as a list.
    sep: feature separator used when splitting lines
    index: if None, each element of the output list is the full
               (decoded) line
           if int, each element is the field at position ``index``
           if tuple/list of length 2, each element is the slice
               fields[index[0]:index[1]] (as a list)
    encoding: codec used to decode each line (Python 2 bytes input)
    '''
    path = os.path.abspath(os.path.expanduser(fname))
    lines = []
    with open(path, 'rU') as handle:
        for raw in handle:
            stripped = raw.strip()
            if not stripped:
                # blank lines are skipped entirely
                continue
            decoded = stripped.decode(encoding)
            if index is None:
                lines.append(decoded)
                continue
            fields = decoded.split(sep)
            if isinstance(index, int):
                lines.append(fields[index])
            elif isinstance(index, (list, tuple)):
                if len(index) != 2: raise ValueError('index should have length 2 not %d' %len(index))
                lines.append(fields[index[0]:index[1]])
            else:
                raise TypeError('index should be None, int or tuple')
    return lines
def strata_read(fname, sep=None, encoding='utf8'):
    '''Read a strata file where each non-empty line is
    "<stratum> <group> <float> <float> ...".
    Returns a nested dict: {stratum: {group: [floats]}}.
    '''
    strata = {}
    path = os.path.abspath(os.path.expanduser(fname))
    with open(path, 'rU') as handle:
        for raw in handle:
            text = raw.strip().decode(encoding)
            if not text:
                continue
            fields = text.split(sep)
            stratum, group = fields[0], fields[1]
            values = [float(v) for v in fields[2:]]
            # setdefault creates the per-stratum dict on first sight
            strata.setdefault(stratum, {})[group] = values
    return strata
# Internal separator used to join the token labels of one sentence into
# a single string.
MBTSEP = '\x13'
def mbtread(fname, sep="<utt>"):
    '''Read sentences from an MBT-format file.
    sep: sentence separator (empty lines also end a sentence)
    Returns a list of strings; each string is the MBTSEP-joined
    sequence of token labels (the last column of each token line) of
    one sentence.
    '''
    sentences = []
    current = []
    path = os.path.abspath(os.path.expanduser(fname))
    with open(path, 'rU') as handle:
        for raw in handle:
            text = raw.strip()
            if text and text != sep:
                # keep only the last column: the token label
                current.append(text.split()[-1])
            elif current:
                sentences.append(MBTSEP.join(current))
                current = []
    # flush a trailing sentence that was not followed by a separator
    if current:
        sentences.append(MBTSEP.join(current))
    return sentences
def readtraining(fname, index=-1, sep=None):
    '''Read a training file and return the class-label distribution.
    index and sep are passed through to fread(); by default the label
    is the last field of each line.
    Returns a dict: label -> number of occurrences.
    '''
    counts = {}
    for label in fread(fname, sep=sep, index=index):
        counts[label] = counts.get(label, 0) + 1
    return counts
def signtest(gold, system1, system2):
    '''Sign test for labeling accuracy.
    gold, system1, system2: equally long, aligned label lists.
    Returns the two-sided binomial p-value over the instances where
    exactly one of the two systems is correct.
    @raise NameError: if scipy.stats.binom_test was not imported.
    '''
    assert len(gold) == len(system1) == len(system2)
    # Count instances where exactly one system is correct.  Ties (both
    # correct or both wrong) carry no information for the sign test and
    # are excluded.  The previous version counted instances where BOTH
    # systems were correct towards system1 (its elif skipped the check
    # on system2), inflating s1correct and the trial count.
    s1correct = 0
    s2correct = 0
    wrong = 0
    for g, s1, s2 in zip(gold, system1, system2):
        ok1 = (g == s1)
        ok2 = (g == s2)
        if ok1 and not ok2:
            s1correct += 1
        elif ok2 and not ok1:
            s2correct += 1
        elif not ok1 and not ok2:
            # wrong in both systems: also a tie, tracked for reference
            wrong += 1
    # The total number of predictions that are only correctly predicted
    # by 1 system
    total = s1correct + s2correct
    # make sure we test the smallest because of
    # bug with unequal N in binom_test
    correct = min([s1correct, s2correct])
    try:
        p = binom_test(correct, total)
    except NameError:
        raise NameError('Module scipy (www.scipy.org) was not imported.')
    return p
def termsigntest(gold, system1, system2):
    '''Sign test for term extraction recall.
    gold, system1, system2: term collections; membership (not position)
    is compared.  Returns the binomial p-value.
    '''
    print >>sys.stderr, 'WARNING: this function has not been validated'
    # True positives for only 1 system; gold terms found by BOTH systems
    # are ties and are counted towards neither.
    s1correct=0
    s2correct=0
    fn=0
    for t in gold:
        if t in system1:
            if t not in system2:
                s1correct+=1
        elif t in system2:
            s2correct+=1
        else:
            # missed by both systems (a false negative for both)
            fn +=1
    # The total number of predictions that are only correctly predicted
    # by 1 system
    total = s1correct+s2correct
    try:
        # NOTE(review): unlike signtest(), this passes s1correct rather
        # than min(s1correct, s2correct); for a symmetric two-sided test
        # the result should be the same, but confirm (cf. the warning
        # printed above).
        p = binom_test(s1correct, total)
    except NameError:
        raise NameError('Module scipy (www.scipy.org) was not imported.')
    return p
def getscores(gold, system, training=None):
    '''
    Takes a gold and system list and returns a dictionary with
    macro-recall, macro-precision, macro-f-score, micro-f-score, accuracy.
    If training is given it uses the class label counts from training to compute the scores.
    gold: a list of class labels
    system: a list of class labels (in the same order as gold)
    training: a dictionary:
        key: class label
        value: number of occurrences
    Returns a dictionary:
        key: performance measure name
        value: performance score
    '''
    # Get confusion matrix
    assert len(gold) == len(system)
    # light mode for speed
    cm = confusionmatrix.ConfusionMatrix(light=True)
    # Add the training label distribution so that averaged() can weight
    # by it (the training=bool(training) flag below).
    if training:
        for k, v in training.items():
            for i in range(v):
                cm.add_training([k])
    # Add data
    for g, s in zip(gold, system):
        cm.single_add(g, s)
    # NOTE(review): the 'lfb-' entries use confusionmatrix.MICROt --
    # presumably a training-aware micro average; confirm in
    # confusionmatrix.py.
    output={'macro-av. recall': cm.averaged(level=confusionmatrix.MACRO, score=confusionmatrix.RECALL, training=bool(training)), \
            'macro-av. precision': cm.averaged(level=confusionmatrix.MACRO, score=confusionmatrix.PRECISION, training=bool(training)), \
            'macro-av. f-score': cm.averaged(level=confusionmatrix.MACRO, score=confusionmatrix.FSCORE, training=bool(training)), \
            'micro-av. f-score': cm.averaged(level=confusionmatrix.MICRO, score=confusionmatrix.FSCORE, training=bool(training)), \
            'micro-av. precision': cm.averaged(level=confusionmatrix.MICRO, score=confusionmatrix.PRECISION, training=bool(training)), \
            'micro-av. recall': cm.averaged(level=confusionmatrix.MICRO, score=confusionmatrix.RECALL, training=bool(training)), \
            'lfb-micro-av. f-score': cm.averaged(level=confusionmatrix.MICROt, score=confusionmatrix.FSCORE, training=bool(training)), \
            'lfb-micro-av. precision': cm.averaged(level=confusionmatrix.MICROt, score=confusionmatrix.PRECISION, training=bool(training)), \
            'lfb-micro-av. recall': cm.averaged(level=confusionmatrix.MICROt, score=confusionmatrix.RECALL, training=bool(training)), \
            'accuracy': cm.accuracy()}
    return output
def getscores2(gold, system, training=None):
    '''Precision, recall and F1 over two term lists.
    gold: the reference terms
    system: the extracted terms; duplicates in ``system`` are counted
        every time they occur (list semantics preserved)
    training: unused; kept for interface compatibility with getscores()
    Returns {'recall': R, 'precision': P, 'f1-score': F}.
    Empty ``system`` or ``gold`` now yields 0.0 for the affected score
    instead of raising ZeroDivisionError.
    '''
    # Precompute a set for O(1) membership instead of O(len(gold)) per
    # item; assumes terms are hashable (strings in this script's usage).
    gold_set = set(gold)
    tp = len([i for i in system if i in gold_set])
    P = float(tp) / len(system) if system else 0.0
    R = float(tp) / len(gold) if gold else 0.0
    if P == 0 or R == 0:
        F = 0.0
    else:
        F = 2 * P * R / (P + R)
    return {'recall': R, 'precision': P, 'f1-score': F}
def getscoresmbt(gold, system, training=None):
    '''Token-level accuracy over MBTSEP-joined sentence strings.
    gold/system: aligned lists of sentence strings as produced by
    mbtread(); each sentence must have the same number of tokens in
    both lists.
    Returns {'accuracy': fraction of tokens labelled identically}.
    '''
    n_correct = 0
    n_tokens = 0
    for gsent, ssent in zip(gold, system):
        gtokens = gsent.split(MBTSEP)
        stokens = ssent.split(MBTSEP)
        assert len(gtokens) == len(stokens)
        n_tokens += len(gtokens)
        n_correct += sum(1 for gt, st in zip(gtokens, stokens) if gt == st)
    return {'accuracy': n_correct/float(n_tokens)}
def getscoresmbtmulti(gold, system, training=None, sep='_'):
    '''Per-label F-scores plus micro F-score for MBT output with
    multi-part labels.
    gold/system: aligned lists of MBTSEP-joined sentence strings (see
        mbtread); each token label may consist of several parts.
    sep: separator between the parts of one multi-part label.  Bug fix:
        system labels are now split on ``sep`` as well; previously they
        were always split on the literal '_', silently breaking any
        call that passed a different separator (the default behaviour
        is unchanged).
    training: unused; kept for interface compatibility.
    Returns a dict: label -> F-score, plus the key 'micro-fscore'.
    '''
    # Generator yielding aligned (gold, system) label-part sets, one
    # pair per token.
    def reader(gold, system):
        for g,s in zip(gold, system):
            g = g.split(MBTSEP)
            s = s.split(MBTSEP)
            assert len(g) == len(s)
            for gi, si in zip(g, s):
                gi = set(gi.split(sep))
                si = set(si.split(sep))
                yield gi, si
    r = reader(gold, system)
    cm = confusionmatrix.ConfusionMatrix(compute_none=True)
    for g, p in r:
        cm.add(list(g), list(p))
    out={}
    for label in cm.labels:
        out[label] = cm.fscore(label)
    out['micro-fscore']=cm.averaged(level=confusionmatrix.MICRO, score=confusionmatrix.FSCORE)
    return out
def average(dumy, values, training=None):
    '''Arithmetic mean of *values*, wrapped in the score-dict shape
    used by the other scoring functions.  The first argument is
    ignored (kept for interface compatibility).'''
    total = sum(values)
    return {'mean': total / float(len(values))}
def teststatistic(gold, system1, system2, training=None, scoring=getscores, absolute=True):
    '''Per-measure test statistic comparing two systems.
    gold: the gold-standard labels
    system1/system2: predicted labels, aligned with gold
    training: optional training-distribution dict, passed to ``scoring``
    scoring: the function that calculates the performances; must return
        a dict mapping measure name to score
    absolute: if True : the absolute difference of system1 performance and system2 performance
              if False: system1 performance minus system2 performance
    Returns a dict: measure name -> difference.
    '''
    # Get the reference performance difference
    scores1 = scoring(gold, system1, training=training)
    scores2 = scoring(gold, system2, training=training)
    # Compute the differences between system1 and system2 for every
    # measure reported by either side.  ``set(d)`` iterates keys and is
    # valid on both Python 2 and 3; the previous
    # ``set(scores1.keys()+scores2.keys())`` breaks on Python 3, where
    # keys() returns a view that does not support ``+``.
    diffs = {}
    for k in set(scores1) | set(scores2):
        diff = scores1.get(k, 0) - scores2.get(k, 0)
        if absolute: diff = abs(diff)
        diffs[k] = diff
    return diffs
def distribute(s):
    '''Randomly distribute the elements of *s* over two lists.

    Returns the two batches; every element of *s* lands in exactly one of
    them, each side chosen independently at random.
    '''
    batch1 = []
    batch2 = []
    # Consume elements back-to-front, mirroring a pop()-based loop; one
    # random.choice call per element.
    for item in reversed(s):
        random.choice([batch1, batch2]).append(item)
    return batch1, batch2
def getprobabilities(ngecounts, N, add=1, verbose=False):
    '''Calculate the H0-acceptance probabilities from the nge counts.

    Each probability is computed as (nge + add)/(N + add); add=1 gives the
    usual smoothed estimate for approximate randomization, add=0 the exact
    probability for exhaustive randomization.

    ngecounts: a dictionary:
        keys: performance name
        value: nge (number of trials whose pseudo-statistic was >= the
               observed statistic)
    N: number of trials
    add: integer smoothing constant
    verbose: if True, print the probabilities sorted by name (note: this
        block uses Python 2 print syntax)

    Returns a dictionary mapping performance name -> probability.
    '''
    # Calculate probabilities
    probs={}
    for k, nge in ngecounts.items():
        prob = (nge + add)/float(N + add)
        probs[k] = prob
    if verbose:
        print >>sys.stderr, 'Probabilities for accepting H0:'
        # Report in deterministic (sorted-by-name) order.
        names = probs.keys()
        names.sort()
        for name in names:
            print ' %-23s: %.5g' %(name, probs[name])
    return probs
def get_alternatives(l):
    '''Enumerate all 2**l binary assignment vectors of length *l*.

    Vector j assigns position i the value 1 while (j // 2**i) is even and 0
    while it is odd, i.e. bit i of j negated.  Together the vectors cover
    every possible swap pattern used for exact randomization.
    '''
    n_patterns = pow(2, l)
    patterns = []
    for j in range(n_patterns):
        # 1 when bit i of j is clear, 0 when it is set.
        patterns.append([1 if (j >> i) % 2 == 0 else 0 for i in range(l)])
    return patterns
def exactlabelingsignificance(gold, system1, system2, verbose=False, training=None, scoring=getscores, common=[], common_gold=[]):
    '''Carry out *exact* randomization over the disagreeing instances.

    gold/system1/system2: parallel label lists for the instances on which
        the two systems disagree.
    common: the (identical) predictions both systems agree on; common_gold
        the corresponding gold labels.  Both are appended back before each
        scoring call so the scores match the full data.
    scoring: scoring function with signature f(gold, system, training=...).
    Returns a dict mapping score name -> probability of accepting H0.

    NOTE(review): common/common_gold are mutable default arguments; they are
    only read here, so no aliasing bug manifests, but None defaults would be
    safer.
    '''
    # number of permutations
    N = pow(2, len(gold))
    if verbose: loginfo('%d permutations' %N)
    if N > 5000000: raise ValueError('The number of permutations is too big. Aborting.')
    # the reference test statistics
    refdiffs = teststatistic(gold+common_gold, system1+common, system2+common, training=training, scoring=scoring)
    # Get all combinations
    size = len(gold)
    count=0
    systems = [system1, system2]
    ngecounts = {}
    # Progress is reported roughly every 10% of the permutations.
    if N >= 10:
        nom = int(N/10.0)
    else:
        nom=1
    alternatives = get_alternatives(size)
    while alternatives:
        alt = alternatives.pop()
        count+=1
        # alt is a 0/1 vector: position j takes its label from systems[alt[j]].
        shuffle1 = [systems[k][j] for j,k in enumerate(alt)]
        shuffle2 = [systems[1-k][j] for j,k in enumerate(alt)]
        # the test statistics
        diffs = teststatistic(gold+common_gold, shuffle1+common, shuffle2+common, training=training, scoring=scoring)
        if verbose and not (count%nom): loginfo('Calculated permutation %d/%d' %(count, N))
        # Count how often the permuted difference reaches the observed one.
        for k in refdiffs.keys():
            pseudo = diffs[k]
            actual = refdiffs[k]
            if pseudo >= actual:
                ngecounts[k] = ngecounts.get(k, 0) + 1
            elif k not in ngecounts.keys():
                ngecounts[k]=0
    assert count == N
    assert set(ngecounts.keys()) == set(refdiffs.keys())
    # Calculate probabilities (add=0: exact, unsmoothed)
    probs=getprobabilities(ngecounts, N, add=0, verbose=True)
    return probs
def labelingsignificance(gold, system1, system2, N=1000, verbose=False, training=None, scoring=getscores, show_probs=True, common=[], common_gold=[]):
    '''Calculate approximate randomization test for class labeling experiments.

    Returns the probabilities for accepting H0 for
    macro-recall, macro-precision, macro-fscore, micro-fscore, accuracy.

    training: the counts of the class labels in the training file
    N: number of iterations
    common/common_gold: agreeing predictions (and their gold labels) that are
        appended back before each scoring call; these mutable defaults are
        read-only here.
    '''
    # the reference test statistics
    refdiffs = teststatistic(gold+common_gold, system1+common, system2+common, training=training, scoring=scoring)
    # start shuffling: one [s1, s2] pair per instance
    source = [[s1,s2] for s1,s2 in zip(system1, system2)]
    # Progress is reported roughly every 10% of the shuffles.
    if N >= 10:
        nom = int(N/10.0)
    else:
        nom=1
    ngecounts={}
    for i in range(N):
        shuffle1=[]
        shuffle2=[]
        for preds in source:
            # Randomly swap (or keep) the two predictions for this instance.
            random.shuffle(preds)
            shuffle1.append(preds[0])
            shuffle2.append(preds[1])
        # the test statistics
        diffs = teststatistic(gold+common_gold, shuffle1+common, shuffle2+common, training=training, scoring=scoring)
        # see whether the shuffled system performs better than the originals
        for k in refdiffs.keys():
            pseudo = diffs[k]
            actual = refdiffs[k]
            if pseudo >= actual:
                ngecounts[k] = ngecounts.get(k, 0) + 1
            elif k not in ngecounts.keys():
                ngecounts[k]=0
        if verbose and not ((i+1)%nom):
            loginfo('Calculated shuffle %d/%d' %(i+1, N))
            #getprobabilities(ngecounts, i+1, add=1, verbose=True)
    # Sign test check
    # NOTE(review): func_name is Python 2 only; under Python 3 this would be
    # scoring.__name__.
    if scoring.func_name == 'getscores':
        try:
            s = signtest(gold, system1, system2)
            if verbose: loginfo('Sign-test probability: %.4g' %s)
        except NameError:
            # signtest is presumably only bound when scipy is available
            # (see the VALIDITY section of the usage text) -- verify.
            pass
    assert set(ngecounts.keys()) == set(refdiffs.keys())
    # Calculate probabilities (add=1: smoothed approximate estimate)
    probs=getprobabilities(ngecounts, N, add=1, verbose=show_probs)
    return probs
def exacttermsignificance(gold, system1, system2, verbose=False, absolute=False):
    '''Compute exact randomization significance for term extraction.

    gold/system1/system2: lists of extracted terms.
    absolute: if True use the absolute performance difference as test
        statistic instead of the signed system1 - system2 difference.
    Returns a dict mapping score name -> probability of accepting H0.
    '''
    # Take unique terms: terms extracted by exactly one system can be
    # swapped between the pseudo-systems; terms extracted by both
    # ("doubles") stay fixed on both sides.
    source = []
    doubles=[]
    for t in list(set(system1+system2)):
        if t in system1 and t not in system2:
            source.append(t)
        elif t not in system1 and t in system2:
            source.append(t)
        else:
            doubles.append(t)
    # The number of combinations
    # NOTE(review): N = 1 + sum_i ncombinations(len(source), i); whether the
    # leading 1 double counts the empty subset depends on ncombinations's
    # convention -- the 'assert count == N' below relies on it.  Verify
    # against combinations.py.
    N=1
    for i in range(len(source)+1):
        N+=combinations.ncombinations(len(source), i)
    if verbose: loginfo('%d combinations' %N)
    if N > 5000000: raise ValueError('The number of permutations is too big. Aborting.')
    # the reference test statistics
    refdiffs = teststatistic(gold, system1, system2, scoring=getscores2, absolute=absolute)
    # Progress is reported roughly every 10% of the combinations.
    if N >= 10:
        nom = int(N/10.0)
    else:
        nom=1
    count=0
    ngecounts={}
    for i in range(len(source)+1):
        for subset in combinations.subsets(source, i):
            count+=1
            # The subset goes to pseudo-system 1, the remainder to pseudo-system 2.
            shuffle1 = list(subset)
            shuffle2 = []
            for x in source:
                if x not in shuffle1:
                    shuffle2.append(x)
            #print shuffle1, shuffle2, doubles
            # the test statistics
            diffs = teststatistic(gold, shuffle1+doubles, shuffle2+doubles, scoring=getscores2, absolute=absolute)
            # see whether the shuffled system performs better than the originals
            for k in refdiffs.keys():
                pseudo = diffs[k]
                actual = refdiffs[k]
                if pseudo >= actual:
                    ngecounts[k] = ngecounts.get(k, 0) + 1
                elif k not in ngecounts.keys():
                    ngecounts[k]=0
            if verbose and not ((count)%nom):
                loginfo('Calculated combination %d/%d' %(count, N))
                #getprobabilities(ngecounts, i+1, add=1, verbose=verbose)
    assert count == N
    assert set(ngecounts.keys()) == set(refdiffs.keys())
    # Calculate probabilities (add=0: exact, unsmoothed)
    probs = getprobabilities(ngecounts, N, add=0, verbose=True)
    return probs
def termsignificance(gold, system1, system2, N=10000, verbose=False, absolute=False):
    '''Calculate approximate (shuffled) term-extraction significance.

    gold/system1/system2: lists of extracted terms; duplicates are rejected.
    N: number of shuffles
    absolute: if True use |system1 - system2| as test statistic instead of
        the signed difference.
    Returns a dict mapping score name -> probability of accepting H0.
    '''
    # Only unique terms in a system
    assert len(set(gold)) == len(gold)
    assert len(set(system1)) == len(system1)
    assert len(set(system2)) == len(system2)
    # Get all terms that are unique for a system; terms found by both
    # systems ("doubles") stay fixed on both sides of every shuffle.
    source = []
    doubles=[]
    # news1/news2 are collected here but not used further in this function.
    news1=[]; news2=[]
    for t in list(set(system1+system2)):
        if t in system1 and t not in system2:
            source.append(t)
            news1.append(t)
        elif t not in system1 and t in system2:
            source.append(t)
            news2.append(t)
        else:
            doubles.append(t)
    # the reference test statistics
    refdiffs = teststatistic(gold, system1, system2, scoring=getscores2, absolute=absolute)
    # Progress is reported roughly every 10% of the shuffles.
    if N >= 10:
        nom = int(N/10.0)
    else:
        nom=1
    ngecounts={}
    for i in range(N):
        # Randomly redistribute the system-unique terms over two pseudo-systems.
        shuffle1, shuffle2 = distribute(source)
        # the test statistics
        diffs = teststatistic(gold, shuffle1+doubles, shuffle2+doubles, scoring=getscores2, absolute=absolute)
        # see whether the shuffled system performs better than the originals
        for k in refdiffs.keys():
            pseudo = diffs[k]
            actual = refdiffs[k]
            if pseudo >= actual:
                ngecounts[k] = ngecounts.get(k, 0) + 1
            elif k not in ngecounts.keys():
                ngecounts[k]=0
        if verbose and not ((i+1)%nom):
            loginfo('Calculated shuffle %d/%d' %(i+1, N))
            #getprobabilities(ngecounts, i+1, add=1, verbose=verbose)
    assert set(ngecounts.keys()) == set(refdiffs.keys())
    # Calculate probabilities (add=1: smoothed approximate estimate)
    probs = getprobabilities(ngecounts, N, add=1, verbose=True)
    return probs
def getdifference(system1, system2, gold=None):
    '''
    Take parallel lists of labels and return only those entries on which
    the two systems disagree.

    Returns a 5-tuple:
        new_system1 : system1 labels where system1 != system2
        new_system2 : system2 labels at the same positions
        new_gold    : gold labels at those positions ([] if *gold* is None)
        rest1       : the (identical) labels where the systems agree
        common_gold : gold labels where the systems agree (when *gold* is
                      None these come from the system1 copy used in its place)

    Raises ValueError when the input lists differ in length.
    '''
    new_system1=[]
    new_system2=[]
    new_gold=[]
    rest1=[]
    rest2=[]
    common_gold=[]
    G=True
    if gold is None:
        G=False
        gold = system1[:]
    # BUG FIX: the original condition compared len(system1) to itself
    # ('len(system1) != len(system1) != len(gold)'), which is always false,
    # so unequal input lengths were silently truncated by zip() instead of
    # raising.
    if not (len(system1) == len(system2) == len(gold)):
        raise ValueError('Input lists should have the same length')
    for g, s1, s2 in zip(gold, system1, system2):
        if s1!=s2:
            new_system1.append(s1)
            new_system2.append(s2)
            if G:
                new_gold.append(g)
        else:
            rest1.append(s1)
            rest2.append(s2)
            common_gold.append(g)
    if not G: new_gold=[]
    assert rest1 == rest2
    return new_system1, new_system2, new_gold, rest1, common_gold
def main(gold, system1, system2, verbose=False, N=10000, exact_threshold=20, training=None, scoring=getscores):
    '''Driver for class-labeling significance testing.

    Reduces the problem to the instances on which the two systems disagree
    (which yields the same probabilities, faster) and then dispatches to
    exact or approximate randomization.

    exact_threshold: the maximum number of instances for which exact
        randomization is calculated instead of approximate randomization
    Returns the dict of H0-acceptance probabilities.
    '''
    # Check
    if not (len(gold) == len(system1) == len(system2)):
        raise ValueError('There should be an equal number of non-empty lines in each input file.')
    # Shuffle only those instances that have a different class label
    news1, news2, newgold, common, common_gold = getdifference(system1, system2, gold)
    if verbose:
        # Report each system's scores on the full data (Python 2 print syntax).
        for i,s in enumerate([system1, system2]):
            scores = scoring(gold, s, training=training)
            lines=['Scores for system%d:' %(i+1)]
            keys = scores.keys()
            keys.sort()
            for k in keys:
                lines.append(' %-23s : %.4f' %(k, scores[k]))
            print >>sys.stderr, '\n'.join(lines)
            print >>sys.stderr
        loginfo('-'*50)
    # only shuffle difference: quicker and same probability results
    gold = newgold
    system1 = news1
    system2 = news2
    total_uniq = len(gold)
    # The number of instances with different predictions
    if verbose: loginfo('Found %d predictions that are different for the 2 systems' %(total_uniq))
    # number of permutations
    try:
        np = pow(2, len(gold))
    except OverflowError:
        np = 1000000001
    if np > 1000000000:
        loginfo('Number of permutations: more than 1,000,000,000')
    else:
        loginfo('Number of permutations: %d' %np)
    if np <= N and total_uniq > exact_threshold:
        loginfo('NOTE:')
        loginfo('The number of permutations is lower than the number of shuffles.')
        loginfo('You may want to calculate exact randomization. To do this')
        loginfo('set option -t higher than %d.' %total_uniq)
    if total_uniq <= exact_threshold:
        if verbose: loginfo('This is equal or less than the %d predictions threshold: calculating exact randomization' %(exact_threshold))
        probs = exactlabelingsignificance(gold, system1, system2, verbose=verbose, training=training, scoring=scoring, common=common, common_gold=common_gold)
    else:
        probs = labelingsignificance(gold, system1, system2, N=N, verbose=verbose, training=training, scoring=scoring, common=common, common_gold=common_gold)
    if verbose: loginfo('Done')
    return probs
def main2(gold, system1, system2, verbose=False, N=1048576, absolute=True, exact_threshold=10):
    '''Driver for term-extraction significance testing.

    Deduplicates the three term lists, then dispatches to exact or
    approximate term randomization depending on how many terms are unique
    to one system.
    Returns the dict of H0-acceptance probabilities.
    '''
    # No doubles
    news1 = list(set(system1))
    news2 = list(set(system2))
    newgold = list(set(gold))
    gold = newgold
    system1 = news1
    system2 = news2
    if verbose:
        # Report each system's scores (Python 2 print syntax).
        print >>sys.stderr
        for i,s in enumerate([system1, system2]):
            # NOTE(review): 'training' is not a parameter or local of main2;
            # it resolves to the global assigned in the __main__ section, so
            # calling main2(verbose=True) from another module would raise
            # NameError.  Consider a training=None parameter.
            scores = getscores2(gold, s, training=training)
            lines=['Scores for system%d:' %(i+1)]
            keys = scores.keys()
            keys.sort()
            for k in keys:
                lines.append(' %-23s : %.4f' %(k, scores[k]))
            print >>sys.stderr, '\n'.join(lines)
            print >>sys.stderr
        loginfo('-'*50)
    # the number of terms that occur only in s1 or in s2
    union=set(system1+system2)
    intersect = set(system1).intersection(set(system2))
    total_uniq = len(union) - len(intersect)
    if verbose: loginfo('Found %d predictions that are different for the 2 systems' %(total_uniq))
    if total_uniq < exact_threshold:
        if verbose: loginfo('This is equal of less than the %d terms threshold: calculating exact randomization' %(exact_threshold))
        probs = exacttermsignificance(gold, system1, system2, verbose=verbose, absolute=absolute)
    else:
        probs= termsignificance(gold, system1, system2, N=N, verbose=verbose, absolute=absolute)
    if verbose: loginfo('Done')
    return probs
def main3(data, verbose=False, N=1048576, absolute=True):
    '''Stratified shuffling over exactly two groups (cf. Noreen 1989, 2.7).

    data: dict mapping stratum name -> {group_name: [values]}; every stratum
        must contain the same two group names.
    Shuffles group membership within each stratum and tests H0: there is no
    (absolute) difference between the two group means.
    Returns the dict of H0-acceptance probabilities.
    '''
    # The groups
    scoring_func=average
    groups = data[data.keys()[0]].keys()
    groups.sort()
    assert len(groups) == 2
    if verbose:
        strata = data.keys()
        strata.sort()
        # 'stratum' vs 'strata' in the log message.
        stext = 'a'
        if len(strata) == 1: stext='um'
        loginfo('Found %d strat%s: %s' %(len(data), stext, ', '.join(strata)))
        loginfo('')
        loginfo('Computing %d shuffles' %N)
        loginfo('H0: there is no absolute difference between the means of %s and %s' %tuple(groups))
        loginfo(' Commonly, you reject H0 if the probability drops below')
        loginfo(' a predefined significance level, e.g 0.05.')
        loginfo('-'*50)
    # Pool each group's values over all strata.
    systems={groups[0]:[], groups[1]:[]}
    for stratum, d in data.items():
        for g in groups:
            systems[g] += d[g]
    if verbose:
        # Report each group's mean (Python 2 print syntax).
        print >>sys.stderr
        for g in groups:
            s = systems[g]
            scores = scoring_func(None, s)
            lines=['Scores for group %s:' %(g)]
            keys = scores.keys()
            keys.sort()
            for k in keys:
                lines.append(' %-23s : %.4f' %(k, scores[k]))
            print >>sys.stderr, '\n'.join(lines)
            print >>sys.stderr
        loginfo('-'*50)
    # Reference
    refdiffs = teststatistic(None, systems[groups[0]], systems[groups[1]], training=None, scoring=average, absolute=absolute)
    # Progress is reported roughly every 10% of the shuffles.
    if N >= 10:
        nom = int(N/10.0)
    else:
        nom=1
    # Start shuffling
    ngecounts={}
    for i in range(N):
        shuffled={}
        for stratum, d in data.items():
            # Within each stratum, reassign the pooled values to the two
            # groups at random while keeping the group sizes fixed.
            values = d[groups[0]] + d[groups[1]]
            n1 = len(d[groups[0]])
            n2 = len(d[groups[1]])
            labels = [groups[0]]*n1+ [groups[1]]*n2
            random.shuffle(labels)
            for l, v in zip(labels, values):
                shuffled[l] = shuffled.get(l ,[]) + [v]
        # the test statistics
        diffs = teststatistic(None, shuffled[groups[0]], shuffled[groups[1]], scoring=scoring_func, absolute=absolute)
        # see whether the shuffled system performs better than the originals
        for k in refdiffs.keys():
            pseudo = diffs[k]
            actual = refdiffs[k]
            if pseudo >= actual:
                ngecounts[k] = ngecounts.get(k, 0) + 1
            elif k not in ngecounts.keys():
                ngecounts[k] = 0
        if verbose and not ((i+1)%nom):
            loginfo('Calculated shuffle %d/%d' %(i+1, N))
    assert set(ngecounts.keys()) == set(refdiffs.keys())
    # Calculate probabilities
    probs = getprobabilities(ngecounts, N, add=1, verbose=True)
    return probs
# ========================================================================================================================
# TESTING
# ========================================================================================================================
def Yeh():
    '''Creates 3 synthetic files to reproduce the results from Section 3.3 of
    Alexander Yeh, More accurate tests for the statistical significance of result differences,
    in: Proceedings of the 18th International Conference on Computational Linguistics, Volume 2,
    pages 947-953, 2000.
    The filenames are yeh.gold, yeh.s1 and yeh.s2
    Running the following command reproduces the reported results:
    $ python art.py -c yeh.gold -n1048576 -v -r -a yeh.s1 yeh.s2
    Probabilities for accepting H0:
    f1-score : 0.014643
    precision : 0.97995
    recall : 0.00010204
    Note that the test statistic is system1-system2, so for precision the
    probability from Yeh is 1 - 0.97995 = 0.02005
    '''
    gold = 'yeh.gold'
    s1 = 'yeh.s1'
    s2 = 'yeh.s2'
    # The gold standard: 103 true terms named 0..102.
    with open(gold, 'w') as f:
        for i in range(103):
            f.write('%d\n' %i)
    # System 1: R45.6 P49.5 F47.5
    with open(s1, 'w') as f:
        for i in range(19+28):
            f.write('%d\n' %i) # retrieved by both and system1
        for i in range(5):
            f.write('B%d\n' %(i)) # spurious retrieved by both
        for i in range(43):
            f.write('one%d\n' %(i)) # spurious retrieved by system1
    # System 2: R24.3 P64.1 F35.2
    with open(s2, 'w') as f:
        for i in range(19+6):
            if i < 19:
                f.write('%d\n' %i) # retrieved by both
            else:
                f.write('%d\n' %(i+28)) # retrieved by system2
        for i in range(5):
            f.write('B%d\n' %(i)) # spurious retrieved by both
        for i in range(9):
            f.write('two%d\n' %(i)) # spurious retrieved by system2
    print 'Written:', gold, s1, s2
# ==================================================================================================================
if __name__ == '__main__':
    def _usage():
        # Emit the complete usage/help text to stderr (Python 2 print
        # syntax); interpolates the module's version/author/date globals.
        print >>sys.stderr, '''Approximate Randomization testing (version %s)
This script can be used to assess the significance for differences in recall, precision,
f-score, and accuracy for two machine learner outputs.
The H0 hypothesis tested is:
    There is no difference between SYSTEM1 and SYSTEM2 for a given score.
This hypothesis is tested for: macro-av. recall, macro-av. precision, macro-av. f-score, micro-av. f-score, and accuracy.
The output is a set of probabilities for accepting H0. If this probability is lower
than a predefined level (e.g. 0.05) then H0 is rejected.
USAGE
    ./art.py [-m] [-n int] [-c <gold-standard>] [-s sep] [-t int] [-T training] [-r] [-a] [-h] [-H] [-v] <output_a> <output_b>
OPTIONS
    -n : Number of shuffles (default: 10000)
    -c : Change the expected format for the input files, see FORMAT below
    -s : Feature separator (default: whitespace)
    -t : Define the maximal number of instances that can be in the input files
         for exact randomization. The lower this value, the quicker approximate
         randomization is carried out. If set to 0, approximation is always
         carried out. Note that exact randomization for input files with
         only 10 instances can already take a long time. (default: 10)
    -T : Path to the training file used by both systems, see TRAINING below
    -r : term extraction significance testing instead of labeling significance
         testing, see TERM EXTRACTION below. -c is mandatory; -T is ignored
    -a : use the actual difference instead of the absolute difference when
         calculating test extraction significance
    -m : test for MBT experiments, see MBT below. -c is obligatory.
    -h : Print help
    -H : Print more background information
    -v : Verbose processing
FORMAT
    Per default, the script expects 2 instance files tagged with
    different classifiers.
    - Each instance should be on a new line.
    - All features and class labels should be separated with the feature
      separator. This can be set with the -s option.
    - An instance is a list of features; followed by the gold standard class label;
      followed by the class label as predicted by the classifier (=standard Timbl output)
    If option -c is set, an extra input file with the gold-standard class labels
    should be provided. The format of all input files should be:
    - one class label per new line (and nothing else)
    - class labels belonging to the same instance should
      be on the same line in all 3 input files.
VALIDITY
    If scipy (www.scipy.org) is available and -v is set, the sign-test probability is also reported when
    carrying out approximate randomization. This probability can be compared with the reported probability
    for "accuracy" to check the validity of the randomization method. Both probabilities should be similar
    or should at least lead to similar conclusions; otherwise you might consider increasing the number of
    shuffles with option -n. Another validity check is rerunning the randomization test and comparing the
    results.
    The test carried out by the two-sided paired sign-test is:
        H0: The number of correct predictions from SYSTEM1 that are incorrectly predicted by SYSTEM2
            equals the number of correct predictions from SYSTEM2 that are incorrectly predicted by
            SYSTEM1. (Predictions that are correct or incorrect for both systems are ignored.)
    H0 is rejected if the reported sign-test probability is lower than a predefined level.
TRAINING
    Macro- and micro-averaging is carried out by taking the class counts from the input files. If not every class
    from the original training file occurs in the input files to the same extend, then the reported averaged scores
    may differ from the scores from Timbl.
    This averaging difference can be solved by supplying the training file with the -T option. The same training file
    should be used by both systems.
    When the -c option is set, the format of supplied file should be the same as the input files (only class labels);
    if -c is not set, the supplied training file should contain instances but without predicted class labels, only
    the gold standards labels.
    Because setting and not setting the -T option influences the way the performance scores are computed, this also
    influences the reported probabilities.
    See also from confusionmatrix.py: $ python confusionmatrix.py -V
TERM EXTRACTION
    The default setup is to compute the significance for Timbl style output. Is is possible to use this script
    to calculate significance for term extraction. The -r option should be set. In this mode, the script
    expects 3 files: gold_standard, system1, system2. All files should contain terms; each term on a new line.
    It is not required that the number of extracted terms is the same for both systems, nor should it be
    the same as the number of gold standard terms.
    By default, the test statistic is the absolute difference of the performance from system1 and system2.
    If the -a option is set, the test statistic is the signed difference.
    The -ar mode is identical of the system described by Yeh, 2000, Section3.3. To reproduce the results:
    To create the files:
        $ python art.py --yeh
    To run the randomization:
        $ python art.py -ar -v -n1048576 -c yeh.gold yeh.s1 yeh.s2
    For precision, the probability is (1 - reported_probability) because system2 has a higher precision than
    system1.
MBT
    It is also possible to process files in the MBT format. An MBT command looks like this:
        $ Mbt -s training1.settings -T testfile > output1
        $ Mbt -s training2.settings -T testfile > output2
    If is now possible to test the significance of the accuracy:
        $ python art.py -m -c testfile output1 output2
    The probability computation is carried out in the same way as with the basic command for instance files
    except that the "instances" in the case of Mbt are complete sentences -- there is no shuffling at the
    token level because there are interdependencies between the token labels.
STRATIFIED SHUFFLING
    It is also possible to reproduce the stratified shuffling example of Noreen 1989 (Section 2.7):
        $ ./art.py -v -n 999 transfer.data
    In which the format of transfer.data is 'stratum group values', like:
        A transfer 2.0 3.0 2.2 2.1 2.2
        A non-transfer 3.2 2.9 2.0 2.2 2.1 1.4
        ...
    This option can also be used for the example in Section 2.1. Using ony one stratum.
NOTE
    No assumptions are made on the distribution of the performance scores. The only assumption that is made is
    that there are no inter-instance dependencies, i.e. knowing the class label of 1 instance should not help
    knowing the class label of another instance. This assumption is violated in the output from the memory-based
    tagger (MBT). This is the reason why the -m option shuffles at sentence level instead of token level.
DEPENDENCIES
    This script depends on confusionmatrix.py and combinations.py (www.clips.ua.ac.be/~vincent/software.html)
    and optionally scipy (www.scipy.org).
REFERENCES
    Eric W. Noreen, Computer-intensive Methods for Testing Hypotheses: An Introduction, John Wiley & Sons, New York, NY, USA, 1989.
    Alexander Yeh, More accurate tests for the statistical significance of result differences, in: Proceedings of the 18th International Conference on Computational Linguistics, Volume 2, pages 947-953, 2000.
%s, %s
''' %(__version__, __author__, __date__)
    # ---- command-line parsing (Python 2 syntax throughout this block) ----
    try:
        opts,args=getopt.getopt(sys.argv[1:],'hHc:s:vn:t:T:ram', ['help', 'yeh'])
    except getopt.GetoptError:
        # print help information and exit:
        _usage()
        sys.exit(2)
    # Defaults for all options.
    sep=None
    gold = None
    verbose=False
    N=10000
    exact_threshold=10
    trainingfile = None
    training=None
    terms=False
    absolute=True
    mbt=False
    for o, a in opts:
        if o in ('-h', '--help'):
            _usage()
            sys.exit()
        if o in ('-H',):
            print >>sys.stderr, __doc__
            sys.exit(2)
        if o in ('-s',):
            sep = a
            # Allow a literal backslash-t on the command line to mean TAB.
            if sep == '\\t': sep='\t'
        if o in ('-c',):
            gold = a
        if o in ('-v',):
            verbose = True
        if o in ('-n',):
            N = int(a)
        if o in ('-t',):
            exact_threshold = int(a)
        if o in ('-T',):
            trainingfile = a
        if o in ('-r',):
            terms = True
        if o in ('-a',):
            absolute = False
        if o in ('-m',):
            mbt = True
        if o in ('--yeh',):
            Yeh()
            sys.exit(0)
    # A single positional argument selects stratified-shuffling mode.
    if len(args) == 1:
        data = strata_read(args[0], sep=sep)
        loginfo('-'*50)
        loginfo('Datafile: %s' %os.path.basename(args[0]))
        main3(data, verbose=verbose, N=N)
        sys.exit(0)
    elif len(args) != 2:
        _usage()
        sys.exit(1)
    # The files with the systems
    output1, output2 = args
    if terms and not gold:
        print >>sys.stderr, 'ERROR 2: when doing term significance testing a gold standard is needed (-c option)'
        sys.exit(1)
    if mbt and not gold:
        print >>sys.stderr, 'ERROR 3: when doing MBT significance testing a gold standard is needed (-c option)'
        sys.exit(1)
    # Reading in the class labels
    if gold:
        if mbt:
            goldlabels = mbtread(gold)
            system1 = mbtread(output1)
            system2 = mbtread(output2)
        else:
            if trainingfile: training = readtraining(trainingfile, sep=sep, index=None)
            goldlabels = fread(gold, index=None)
            system1 = fread(output1, index=None)
            system2 = fread(output2, index=None)
    else:
        # Timbl-style instance files: gold label is the one-but-last field,
        # prediction the last field.
        if trainingfile: training = readtraining(trainingfile, sep=sep, index=-1)
        try:
            goldlabels = fread(output1, index=-2, sep=sep)
        except IndexError:
            print >>sys.stderr, 'ERROR 4: Is the feature separator set correctly? (option -s is currently "%s")' %str(sep)
            sys.exit(1)
        check = fread(output2, index=-2, sep=sep)
        if check != goldlabels:
            # NOTE(review): the next line looks like leftover debugging
            # output (it dumps both label lists to stdout before the error).
            print check, goldlabels
            print >>sys.stderr, 'ERROR 5: File %s and %s should have the same gold reference labels.' %(output1, output2)
            sys.exit(1)
        del check
        check1 = fread(output1, index=(0,-1), sep=sep)
        check2 = fread(output2, index=(0,-1), sep=sep)
        if check1 != check2:
            print >>sys.stderr, 'ERROR 5: File %s and %s should be exactly the same up until the predicted class label.' %(output1, output2)
            sys.exit(1)
        del check1, check2
        system1=fread(output1, index=-1, sep=sep)
        system2=fread(output2, index=-1, sep=sep)
    # Info
    if verbose:
        loginfo('-'*50)
        loginfo('SYSTEM1 :%s' %output1)
        loginfo('SYSTEM2 :%s' %output2)
        if mbt:
            loginfo('GOLD :%s' %gold)
            loginfo('MBT style formatted files')
            loginfo('%d sentences in input files' %len(system1))
        else:
            if gold:
                loginfo('GOLD :%s' %gold)
                if not terms: loginfo('Considering entire lines as class labels')
            else:
                loginfo('Considering the last field as the predicted class label')
                loginfo('Considering the one but last field as the gold standard class label')
                if sep is not None: loginfo('Using "%s" as feature separator' %sep)
            if not terms: loginfo('%d instances in input files' %len(system1))
            labels=set(goldlabels)
            labels = labels.union(set(system1))
            labels = labels.union(set(system2))
            nlabels = len(labels)
            labels=list(labels)
            labels.sort()
            if not mbt: loginfo('Found %d different labels/terms' %nlabels)
            if nlabels < 10: loginfo(' %s' %(', '.join(labels)))
        if trainingfile: loginfo('Computing averaged scores using class label counts from: %s' %trainingfile)
        loginfo('')
        loginfo('Computing %d shuffles' %N)
        loginfo('H0: there is no difference between SYSTEM1 and SYSTEM2')
        if terms and not absolute: loginfo('H1: SYSTEM1 performs better than SYSTEM2')
        loginfo(' Commonly, you reject H0 if the probability drops below')
        loginfo(' a predefined significance level, e.g 0.05.')
        loginfo('-'*50)
    # Run
    try:
        if gold and mbt:
            probs = main(goldlabels, system1, system2, verbose=verbose, N=N, exact_threshold=exact_threshold, training=None, scoring=getscoresmbt)
            #probs = main(goldlabels, system1, system2, verbose=verbose, N=N, exact_threshold=exact_threshold, training=None, scoring=getscoresmbtmulti)
        elif gold and terms:
            probs = main2(goldlabels, system1, system2, N=N, verbose=verbose, absolute=absolute, exact_threshold=exact_threshold)
        else:
            probs = main(goldlabels, system1, system2, verbose=verbose, N=N, exact_threshold=exact_threshold, training=training) #, scoring=getscoresmbtmulti)
    except Exception, e:
        # NOTE(review): this bare 'raise' makes the friendly ERROR 1 message
        # and the exit code below unreachable dead code -- it looks like a
        # debugging override that was left in.  Remove the 'raise' to restore
        # the intended user-facing error handling.
        raise
        print >>sys.stderr, 'ERROR 1: %s' %(e.message)
        sys.exit(1)
| mit |
robertwb/incubator-beam | sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query6.py | 5 | 3391 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Query 6, 'Average Selling Price by Seller'. Select the average selling price
over the last 10 closed auctions by the same seller. In CQL syntax::
SELECT Istream(AVG(Q.final), Q.seller)
FROM (SELECT Rstream(MAX(B.price) AS final, A.seller)
FROM Auction A [ROWS UNBOUNDED], Bid B [ROWS UNBOUNDED]
WHERE A.id=B.auction
AND B.datetime < A.expires AND A.expires < CURRENT_TIME
GROUP BY A.id, A.seller) [PARTITION BY A.seller ROWS 10] Q
GROUP BY Q.seller;
"""
import apache_beam as beam
from apache_beam.testing.benchmarks.nexmark.queries import nexmark_query_util
from apache_beam.testing.benchmarks.nexmark.queries import winning_bids
from apache_beam.testing.benchmarks.nexmark.queries.nexmark_query_util import ResultNames
from apache_beam.transforms import trigger
from apache_beam.transforms import window
def load(events, metadata=None, pipeline_options=None):
  """Expand the query-6 transform over the *events* PCollection.

  Args:
    events: PCollection of nexmark events (auctions, bids, persons).
    metadata: benchmark configuration; unused here -- accepted for
      interface parity with the other query loaders (TODO confirm).
    pipeline_options: pipeline options; likewise unused in this query.

  Returns:
    A PCollection of {'seller': id, 'price': moving mean selling price}
    dicts, re-emitted on every new winning bid (AfterCount(1) trigger,
    accumulating mode).
  """
  # find winning bids for each closed auction
  return (
      events
      # find winning bids
      | beam.Filter(nexmark_query_util.auction_or_bid)
      | winning_bids.WinningBids()
      # (auction_bids -> (auction.seller, bid))
      | beam.Map(lambda auc_bid: (auc_bid.auction.seller, auc_bid.bid))
      # calculate and output mean as data arrives
      | beam.WindowInto(
          window.GlobalWindows(),
          trigger=trigger.Repeatedly(trigger.AfterCount(1)),
          accumulation_mode=trigger.AccumulationMode.ACCUMULATING,
          allowed_lateness=0)
      | beam.CombinePerKey(MovingMeanSellingPriceFn(10))
      | beam.Map(lambda t: {
          ResultNames.SELLER: t[0], ResultNames.PRICE: t[1]
      }))
class MovingMeanSellingPriceFn(beam.CombineFn):
  """
  Combiner that keeps a sliding window of at most max_num_bids of the most
  recent winning bids and reports the integer mean of their prices.
  """
  def __init__(self, max_num_bids):
    # Capacity of the sliding window of winning bids.
    self.max_num_bids = max_num_bids

  def create_accumulator(self):
    # The accumulator is simply a bounded list of bids.
    return []

  def add_input(self, accumulator, element):
    accumulator.append(element)
    # Rank newest (and, among ties, most expensive) first ...
    ranked = sorted(accumulator, key=lambda bid: (-bid.date_time, -bid.price))
    # ... then drop the single entry that falls just outside the window.
    if len(ranked) > self.max_num_bids:
      del ranked[self.max_num_bids]
    return ranked

  def merge_accumulators(self, accumulators):
    merged = []
    for acc in accumulators:
      merged.extend(acc)
    # Oldest first here, so the slice below keeps the most recent bids.
    merged.sort(key=lambda bid: (bid.date_time, bid.price))
    return merged[-self.max_num_bids:]

  def extract_output(self, accumulator):
    # An empty window reports a price of 0.
    if not accumulator:
      return 0
    total = sum(bid.price for bid in accumulator)
    return int(total / len(accumulator))
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/test/pydocfodder.py | 194 | 6329 | """Something just to look at via pydoc."""
import types
class A_classic:
    "A classic class."
    # pydoc test fixture: each stub method's docstring records which of the
    # classes A/B/C/D define a method of that name.
    def A_method(self):
        "Method defined in A."
    def AB_method(self):
        "Method defined in A and B."
    def AC_method(self):
        "Method defined in A and C."
    def AD_method(self):
        "Method defined in A and D."
    def ABC_method(self):
        "Method defined in A, B and C."
    def ABD_method(self):
        "Method defined in A, B and D."
    def ACD_method(self):
        "Method defined in A, C and D."
    def ABCD_method(self):
        "Method defined in A, B, C and D."
class B_classic(A_classic):
    "A classic class, derived from A_classic."
    # pydoc test fixture: overrides the shared A*-methods and adds the
    # B-specific stubs; docstrings name the defining classes.
    def AB_method(self):
        "Method defined in A and B."
    def ABC_method(self):
        "Method defined in A, B and C."
    def ABD_method(self):
        "Method defined in A, B and D."
    def ABCD_method(self):
        "Method defined in A, B, C and D."
    def B_method(self):
        "Method defined in B."
    def BC_method(self):
        "Method defined in B and C."
    def BD_method(self):
        "Method defined in B and D."
    def BCD_method(self):
        "Method defined in B, C and D."
class C_classic(A_classic):
    "A classic class, derived from A_classic."
    # pydoc test fixture: overrides the shared A*-methods and adds the
    # C-specific stubs; docstrings name the defining classes.
    def AC_method(self):
        "Method defined in A and C."
    def ABC_method(self):
        "Method defined in A, B and C."
    def ACD_method(self):
        "Method defined in A, C and D."
    def ABCD_method(self):
        "Method defined in A, B, C and D."
    def BC_method(self):
        "Method defined in B and C."
    def BCD_method(self):
        "Method defined in B, C and D."
    def C_method(self):
        "Method defined in C."
    def CD_method(self):
        "Method defined in C and D."
class D_classic(B_classic, C_classic):
    "A classic class, derived from B_classic and C_classic."
    # pydoc test fixture: diamond inheritance over A; docstrings name the
    # classes defining each method.
    def AD_method(self):
        "Method defined in A and D."
    def ABD_method(self):
        "Method defined in A, B and D."
    def ACD_method(self):
        "Method defined in A, C and D."
    def ABCD_method(self):
        "Method defined in A, B, C and D."
    def BD_method(self):
        "Method defined in B and D."
    def BCD_method(self):
        "Method defined in B, C and D."
    def CD_method(self):
        "Method defined in C and D."
    def D_method(self):
        "Method defined in D."
class A_new(object):
    "A new-style class."
    # Root of the new-style diamond (A -> B, C -> D).  Besides the shared
    # stub methods it carries a classmethod, a staticmethod, a property and
    # a builtin alias, so pydoc's rendering of each descriptor kind can be
    # checked.
    def A_method(self):
        "Method defined in A."
    def AB_method(self):
        "Method defined in A and B."
    def AC_method(self):
        "Method defined in A and C."
    def AD_method(self):
        "Method defined in A and D."
    def ABC_method(self):
        "Method defined in A, B and C."
    def ABD_method(self):
        "Method defined in A, B and D."
    def ACD_method(self):
        "Method defined in A, C and D."
    def ABCD_method(self):
        "Method defined in A, B, C and D."
    def A_classmethod(cls, x):
        "A class method defined in A."
    # Pre-decorator wrapping style kept deliberately: pydoc must handle it.
    A_classmethod = classmethod(A_classmethod)
    def A_staticmethod():
        "A static method defined in A."
    A_staticmethod = staticmethod(A_staticmethod)
    def _getx(self):
        "A property getter function."
    def _setx(self, value):
        "A property setter function."
    def _delx(self):
        "A property deleter function."
    A_property = property(fdel=_delx, fget=_getx, fset=_setx,
                          doc="A sample property defined in A.")
    # Class attribute aliasing a builtin type.
    A_int_alias = int
class B_new(A_new):
    "A new-style class, derived from A_new."
    # Left branch of the new-style diamond; mirrors B_classic.
    def AB_method(self):
        "Method defined in A and B."
    def ABC_method(self):
        "Method defined in A, B and C."
    def ABD_method(self):
        "Method defined in A, B and D."
    def ABCD_method(self):
        "Method defined in A, B, C and D."
    def B_method(self):
        "Method defined in B."
    def BC_method(self):
        "Method defined in B and C."
    def BD_method(self):
        "Method defined in B and D."
    def BCD_method(self):
        "Method defined in B, C and D."
class C_new(A_new):
    "A new-style class, derived from A_new."
    # Right branch of the new-style diamond; mirrors C_classic.
    def AC_method(self):
        "Method defined in A and C."
    def ABC_method(self):
        "Method defined in A, B and C."
    def ACD_method(self):
        "Method defined in A, C and D."
    def ABCD_method(self):
        "Method defined in A, B, C and D."
    def BC_method(self):
        "Method defined in B and C."
    def BCD_method(self):
        "Method defined in B, C and D."
    def C_method(self):
        "Method defined in C."
    def CD_method(self):
        "Method defined in C and D."
class D_new(B_new, C_new):
    """A new-style class, derived from B_new and C_new.
    """
    # Bottom of the new-style diamond; exercises C3/MRO-based method
    # resolution in pydoc's output (contrast with D_classic).
    def AD_method(self):
        "Method defined in A and D."
    def ABD_method(self):
        "Method defined in A, B and D."
    def ACD_method(self):
        "Method defined in A, C and D."
    def ABCD_method(self):
        "Method defined in A, B, C and D."
    def BD_method(self):
        "Method defined in B and D."
    def BCD_method(self):
        "Method defined in B, C and D."
    def CD_method(self):
        "Method defined in C and D."
    def D_method(self):
        "Method defined in D."
class FunkyProperties(object):
    """From SF bug 472347, by Roeland Rengelink.
    Property getters etc may not be vanilla functions or methods,
    and this used to make GUI pydoc blow up.
    """
    # The accessors below are callable class instances rather than plain
    # functions; the property at the bottom is built from them.
    def __init__(self):
        self.desc = {'x':0}
    class get_desc:
        def __init__(self, attr):
            self.attr = attr
        def __call__(self, inst):
            print 'Get called', self, inst
            return inst.desc[self.attr]
    class set_desc:
        def __init__(self, attr):
            self.attr = attr
        def __call__(self, inst, val):
            print 'Set called', self, inst, val
            inst.desc[self.attr] = val
    class del_desc:
        def __init__(self, attr):
            self.attr = attr
        def __call__(self, inst):
            print 'Del called', self, inst
            del inst.desc[self.attr]
    x = property(get_desc('x'), set_desc('x'), del_desc('x'), 'prop x')
# A dynamically created module object (never present on disk); its own
# docstring says it "should appear in its parent's summary" when pydoc
# renders this module.
submodule = types.ModuleType(__name__ + '.submodule',
    """A submodule, which should appear in its parent's summary""")
| gpl-3.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/theano/typed_list/tests/test_basic.py | 4 | 18954 | import unittest
from nose.plugins.skip import SkipTest
import numpy
import theano
import theano.typed_list
from theano import tensor as T
from theano.tensor.type_other import SliceType
from theano.typed_list.type import TypedListType
from theano.typed_list.basic import (GetItem, Insert,
Append, Extend, Remove, Reverse,
Index, Count, Length, make_list)
from theano import sparse
from theano.tests import unittest_tools as utt
# TODO, handle the case where scipy isn't installed.
try:
import scipy.sparse as sp
scipy_imported = True
except ImportError:
scipy_imported = False
# took from tensors/tests/test_basic.py
def rand_ranged_matrix(minimum, maximum, shape):
    """Uniform random matrix with values in [minimum, maximum), cast to
    Theano's configured float dtype."""
    span = maximum - minimum
    values = numpy.random.rand(*shape) * span + minimum
    return values.astype(theano.config.floatX)
# took from sparse/tests/test_basic.py
def random_lil(shape, dtype, nnz):
    """Return a random ``scipy.sparse.lil_matrix`` of the given shape/dtype.

    Places up to `nnz` random values at random coordinates; coordinate
    collisions may leave fewer than `nnz` stored entries.
    """
    rval = sp.lil_matrix(shape, dtype=dtype)
    huge = 2 ** 30
    for k in range(nnz):
        # set non-zeros in random locations (row x, col y)
        # numpy.random.random_integers was deprecated in NumPy 1.11 and later
        # removed; randint draws from [0, huge), which is equivalent here
        # after the modulo by the shape.
        idx = numpy.random.randint(huge, size=2) % shape
        value = numpy.random.rand()
        # if dtype *int*, value will always be zeros!
        if "int" in dtype:
            value = int(value * 100)
        # The call to tuple is needed as scipy 0.13.1 does not support an
        # ndarray of length 2 as an index tuple.
        rval.__setitem__(
            tuple(idx),
            value)
    return rval
class test_get_item(unittest.TestCase):
    # Covers GetItem with slice and scalar indices, the Python indexing
    # interface ([]), rejection of invalid index types, and constant indices.
    def setUp(self):
        utt.seed_rng()
    def test_sanity_check_slice(self):
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        mySymbolicSlice = SliceType()()
        z = GetItem()(mySymbolicMatricesList, mySymbolicSlice)
        # Slicing a typed list must yield another typed list, not a tensor.
        self.assertFalse(isinstance(z, T.TensorVariable))
        f = theano.function([mySymbolicMatricesList, mySymbolicSlice],
                            z)
        x = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(numpy.array_equal(f([x], slice(0, 1, 1)), [x]))
    def test_sanity_check_single(self):
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        mySymbolicScalar = T.scalar(dtype='int64')
        z = GetItem()(mySymbolicMatricesList, mySymbolicScalar)
        f = theano.function([mySymbolicMatricesList, mySymbolicScalar],
                            z)
        x = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(numpy.array_equal(f([x],
                                            numpy.asarray(0, dtype='int64')),
                                          x))
    def test_interface(self):
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        mySymbolicScalar = T.scalar(dtype='int64')
        z = mySymbolicMatricesList[mySymbolicScalar]
        f = theano.function([mySymbolicMatricesList, mySymbolicScalar],
                            z)
        x = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(numpy.array_equal(f([x],
                                            numpy.asarray(0, dtype='int64')),
                                          x))
        # Indexing with a Python int constant must work as well.
        z = mySymbolicMatricesList[0]
        f = theano.function([mySymbolicMatricesList],
                            z)
        self.assertTrue(numpy.array_equal(f([x]), x))
    def test_wrong_input(self):
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        mySymbolicMatrix = T.matrix()
        # A matrix is not a valid index type for GetItem.
        self.assertRaises(TypeError, GetItem(), mySymbolicMatricesList,
                          mySymbolicMatrix)
    def test_constant_input(self):
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        z = GetItem()(mySymbolicMatricesList, 0)
        f = theano.function([mySymbolicMatricesList],
                            z)
        x = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(numpy.array_equal(f([x]), x))
        z = GetItem()(mySymbolicMatricesList, slice(0, 1, 1))
        f = theano.function([mySymbolicMatricesList],
                            z)
        self.assertTrue(numpy.array_equal(f([x]), [x]))
class test_append(unittest.TestCase):
    """Behavioural tests for the typed-list Append op (destructive and
    non-destructive forms) and the ``.append`` method interface."""

    @staticmethod
    def _list_of_matrices():
        # Fresh symbolic variable: a typed list of floatX matrices.
        return TypedListType(
            T.TensorType(theano.config.floatX, (False, False)))()

    @staticmethod
    def _two_matrices():
        return (rand_ranged_matrix(-1000, 1000, [100, 101]),
                rand_ranged_matrix(-1000, 1000, [100, 101]))

    def test_inplace(self):
        lst = self._list_of_matrices()
        mat = T.matrix()
        fn = theano.function([lst, mat], Append(True)(lst, mat),
                             accept_inplace=True)
        first, second = self._two_matrices()
        self.assertTrue(numpy.array_equal(fn([first], second),
                                          [first, second]))

    def test_sanity_check(self):
        lst = self._list_of_matrices()
        mat = T.matrix()
        fn = theano.function([lst, mat], Append()(lst, mat))
        first, second = self._two_matrices()
        self.assertTrue(numpy.array_equal(fn([first], second),
                                          [first, second]))

    def test_interfaces(self):
        lst = self._list_of_matrices()
        mat = T.matrix()
        fn = theano.function([lst, mat], lst.append(mat))
        first, second = self._two_matrices()
        self.assertTrue(numpy.array_equal(fn([first], second),
                                          [first, second]))
class test_extend(unittest.TestCase):
    """Behavioural tests for the typed-list Extend op (destructive and
    non-destructive forms) and the ``.extend`` method interface."""

    @staticmethod
    def _list_of_matrices():
        # Fresh symbolic variable: a typed list of floatX matrices.
        return TypedListType(
            T.TensorType(theano.config.floatX, (False, False)))()

    @staticmethod
    def _two_matrices():
        return (rand_ranged_matrix(-1000, 1000, [100, 101]),
                rand_ranged_matrix(-1000, 1000, [100, 101]))

    def test_inplace(self):
        left = self._list_of_matrices()
        right = self._list_of_matrices()
        fn = theano.function([left, right], Extend(True)(left, right),
                             accept_inplace=True)
        first, second = self._two_matrices()
        self.assertTrue(numpy.array_equal(fn([first], [second]),
                                          [first, second]))

    def test_sanity_check(self):
        left = self._list_of_matrices()
        right = self._list_of_matrices()
        fn = theano.function([left, right], Extend()(left, right))
        first, second = self._two_matrices()
        self.assertTrue(numpy.array_equal(fn([first], [second]),
                                          [first, second]))

    def test_interface(self):
        left = self._list_of_matrices()
        right = self._list_of_matrices()
        fn = theano.function([left, right], left.extend(right))
        first, second = self._two_matrices()
        self.assertTrue(numpy.array_equal(fn([first], [second]),
                                          [first, second]))
class test_insert(unittest.TestCase):
    # Covers the Insert op (destructive and non-destructive forms) and the
    # ``.insert`` method of typed-list variables; the insertion position is
    # passed as an int64 scalar.
    def test_inplace(self):
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        myMatrix = T.matrix()
        myScalar = T.scalar(dtype='int64')
        z = Insert(True)(mySymbolicMatricesList, myScalar, myMatrix)
        f = theano.function([mySymbolicMatricesList, myScalar, myMatrix], z,
                            accept_inplace=True)
        x = rand_ranged_matrix(-1000, 1000, [100, 101])
        y = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(numpy.array_equal(f([x],
                                            numpy.asarray(1, dtype='int64'),
                                            y),
                                          [x, y]))
    def test_sanity_check(self):
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        myMatrix = T.matrix()
        myScalar = T.scalar(dtype='int64')
        z = Insert()(mySymbolicMatricesList, myScalar, myMatrix)
        f = theano.function([mySymbolicMatricesList, myScalar, myMatrix], z)
        x = rand_ranged_matrix(-1000, 1000, [100, 101])
        y = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(numpy.array_equal(f([x], numpy.asarray(1,
            dtype='int64'), y), [x, y]))
    def test_interface(self):
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        myMatrix = T.matrix()
        myScalar = T.scalar(dtype='int64')
        z = mySymbolicMatricesList.insert(myScalar, myMatrix)
        f = theano.function([mySymbolicMatricesList, myScalar, myMatrix], z)
        x = rand_ranged_matrix(-1000, 1000, [100, 101])
        y = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(numpy.array_equal(f([x],
                                            numpy.asarray(1, dtype='int64'),
                                            y),
                                          [x, y]))
class test_remove(unittest.TestCase):
    """Behavioural tests for the typed-list Remove op (destructive and
    non-destructive forms) and the ``.remove`` method interface."""

    @staticmethod
    def _list_of_matrices():
        # Fresh symbolic variable: a typed list of floatX matrices.
        return TypedListType(
            T.TensorType(theano.config.floatX, (False, False)))()

    @staticmethod
    def _two_matrices():
        return (rand_ranged_matrix(-1000, 1000, [100, 101]),
                rand_ranged_matrix(-1000, 1000, [100, 101]))

    def test_inplace(self):
        lst = self._list_of_matrices()
        mat = T.matrix()
        fn = theano.function([lst, mat], Remove(True)(lst, mat),
                             accept_inplace=True)
        kept, dropped = self._two_matrices()
        self.assertTrue(numpy.array_equal(fn([kept, dropped], dropped),
                                          [kept]))

    def test_sanity_check(self):
        lst = self._list_of_matrices()
        mat = T.matrix()
        fn = theano.function([lst, mat], Remove()(lst, mat))
        kept, dropped = self._two_matrices()
        self.assertTrue(numpy.array_equal(fn([kept, dropped], dropped),
                                          [kept]))

    def test_interface(self):
        lst = self._list_of_matrices()
        mat = T.matrix()
        fn = theano.function([lst, mat], lst.remove(mat))
        kept, dropped = self._two_matrices()
        self.assertTrue(numpy.array_equal(fn([kept, dropped], dropped),
                                          [kept]))
class test_reverse(unittest.TestCase):
    """Behavioural tests for the typed-list Reverse op (destructive and
    non-destructive forms) and the ``.reverse`` method interface."""

    @staticmethod
    def _list_of_matrices():
        # Fresh symbolic variable: a typed list of floatX matrices.
        return TypedListType(
            T.TensorType(theano.config.floatX, (False, False)))()

    @staticmethod
    def _two_matrices():
        return (rand_ranged_matrix(-1000, 1000, [100, 101]),
                rand_ranged_matrix(-1000, 1000, [100, 101]))

    def test_inplace(self):
        lst = self._list_of_matrices()
        fn = theano.function([lst], Reverse(True)(lst), accept_inplace=True)
        first, second = self._two_matrices()
        self.assertTrue(numpy.array_equal(fn([first, second]),
                                          [second, first]))

    def test_sanity_check(self):
        lst = self._list_of_matrices()
        fn = theano.function([lst], Reverse()(lst))
        first, second = self._two_matrices()
        self.assertTrue(numpy.array_equal(fn([first, second]),
                                          [second, first]))

    def test_interface(self):
        lst = self._list_of_matrices()
        fn = theano.function([lst], lst.reverse())
        first, second = self._two_matrices()
        self.assertTrue(numpy.array_equal(fn([first, second]),
                                          [second, first]))
class test_index(unittest.TestCase):
    # Covers the Index op (position of the first occurrence of a value) for
    # tensor elements, nested typed lists and sparse matrices.
    def test_sanity_check(self):
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        myMatrix = T.matrix()
        z = Index()(mySymbolicMatricesList, myMatrix)
        f = theano.function([mySymbolicMatricesList, myMatrix], z)
        x = rand_ranged_matrix(-1000, 1000, [100, 101])
        y = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(f([x, y], y) == 1)
    def test_interface(self):
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        myMatrix = T.matrix()
        # NOTE(review): the method is named `ind`, presumably to avoid
        # clashing with an existing Variable attribute -- confirm against the
        # theano.typed_list API before renaming anything here.
        z = mySymbolicMatricesList.ind(myMatrix)
        f = theano.function([mySymbolicMatricesList, myMatrix], z)
        x = rand_ranged_matrix(-1000, 1000, [100, 101])
        y = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(f([x, y], y) == 1)
    def test_non_tensor_type(self):
        # A list of lists of matrices (nesting depth 1) searched for a
        # plain list of matrices.
        mySymbolicNestedMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)), 1)()
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        z = Index()(mySymbolicNestedMatricesList, mySymbolicMatricesList)
        f = theano.function([mySymbolicNestedMatricesList,
                             mySymbolicMatricesList], z)
        x = rand_ranged_matrix(-1000, 1000, [100, 101])
        y = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(f([[x, y], [x, y, y]], [x, y]) == 0)
    def test_sparse(self):
        if not scipy_imported:
            raise SkipTest('Optional package SciPy not installed')
        mySymbolicSparseList = TypedListType(
            sparse.SparseType('csr', theano.config.floatX))()
        mySymbolicSparse = sparse.csr_matrix()
        z = Index()(mySymbolicSparseList, mySymbolicSparse)
        f = theano.function([mySymbolicSparseList, mySymbolicSparse], z)
        x = sp.csr_matrix(random_lil((10, 40), theano.config.floatX, 3))
        y = sp.csr_matrix(random_lil((10, 40), theano.config.floatX, 3))
        self.assertTrue(f([x, y], y) == 1)
class test_count(unittest.TestCase):
    # Covers the Count op (number of occurrences of a value) for tensor
    # elements, nested typed lists and sparse matrices.
    def test_sanity_check(self):
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        myMatrix = T.matrix()
        z = Count()(mySymbolicMatricesList, myMatrix)
        f = theano.function([mySymbolicMatricesList, myMatrix], z)
        x = rand_ranged_matrix(-1000, 1000, [100, 101])
        y = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(f([y, y, x, y], y) == 3)
    def test_interface(self):
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        myMatrix = T.matrix()
        z = mySymbolicMatricesList.count(myMatrix)
        f = theano.function([mySymbolicMatricesList, myMatrix], z)
        x = rand_ranged_matrix(-1000, 1000, [100, 101])
        y = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(f([x, y], y) == 1)
    def test_non_tensor_type(self):
        # A list of lists of matrices (nesting depth 1) counted against a
        # plain list of matrices.
        mySymbolicNestedMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)), 1)()
        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()
        z = Count()(mySymbolicNestedMatricesList, mySymbolicMatricesList)
        f = theano.function([mySymbolicNestedMatricesList,
                             mySymbolicMatricesList], z)
        x = rand_ranged_matrix(-1000, 1000, [100, 101])
        y = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(f([[x, y], [x, y, y]], [x, y]) == 1)
    def test_sparse(self):
        if not scipy_imported:
            raise SkipTest('Optional package SciPy not installed')
        mySymbolicSparseList = TypedListType(
            sparse.SparseType('csr', theano.config.floatX))()
        mySymbolicSparse = sparse.csr_matrix()
        z = Count()(mySymbolicSparseList, mySymbolicSparse)
        f = theano.function([mySymbolicSparseList, mySymbolicSparse], z)
        x = sp.csr_matrix(random_lil((10, 40), theano.config.floatX, 3))
        y = sp.csr_matrix(random_lil((10, 40), theano.config.floatX, 3))
        self.assertTrue(f([x, y, y], y) == 2)
class test_length(unittest.TestCase):
    """Behavioural tests for the typed-list Length op and the ``__len__``
    method interface of typed-list variables."""

    @staticmethod
    def _list_of_matrices():
        # Fresh symbolic variable: a typed list of floatX matrices.
        return TypedListType(
            T.TensorType(theano.config.floatX, (False, False)))()

    def test_sanity_check(self):
        lst = self._list_of_matrices()
        length_fn = theano.function([lst], Length()(lst))
        m = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(length_fn([m, m, m, m]) == 4)

    def test_interface(self):
        lst = self._list_of_matrices()
        length_fn = theano.function([lst], lst.__len__())
        m = rand_ranged_matrix(-1000, 1000, [100, 101])
        self.assertTrue(length_fn([m, m]) == 2)
class TestMakeList(unittest.TestCase):
    # Tests for make_list: type checking and round-tripping values through
    # the constructed typed list.
    def test_wrong_shape(self):
        """make_list must reject variables of mismatched ndim."""
        a = T.vector()
        b = T.matrix()
        self.assertRaises(TypeError, make_list, (a, b))
    def test_correct_answer(self):
        """Values passed through make_list come back unchanged."""
        a = T.matrix()
        b = T.matrix()
        x = T.tensor3()
        y = T.tensor3()
        A = numpy.cast[theano.config.floatX](numpy.random.rand(5, 3))
        B = numpy.cast[theano.config.floatX](numpy.random.rand(7, 2))
        X = numpy.cast[theano.config.floatX](numpy.random.rand(5, 6, 1))
        Y = numpy.cast[theano.config.floatX](numpy.random.rand(1, 9, 3))
        make_list((3., 4.))
        c = make_list((a, b))
        z = make_list((x, y))
        fc = theano.function([a, b], c)
        fz = theano.function([x, y], z)
        # Bug fix: the previous version passed a generator expression to
        # assertTrue, which is always truthy, so the element-wise
        # comparisons were never actually evaluated.
        for m, n in zip(fc(A, B), [A, B]):
            self.assertTrue((m == n).all())
        for m, n in zip(fz(X, Y), [X, Y]):
            self.assertTrue((m == n).all())
| mit |
personalrobotics/aikido | tests/gtest/test/gtest_list_output_unittest.py | 101 | 4919 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. If output is requested, via --gtest_output=xml
or --gtest_output=json, the tests are listed, with extra information in the
output file.
This script tests such functionality by invoking gtest_list_output_unittest_
(a program written with Google Test) the command line flags.
"""
import os
import re
import gtest_test_utils
# Command-line flag names for the child gtest binary.
# NOTE(review): GTEST_LIST_TESTS_FLAG is currently unused; the literal
# '--gtest_list_tests' is inlined in _GetOutput below.
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = '--gtest_output'
# Expected listings: each line is a regular expression that the
# corresponding (stripped) line of the child's output file must match.
EXPECTED_XML = """<\?xml version="1.0" encoding="UTF-8"\?>
<testsuites tests="2" name="AllTests">
<testsuite name="FooTest" tests="2">
<testcase name="Test1" file=".*gtest_list_output_unittest_.cc" line="43" />
<testcase name="Test2" file=".*gtest_list_output_unittest_.cc" line="45" />
</testsuite>
</testsuites>
"""
EXPECTED_JSON = """{
"tests": 2,
"name": "AllTests",
"testsuites": \[
{
"name": "FooTest",
"tests": 2,
"testsuite": \[
{
"name": "Test1",
"file": ".*gtest_list_output_unittest_.cc",
"line": 43
},
{
"name": "Test2",
"file": ".*gtest_list_output_unittest_.cc",
"line": 45
}
\]
}
\]
}
"""
class GTestListTestsOutputUnitTest(gtest_test_utils.TestCase):
  """Unit test for Google Test's list tests with output to file functionality.
  """

  def testXml(self):
    """Verifies the XML listing produced by --gtest_list_tests.

    Runs the child binary with --gtest_list_tests and --gtest_output=xml,
    then checks the file contents against EXPECTED_XML.
    """
    self._TestOutput('xml', EXPECTED_XML)

  def testJSON(self):
    """Verifies the JSON listing produced by --gtest_list_tests.

    Runs the child binary with --gtest_list_tests and --gtest_output=json,
    then checks the file contents against EXPECTED_JSON.
    """
    self._TestOutput('json', EXPECTED_JSON)

  def _GetOutput(self, out_format):
    # Runs gtest_list_output_unittest_ with --gtest_list_tests and
    # --gtest_output=<format>:<path>, asserts it exits cleanly, and returns
    # the contents of the generated output file.
    file_path = os.path.join(gtest_test_utils.GetTempDir(),
                             'test_out.' + out_format)
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
        'gtest_list_output_unittest_')
    command = ([
        gtest_prog_path,
        '%s=%s:%s' % (GTEST_OUTPUT_FLAG, out_format, file_path),
        '--gtest_list_tests'
    ])
    environ_copy = os.environ.copy()
    p = gtest_test_utils.Subprocess(
        command, env=environ_copy, working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)
    with open(file_path) as f:
      result = f.read()
    return result

  def _TestOutput(self, test_format, expected_output):
    # Compares the actual listing line by line against the regexes in
    # `expected_output`; both sides are stripped before matching, so
    # indentation differences are ignored.
    actual = self._GetOutput(test_format)
    actual_lines = actual.splitlines()
    expected_lines = expected_output.splitlines()
    line_count = 0
    for actual_line in actual_lines:
      expected_line = expected_lines[line_count]
      expected_line_re = re.compile(expected_line.strip())
      self.assert_(
          expected_line_re.match(actual_line.strip()),
          ('actual output of "%s",\n'
           'which does not match expected regex of "%s"\n'
           'on line %d' % (actual, expected_output, line_count)))
      line_count = line_count + 1
if __name__ == '__main__':
  # Limit stack-trace depth to 1 in the child gtest process before running.
  os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
  gtest_test_utils.Main()
| bsd-3-clause |
conwin/node-gyp | gyp/test/variants/gyptest-variants.py | 240 | 1315 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify handling of build variants.
TODO: Right now, only the SCons generator supports this, so the
test case is SCons-specific. In particular, it relise on SCons'
ability to rebuild in response to changes on the command line. It
may be simpler to just drop this feature if the other generators
can't be made to behave the same way.
"""
import TestGyp
# Build once with no variant defined, then rebuild with VARIANT1 and
# VARIANT2 set on the command line, checking the executable's output
# after each build.
test = TestGyp.TestGyp(formats=['scons'])
test.run_gyp('variants.gyp', chdir='src')
test.relocate('src', 'relocate/src')
build_dir = 'relocate/src'
test.build('variants.gyp', chdir=build_dir)
test.run_built_executable('variants',
                          chdir=build_dir,
                          stdout="Hello, world!\n")
for variant, expected in (('VARIANT1', "Hello from VARIANT1\n"),
                          ('VARIANT2', "Hello from VARIANT2\n")):
    # Sleep so the rebuild is seen as newer than the previous one.
    test.sleep()
    test.build('variants.gyp', variant + '=1', chdir=build_dir)
    test.run_built_executable('variants',
                              chdir=build_dir,
                              stdout=expected)
test.pass_test()
| mit |
beijingren/roche-website | djiki/tests.py | 2 | 9743 | # -*- coding: utf-8 -*-
from django.conf import settings
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
# django < 1.5
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from . import models
from .auth.base import UnrestrictedAccess
# Wiki-markup fixtures shared by the tests below: three successive versions
# of the same page, each paired with its edit description.
content1 = u"""
= Hello world! =
This is a simple test page.
"""
description1 = u"Initial page version"
content2 = content1 + """
== Subsection ==
This page has a subsection.
"""
description2 = u"Subsection added"
content3 = """
= Hello world! =
Some text added here.
This is a simple test page.
== Subsection ==
This page has a subsection.
"""
description3 = u"Added some text"
class SimpleTest(TestCase):
def setUp(self):
settings.DJIKI_SPACES_AS_UNDERSCORES = False
settings.DJIKI_AUTHORIZATION_BACKEND = 'djiki.auth.base.UnrestrictedAccess'
self.user = User.objects.create(username='foouser')
user_password = 'foopassword'
self.user.set_password(user_password)
self.user.save()
self.admin = User.objects.create(username='admin', is_superuser=True)
admin_password = 'adminpassword'
self.admin.set_password(admin_password)
self.admin.save()
self.anon_client = Client()
self.user_client = Client()
self.admin_client = Client()
self.user_client.login(username='foouser', password=user_password)
self.admin_client.login(username='admin', password=admin_password)
def _page_edit(self, title, content, description='', username=None, password=None):
client = Client()
if username:
client.login(username=username, password=password)
rev_count = models.PageRevision.objects.filter(page__title=title).count()
try:
prev_rev = models.PageRevision.objects.filter(page__title=title).order_by('-created')[0].pk
except IndexError:
prev_rev = ''
r = client.get(reverse('djiki-page-edit', kwargs={'title': title}))
self.assertEqual(r.status_code, 200)
r = client.post(reverse('djiki-page-edit', kwargs={'title': title}),
{'content': content, 'description': description, 'prev_revision': prev_rev,
'action': 'preview'})
self.assertEqual(r.status_code, 200)
r = client.post(reverse('djiki-page-edit', kwargs={'title': title}),
{'content': content, 'description': description, 'prev_revision': prev_rev})
self.assertEqual(r.status_code, 302)
p = models.Page.objects.get(title=title)
self.assertEqual(p.revisions.count(), rev_count + 1)
self.assertEqual(p.last_revision().content, content)
if username:
self.assertEqual(p.last_revision().author.username, username)
else:
self.assertEqual(p.last_revision().author, None)
self.assertEqual(p.last_revision().description, description)
return p.last_revision()
def _page_revert(self, revision):
client = Client()
try:
prev_rev = models.PageRevision.objects.filter(page__title=revision.page.title)\
.order_by('-created')[0].pk
except IndexError:
prev_rev = ''
r = client.get(reverse('djiki-page-revert',
kwargs={'title': revision.page.title, 'revision_pk': revision.pk}))
self.assertEqual(r.status_code, 200)
r = client.post(reverse('djiki-page-revert',
kwargs={'title': revision.page.title, 'revision_pk': revision.pk}),
{'content': revision.content, 'description': '', 'prev_revision': prev_rev,
'action': 'preview'})
self.assertEqual(r.status_code, 200)
r = client.post(reverse('djiki-page-revert',
kwargs={'title': revision.page.title, 'revision_pk': revision.pk}),
{'content': revision.content, 'description': '', 'prev_revision': prev_rev})
self.assertEqual(r.status_code, 302)
return revision.page.last_revision()
def test_subsequent_edits(self):
title = u"Test page"
self._page_edit(title, content1, description1)
self._page_edit(title, content2, description2)
self._page_edit(title, content3, description3)
def test_revert(self):
title = u"Revert page"
r1 = self._page_edit(title, content1, description1)
r2 = self._page_edit(title, content2, description2)
r1n = self._page_revert(r1)
self.assertEqual(r1n.content, r1.content)
def test_underscores(self):
title_raw = u"Another test page, let's see..."
title_xlat = u"Another_test_page,_let's_see..."
self._page_edit(title_raw, "test content", "")
client = Client()
r = client.get(reverse('djiki-page-view', kwargs={'title': title_raw}))
self.assertEqual(200, r.status_code)
r = client.get(reverse('djiki-page-view', kwargs={'title': title_xlat}))
self.assertEqual(404, r.status_code)
settings.DJIKI_SPACES_AS_UNDERSCORES = True
r = client.get(reverse('djiki-page-view', kwargs={'title': title_raw}))
self.assertEqual(302, r.status_code)
r = client.get(reverse('djiki-page-view', kwargs={'title': title_xlat}))
self.assertEqual(200, r.status_code)
def test_edit_crash(self):
title = u"Crash page"
self._page_edit(title, content1, description1)
p = models.Page.objects.get(title=title)
first_revision = p.last_revision()
self._page_edit(title, content2, description2)
client = Client()
# attempt to save a new version with an outdated base revision
r = client.post(reverse('djiki-page-edit', kwargs={'title': title}),
{'content': content3, 'description': description3, 'prev_revision': first_revision.pk})
if r.status_code == 200:
print r.content
self.assertEqual(r.status_code, 302)
def test_edits(self):
title = u"Auth test page"
settings.DJIKI_AUTHORIZATION_BACKEND = 'djiki.auth.base.UnrestrictedAccess'
# anonymous create
r = self.anon_client.post(reverse('djiki-page-edit', kwargs={'title': title}),
{'content': "blah", "description": ""})
self.assertEqual(r.status_code, 302)
# anonymous edit
last_pk = models.Page.objects.get(title=title).last_revision().pk
r = self.anon_client.post(reverse('djiki-page-edit', kwargs={'title': title}),
{'content': "blah", "description": "", 'prev_revision': last_pk})
self.assertEqual(r.status_code, 302)
# authenticated edit
last_pk = models.Page.objects.get(title=title).last_revision().pk
r = self.user_client.post(reverse('djiki-page-edit', kwargs={'title': title}),
{'content': "blah", "description": "", 'prev_revision': last_pk})
self.assertEqual(r.status_code, 302)
# admin edit
last_pk = models.Page.objects.get(title=title).last_revision().pk
r = self.admin_client.post(reverse('djiki-page-edit', kwargs={'title': title}),
{'content': "blah", "description": "", 'prev_revision': last_pk})
self.assertEqual(r.status_code, 302)
settings.DJIKI_AUTHORIZATION_BACKEND = 'djiki.auth.base.OnlyAuthenticatedEdits'
# anonymous create
r = self.anon_client.post(reverse('djiki-page-edit', kwargs={'title': u'Other title 1'}),
{'content': "blah", "description": ""})
self.assertEqual(r.status_code, 403)
# anonymous edit
last_pk = models.Page.objects.get(title=title).last_revision().pk
r = self.anon_client.post(reverse('djiki-page-edit', kwargs={'title': title}),
{'content': "blah", "description": "", 'prev_revision': last_pk})
self.assertEqual(r.status_code, 403)
# authenticated create
r = self.user_client.post(reverse('djiki-page-edit', kwargs={'title': u'Other title 2'}),
{'content': "blah", "description": ""})
self.assertEqual(r.status_code, 302)
# authenticated edit
r = self.user_client.post(reverse('djiki-page-edit', kwargs={'title': title}),
{'content': "blah", "description": "", 'prev_revision': last_pk})
self.assertEqual(r.status_code, 302)
# admin edit
last_pk = models.Page.objects.get(title=title).last_revision().pk
r = self.admin_client.post(reverse('djiki-page-edit', kwargs={'title': title}),
{'content': "blah", "description": "", 'prev_revision': last_pk})
self.assertEqual(r.status_code, 302)
settings.DJIKI_AUTHORIZATION_BACKEND = 'djiki.auth.base.OnlyAdminEdits'
# anonymous create
r = self.anon_client.post(reverse('djiki-page-edit', kwargs={'title': u'Other title 3'}),
{'content': "blah", "description": ""})
self.assertEqual(r.status_code, 403)
# anonymous edit
last_pk = models.Page.objects.get(title=title).last_revision().pk
r = self.anon_client.post(reverse('djiki-page-edit', kwargs={'title': title}),
{'content': "blah", "description": "", 'prev_revision': last_pk})
self.assertEqual(r.status_code, 403)
# authenticated create
r = self.user_client.post(reverse('djiki-page-edit', kwargs={'title': u'Other title 4'}),
{'content': "blah", "description": ""})
self.assertEqual(r.status_code, 403)
# authenticated edit
r = self.user_client.post(reverse('djiki-page-edit', kwargs={'title': title}),
{'content': "blah", "description": "", 'prev_revision': last_pk})
self.assertEqual(r.status_code, 403)
# admin create
r = self.admin_client.post(reverse('djiki-page-edit', kwargs={'title': u'Other title 5'}),
{'content': "blah", "description": ""})
self.assertEqual(r.status_code, 302)
# admin edit
r = self.admin_client.post(reverse('djiki-page-edit', kwargs={'title': title}),
{'content': "blah", "description": "", 'prev_revision': last_pk})
self.assertEqual(r.status_code, 302)
def test_history_view(self):
    """History view access control: an unrestricted backend allows anonymous
    viewing (200), while a backend whose can_view_history() returns False
    makes the same request return 403."""
    title = u"History page"
    # Presumably creates the page with one revision so that its history
    # exists — TODO confirm against the _page_edit helper.
    self._page_edit(title, "foo bar", "baz")
    settings.DJIKI_AUTHORIZATION_BACKEND = 'djiki.auth.base.UnrestrictedAccess'
    r = self.anon_client.get(reverse('djiki-page-history', kwargs={'title': title}))
    self.assertEqual(r.status_code, 200)
    # Backend identical to UnrestrictedAccess except history viewing is denied.
    class NoHistoryView(UnrestrictedAccess):
        def can_view_history(self, request, target):
            return False
    # NOTE(review): assigning a class (not a dotted-path string) here —
    # the settings machinery apparently accepts both; verify.
    settings.DJIKI_AUTHORIZATION_BACKEND = NoHistoryView
    r = self.anon_client.get(reverse('djiki-page-history', kwargs={'title': title}))
    self.assertEqual(r.status_code, 403)
| mit |
r39132/airflow | tests/contrib/operators/test_awsbatch_operator.py | 3 | 7809 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import unittest
from airflow import configuration
from airflow.exceptions import AirflowException
from airflow.contrib.operators.awsbatch_operator import AWSBatchOperator
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
# Canned successful submit_job response used by the mocks below: on success
# AWS Batch echoes the job name and returns the server-assigned jobId.
RESPONSE_WITHOUT_FAILURES = {
    "jobName": "51455483-c62c-48ac-9b88-53a6a725baa3",
    "jobId": "8ba9d676-4108-4474-9dca-8bbac1da9b19"
}
class TestAWSBatchOperator(unittest.TestCase):
    """Unit tests for AWSBatchOperator with the AwsHook/boto3 client mocked out."""

    @mock.patch('airflow.contrib.operators.awsbatch_operator.AwsHook')
    def setUp(self, aws_hook_mock):
        # AwsHook is patched for the whole construction so no real AWS
        # credentials/connections are touched.
        configuration.load_test_config()
        self.aws_hook_mock = aws_hook_mock
        self.batch = AWSBatchOperator(
            task_id='task',
            job_name='51455483-c62c-48ac-9b88-53a6a725baa3',
            job_queue='queue',
            job_definition='hello-world',
            max_retries=5,
            overrides={},
            aws_conn_id=None,
            region_name='eu-west-1')

    def test_init(self):
        """Constructor arguments must be stored verbatim and the hook built once."""
        self.assertEqual(self.batch.job_name, '51455483-c62c-48ac-9b88-53a6a725baa3')
        self.assertEqual(self.batch.job_queue, 'queue')
        self.assertEqual(self.batch.job_definition, 'hello-world')
        self.assertEqual(self.batch.max_retries, 5)
        self.assertEqual(self.batch.overrides, {})
        self.assertEqual(self.batch.region_name, 'eu-west-1')
        self.assertEqual(self.batch.aws_conn_id, None)
        self.assertEqual(self.batch.hook, self.aws_hook_mock.return_value)

        self.aws_hook_mock.assert_called_once_with(aws_conn_id=None)

    def test_template_fields_overrides(self):
        # Only job_name and overrides are Jinja-templated.
        self.assertEqual(self.batch.template_fields, ('job_name', 'overrides',))

    @mock.patch.object(AWSBatchOperator, '_wait_for_task_ended')
    @mock.patch.object(AWSBatchOperator, '_check_success_task')
    def test_execute_without_failures(self, check_mock, wait_mock):
        """execute() must submit the job, wait for it, verify success, and
        record the returned jobId."""
        client_mock = self.aws_hook_mock.return_value.get_client_type.return_value
        client_mock.submit_job.return_value = RESPONSE_WITHOUT_FAILURES

        self.batch.execute(None)

        self.aws_hook_mock.return_value.get_client_type.assert_called_once_with(
            'batch', region_name='eu-west-1')
        client_mock.submit_job.assert_called_once_with(
            jobQueue='queue',
            jobName='51455483-c62c-48ac-9b88-53a6a725baa3',
            containerOverrides={},
            jobDefinition='hello-world'
        )
        wait_mock.assert_called_once_with()
        check_mock.assert_called_once_with()
        self.assertEqual(self.batch.jobId, '8ba9d676-4108-4474-9dca-8bbac1da9b19')

    def test_execute_with_failures(self):
        """A malformed submit_job response (no jobId) must raise AirflowException."""
        client_mock = self.aws_hook_mock.return_value.get_client_type.return_value
        client_mock.submit_job.return_value = ""

        with self.assertRaises(AirflowException):
            self.batch.execute(None)

        self.aws_hook_mock.return_value.get_client_type.assert_called_once_with(
            'batch', region_name='eu-west-1')
        client_mock.submit_job.assert_called_once_with(
            jobQueue='queue',
            jobName='51455483-c62c-48ac-9b88-53a6a725baa3',
            containerOverrides={},
            jobDefinition='hello-world'
        )

    def test_wait_end_tasks(self):
        """The waiter must poll our jobId with an unbounded attempt budget."""
        client_mock = mock.Mock()
        self.batch.jobId = '8ba9d676-4108-4474-9dca-8bbac1da9b19'
        self.batch.client = client_mock

        self.batch._wait_for_task_ended()

        client_mock.get_waiter.assert_called_once_with('job_execution_complete')
        client_mock.get_waiter.return_value.wait.assert_called_once_with(
            jobs=['8ba9d676-4108-4474-9dca-8bbac1da9b19']
        )
        # max_attempts is overridden to sys.maxsize, i.e. effectively no limit.
        self.assertEqual(sys.maxsize, client_mock.get_waiter.return_value.config.max_attempts)

    def test_check_success_tasks_raises(self):
        """An empty describe_jobs result means the job vanished -> error."""
        client_mock = mock.Mock()
        self.batch.jobId = '8ba9d676-4108-4474-9dca-8bbac1da9b19'
        self.batch.client = client_mock
        client_mock.describe_jobs.return_value = {
            'jobs': []
        }

        with self.assertRaises(Exception) as e:
            self.batch._check_success_task()

        # Ordering of str(dict) is not guaranteed, so match only the prefix.
        self.assertIn('No job found for ', str(e.exception))

    def test_check_success_tasks_raises_failed(self):
        """A FAILED job status must surface as an exception."""
        client_mock = mock.Mock()
        self.batch.jobId = '8ba9d676-4108-4474-9dca-8bbac1da9b19'
        self.batch.client = client_mock
        client_mock.describe_jobs.return_value = {
            'jobs': [{
                'status': 'FAILED',
                'statusReason': 'This is an error reason',
                'attempts': [{
                    'exitCode': 1
                }]
            }]
        }

        with self.assertRaises(Exception) as e:
            self.batch._check_success_task()

        # Ordering of str(dict) is not guaranteed, so match only the prefix.
        self.assertIn('Job failed with status ', str(e.exception))

    def test_check_success_tasks_raises_pending(self):
        """A still-RUNNABLE job is not success -> exception."""
        client_mock = mock.Mock()
        self.batch.jobId = '8ba9d676-4108-4474-9dca-8bbac1da9b19'
        self.batch.client = client_mock
        client_mock.describe_jobs.return_value = {
            'jobs': [{
                'status': 'RUNNABLE'
            }]
        }

        with self.assertRaises(Exception) as e:
            self.batch._check_success_task()

        # Ordering of str(dict) is not guaranteed, so match only the prefix.
        self.assertIn('This task is still pending ', str(e.exception))

    def test_check_success_tasks_raises_multiple(self):
        """Multiple failed attempts still produce a single failure exception."""
        client_mock = mock.Mock()
        self.batch.jobId = '8ba9d676-4108-4474-9dca-8bbac1da9b19'
        self.batch.client = client_mock
        client_mock.describe_jobs.return_value = {
            'jobs': [{
                'status': 'FAILED',
                'statusReason': 'This is an error reason',
                'attempts': [{
                    'exitCode': 1
                }, {
                    'exitCode': 10
                }]
            }]
        }

        with self.assertRaises(Exception) as e:
            self.batch._check_success_task()

        # Ordering of str(dict) is not guaranteed, so match only the prefix.
        self.assertIn('Job failed with status ', str(e.exception))

    def test_check_success_task_not_raises(self):
        """A SUCCEEDED job passes the check and queries describe_jobs exactly once."""
        client_mock = mock.Mock()
        self.batch.jobId = '8ba9d676-4108-4474-9dca-8bbac1da9b19'
        self.batch.client = client_mock
        client_mock.describe_jobs.return_value = {
            'jobs': [{
                'status': 'SUCCEEDED'
            }]
        }

        self.batch._check_success_task()

        client_mock.describe_jobs.assert_called_once_with(jobs=['8ba9d676-4108-4474-9dca-8bbac1da9b19'])
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
xwolf12/django | django/utils/deconstruct.py | 502 | 2047 | from importlib import import_module
from django.utils.version import get_docs_version
def deconstructible(*args, **kwargs):
    """
    Class decorator that allow the decorated class to be serialized
    by the migrations subsystem.

    Accepts an optional kwarg `path` to specify the import path.

    Usable both bare (``@deconstructible``) and with arguments
    (``@deconstructible(path='app.module.Name')``) — see the dispatch at
    the bottom of this function.
    """
    path = kwargs.pop('path', None)

    def decorator(klass):
        def __new__(cls, *args, **kwargs):
            # We capture the arguments to make returning them trivial.
            # Note: klass (not cls) is closed over so subclasses still walk
            # the MRO from the decorated class.
            obj = super(klass, cls).__new__(cls)
            obj._constructor_args = (args, kwargs)
            return obj

        def deconstruct(obj):
            """
            Returns a 3-tuple of class import path, positional arguments,
            and keyword arguments.
            """
            # Python 2/fallback version
            if path:
                module_name, _, name = path.rpartition('.')
            else:
                module_name = obj.__module__
                name = obj.__class__.__name__
            # Make sure it's actually there and not an inner class —
            # migrations can only import module-level names.
            module = import_module(module_name)
            if not hasattr(module, name):
                raise ValueError(
                    "Could not find object %s in %s.\n"
                    "Please note that you cannot serialize things like inner "
                    "classes. Please move the object into the main module "
                    "body to use migrations.\n"
                    "For more information, see "
                    "https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
                    % (name, module_name, get_docs_version()))
            return (
                path or '%s.%s' % (obj.__class__.__module__, name),
                obj._constructor_args[0],
                obj._constructor_args[1],
            )

        # Install the capture hook and the serializer on the decorated class.
        klass.__new__ = staticmethod(__new__)
        klass.deconstruct = deconstruct

        return klass

    # Bare decorator usage (no positional args) returns the decorator itself;
    # otherwise the decorated class was passed directly.
    if not args:
        return decorator
    return decorator(*args, **kwargs)
| bsd-3-clause |
entomb/CouchPotatoServer | libs/tmdb3/request.py | 34 | 5485 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------
# Name: tmdb_request.py
# Python Library
# Author: Raymond Wagner
# Purpose: Wrapped urllib2.Request class pre-configured for accessing the
# TMDb v3 API
#-----------------------
from tmdb_exceptions import *
from locales import get_locale
from cache import Cache
from urllib import urlencode
import urllib2
import json
import os
# Request tracing is off by default; the API response cache persists to disk.
DEBUG = False
cache = Cache(filename='pytmdb3.cache')

# Debugging alternatives: trace every request and disable caching.
#DEBUG = True
#cache = Cache(engine='null')
def set_key(key):
    """
    Specify the API key to use retrieving data from themoviedb.org.
    This key must be set before any calls will function.

    The key is validated as a 32-character hexadecimal string (128 bits);
    anything else raises TMDBKeyInvalid.
    """
    bad_key_msg = "Specified API key must be 128-bit hex"
    if len(key) != 32:
        raise TMDBKeyInvalid(bad_key_msg)
    try:
        int(key, 16)
    except:
        raise TMDBKeyInvalid(bad_key_msg)
    Request._api_key = key
def set_cache(engine=None, *args, **kwargs):
    """Specify caching engine and properties."""
    # Delegates to the module-level Cache instance; extra positional and
    # keyword arguments are forwarded unchanged to the engine configuration.
    cache.configure(engine, *args, **kwargs)
class Request(urllib2.Request):
    """urllib2.Request subclass pre-configured for the TMDb v3 API:
    builds the URL from an API path and keyword arguments, attaches the
    API key, negotiates JSON, and caches parsed responses."""

    # Shared API key, installed once via set_key().
    _api_key = None
    _base_url = "http://api.themoviedb.org/3/"

    @property
    def api_key(self):
        # Fail loudly if set_key() was never called.
        if self._api_key is None:
            raise TMDBKeyMissing("API key must be specified before " +
                                 "requests can be made")
        return self._api_key

    def __init__(self, url, **kwargs):
        """
        Return a request object, using specified API path and
        arguments.
        """
        kwargs['api_key'] = self.api_key
        self._url = url.lstrip('/')
        # Keep only arguments that were actually supplied (drop None values)
        # so new() can reconstruct this request later.
        self._kwargs = dict([(kwa, kwv) for kwa, kwv in kwargs.items()
                             if kwv is not None])

        # Values are encoded per the active locale before URL-encoding.
        locale = get_locale()
        kwargs = {}
        for k, v in self._kwargs.items():
            kwargs[k] = locale.encode(v)

        url = '{0}{1}?{2}'\
            .format(self._base_url, self._url, urlencode(kwargs))
        urllib2.Request.__init__(self, url)
        self.add_header('Accept', 'application/json')
        # Default cache lifetime for this request's response.
        self.lifetime = 3600  # 1hr

    def new(self, **kwargs):
        """
        Create a new instance of the request, with tweaked arguments.

        Passing None for a keyword removes it; other values override or
        extend the original arguments. The cache lifetime is carried over.
        """
        args = dict(self._kwargs)
        for k, v in kwargs.items():
            if v is None:
                if k in args:
                    del args[k]
            else:
                args[k] = v
        obj = self.__class__(self._url, **args)
        obj.lifetime = self.lifetime
        return obj

    def add_data(self, data):
        """Provide data to be sent with POST."""
        urllib2.Request.add_data(self, urlencode(data))

    def open(self):
        """Open a file object to the specified URL."""
        try:
            if DEBUG:
                print 'loading '+self.get_full_url()
                if self.has_data():
                    print '  '+self.get_data()
            return urllib2.urlopen(self)
        except urllib2.HTTPError, e:
            # Wrap transport-level errors in the library's exception type.
            raise TMDBHTTPError(e)

    def read(self):
        """Return result from specified URL as a string."""
        return self.open().read()

    @cache.cached(urllib2.Request.get_full_url)
    def readJSON(self):
        """Parse result from specified URL as JSON data.

        Responses are cached keyed on the full URL. HTTP errors are
        inspected for a TMDB status payload so a more specific exception
        can be raised via handle_status().
        """
        url = self.get_full_url()
        try:
            # catch HTTP error from open()
            data = json.load(self.open())
        except TMDBHTTPError, e:
            try:
                # try to load whatever was returned
                data = json.loads(e.response)
            except:
                # cannot parse json, just raise existing error
                raise e
            else:
                # response parsed, try to raise error from TMDB
                handle_status(data, url)
                # no error from TMDB, just raise existing error
                raise e
        handle_status(data, url)
        if DEBUG:
            import pprint
            pprint.PrettyPrinter().pprint(data)
        return data
# Map of TMDB status codes to the exception raised by handle_status().
# None means "success / not an error". Fix: "tempirarily" -> "temporarily"
# in the code-9 message (user-visible typo).
status_handlers = {
    1: None,
    2: TMDBRequestInvalid('Invalid service - This service does not exist.'),
    3: TMDBRequestError('Authentication Failed - You do not have ' +
                        'permissions to access this service.'),
    4: TMDBRequestInvalid("Invalid format - This service doesn't exist " +
                          'in that format.'),
    5: TMDBRequestInvalid('Invalid parameters - Your request parameters ' +
                          'are incorrect.'),
    6: TMDBRequestInvalid('Invalid id - The pre-requisite id is invalid ' +
                          'or not found.'),
    7: TMDBKeyInvalid('Invalid API key - You must be granted a valid key.'),
    8: TMDBRequestError('Duplicate entry - The data you tried to submit ' +
                        'already exists.'),
    9: TMDBOffline('This service is temporarily offline. Try again later.'),
    10: TMDBKeyRevoked('Suspended API key - Access to your account has been ' +
                       'suspended, contact TMDB.'),
    11: TMDBError('Internal error - Something went wrong. Contact TMDb.'),
    12: None,
    13: None,
    14: TMDBRequestError('Authentication Failed.'),
    15: TMDBError('Failed'),
    16: TMDBError('Device Denied'),
    17: TMDBError('Session Denied')}
def handle_status(data, query):
    """Raise the mapped TMDB exception for an error response, if any.

    Looks up the response's ``status_code`` (defaulting to 1, i.e. success)
    in ``status_handlers``; codes mapped to None return silently.

    NOTE(review): the handler table holds shared singleton exception
    instances, so setting ``tmdberrno``/``query`` here mutates module-wide
    state — confirm this is acceptable before relying on those attributes.
    """
    code = data.get('status_code', 1)
    err = status_handlers[code]
    if err is None:
        return
    err.tmdberrno = data['status_code']
    err.query = query
    raise err
| gpl-3.0 |
fkorotkov/pants | tests/python/pants_test/backend/jvm/tasks/jvm_compile/test_jvm_compile.py | 7 | 1830 | # coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile
from pants_test.tasks.task_test_base import TaskTestBase
class DummyJvmCompile(JvmCompile):
    """Minimal concrete JvmCompile for testing; provides no analysis tools."""

    def create_analysis_tools(self):
        # The tests below never exercise analysis, so nothing is supplied.
        pass
class JvmCompileTest(TaskTestBase):
    """Tests for JvmCompile's runtime-classpath construction."""

    DEFAULT_CONF = 'default'

    @classmethod
    def task_type(cls):
        # TaskTestBase hook: the task under test.
        return DummyJvmCompile

    def test_if_runtime_classpath_exists(self):
        """Pre-initialized runtime_classpath entries must be preserved and
        come before entries copied from compile_classpath."""
        target = self.make_target(
            'java/classpath:java_lib',
            target_type=JavaLibrary,
            sources=['com/foo/Bar.java'],
        )

        context = self.context(target_roots=[target])

        # Seed compile_classpath with one entry for the target.
        compile_classpath = context.products.get_data('compile_classpath', ClasspathProducts.init_func(self.pants_workdir))
        compile_entry = os.path.join(self.pants_workdir, 'compile-entry')
        pre_init_runtime_entry = os.path.join(self.pants_workdir, 'pre-inited-runtime-entry')
        compile_classpath.add_for_targets([target], [('default', compile_entry)])

        # Seed runtime_classpath with a pre-existing entry for the same target.
        runtime_classpath = context.products.get_data('runtime_classpath', ClasspathProducts.init_func(self.pants_workdir))
        runtime_classpath.add_for_targets([target], [('default', pre_init_runtime_entry)])

        task = self.create_task(context)
        resulting_classpath = task.create_runtime_classpath()
        self.assertEqual([('default', pre_init_runtime_entry), ('default', compile_entry)],
                         resulting_classpath.get_for_target(target))
| apache-2.0 |
pbaesse/Sissens | lib/python2.7/site-packages/wheel/pep425tags.py | 70 | 5760 | """Generate and work with PEP 425 Compatibility Tags."""
import distutils.util
import platform
import sys
import sysconfig
import warnings
def get_config_var(var):
    """Look up *var* via sysconfig, degrading to None on broken installs.

    Some installations raise IOError from sysconfig (pip issue #1074);
    treat that as "unknown": warn and return None instead of propagating.
    """
    try:
        value = sysconfig.get_config_var(var)
    except IOError as err:  # pip Issue #1074
        warnings.warn("{0}".format(err), RuntimeWarning)
        return None
    return value
def get_abbr_impl():
    """Return the two-letter abbreviation for the running interpreter."""
    abbreviations = {
        'PyPy': 'pp',
        'Jython': 'jy',
        'IronPython': 'ip',
        'CPython': 'cp',
    }
    impl = platform.python_implementation()
    try:
        return abbreviations[impl]
    except KeyError:
        raise LookupError('Unknown Python implementation: ' + impl)
def get_impl_ver():
    """Return the implementation version string, e.g. '27' or '36'."""
    version = get_config_var("py_version_nodot")
    # Fall back to the version-info tuple when the config var is unset,
    # and always on PyPy.
    if version and get_abbr_impl() != 'pp':
        return version
    return ''.join(str(part) for part in get_impl_version_info())
def get_impl_version_info():
    """Return sys.version_info-like tuple for use in decrementing the minor
    version."""
    info = sys.version_info
    if get_abbr_impl() != 'pp':
        return info[0], info[1]
    # PyPy versions its implementation separately from the language level
    # it targets; see https://github.com/pypa/pip/issues/2882.
    return info[0], sys.pypy_version_info.major, sys.pypy_version_info.minor
def get_flag(var, fallback, expected=True, warn=True):
    """Use a fallback method for determining SOABI flags if the needed config
    var is unset or unavailable."""
    value = get_config_var(var)
    if value is not None:
        return value == expected
    if warn:
        warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
                      "be incorrect".format(var), RuntimeWarning, 2)
    # The caller-supplied heuristic decides when the config var is missing.
    return fallback()
def get_abi_tag():
    """Return the ABI tag based on SOABI (if available) or emulate SOABI
    (CPython 2, PyPy)."""
    soabi = get_config_var('SOABI')
    impl = get_abbr_impl()
    if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
        # No SOABI reported: reconstruct the flag suffix by hand.
        d = ''  # debug build
        m = ''  # pymalloc
        u = ''  # wide-unicode (Python < 3.3 only)
        if get_flag('Py_DEBUG',
                    lambda: hasattr(sys, 'gettotalrefcount'),
                    warn=(impl == 'cp')):
            d = 'd'
        if get_flag('WITH_PYMALLOC',
                    lambda: impl == 'cp',
                    warn=(impl == 'cp')):
            m = 'm'
        # 'u' only applies before PEP 393 (3.3) removed narrow/wide builds.
        if get_flag('Py_UNICODE_SIZE',
                    lambda: sys.maxunicode == 0x10ffff,
                    expected=4,
                    warn=(impl == 'cp' and
                          sys.version_info < (3, 3))) \
                and sys.version_info < (3, 3):
            u = 'u'
        abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
    elif soabi and soabi.startswith('cpython-'):
        # e.g. 'cpython-36m-x86_64-linux-gnu' -> 'cp36m'
        abi = 'cp' + soabi.split('-')[1]
    elif soabi:
        abi = soabi.replace('.', '_').replace('-', '_')
    else:
        abi = None
    return abi
def get_platform():
    """Return our platform name 'win32', 'linux_x86_64'"""
    # XXX remove distutils dependency
    plat = distutils.util.get_platform().replace('.', '_').replace('-', '_')
    if plat == "linux_x86_64" and sys.maxsize == 2147483647:
        # 32-bit interpreter on a 64-bit kernel still reports x86_64;
        # tag it as i686 instead (pip pull request #3497).
        plat = "linux_i686"
    return plat
def get_supported(versions=None, supplied_platform=None):
    """Return a list of supported tags for each version specified in
    `versions`.

    :param versions: a list of string versions, of the form ["33", "32"],
        or None. The first version will be assumed to support our ABI.
    :param supplied_platform: optional platform string to try before the
        detected one.
    """
    supported = []

    # Versions must be given with respect to the preference
    if versions is None:
        versions = []
        version_info = get_impl_version_info()
        major = version_info[:-1]
        # Support all previous minor Python versions.
        for minor in range(version_info[-1], -1, -1):
            versions.append(''.join(map(str, major + (minor,))))

    impl = get_abbr_impl()

    abis = []

    abi = get_abi_tag()
    if abi:
        abis[0:0] = [abi]

    # Collect abi3 suffixes from the import machinery.
    # NOTE(review): the `imp` module is deprecated (removed in 3.12) —
    # consider importlib.machinery when modernizing.
    abi3s = set()
    import imp
    for suffix in imp.get_suffixes():
        if suffix[0].startswith('.abi'):
            abi3s.add(suffix[0].split('.', 2)[1])

    abis.extend(sorted(list(abi3s)))

    abis.append('none')

    platforms = []
    if supplied_platform:
        platforms.append(supplied_platform)
    platforms.append(get_platform())

    # Current version, current API (built specifically for our Python):
    for abi in abis:
        for arch in platforms:
            supported.append(('%s%s' % (impl, versions[0]), abi, arch))

    # abi3 modules compatible with older version of Python
    for version in versions[1:]:
        # abi3 was introduced in Python 3.2
        if version in ('31', '30'):
            break
        for abi in abi3s:   # empty set if not Python 3
            for arch in platforms:
                supported.append(("%s%s" % (impl, version), abi, arch))

    # No abi / arch, but requires our implementation:
    for i, version in enumerate(versions):
        supported.append(('%s%s' % (impl, version), 'none', 'any'))
        if i == 0:
            # Tagged specifically as being cross-version compatible
            # (with just the major version specified)
            supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))

    # Major Python version + platform; e.g. binaries not using the Python API
    # NOTE(review): `arch` here is the leftover value from the loops above —
    # this relies on loop-variable leakage and only emits the last platform.
    supported.append(('py%s' % (versions[0][0]), 'none', arch))

    # No abi / arch, generic Python
    for i, version in enumerate(versions):
        supported.append(('py%s' % (version,), 'none', 'any'))
        if i == 0:
            supported.append(('py%s' % (version[0]), 'none', 'any'))

    return supported
| gpl-3.0 |
okwow123/djangol2 | example/env/lib/python2.7/site-packages/openid/extensions/draft/pape2.py | 156 | 9330 | """An implementation of the OpenID Provider Authentication Policy
Extension 1.0
@see: http://openid.net/developers/specs/
@since: 2.1.0
"""
# Public API of this module.
__all__ = [
    'Request',
    'Response',
    'ns_uri',
    'AUTH_PHISHING_RESISTANT',
    'AUTH_MULTI_FACTOR',
    'AUTH_MULTI_FACTOR_PHYSICAL',
]
from openid.extension import Extension
import re
# PAPE extension namespace and well-known authentication policy URIs.
ns_uri = "http://specs.openid.net/extensions/pape/1.0"

AUTH_MULTI_FACTOR_PHYSICAL = \
    'http://schemas.openid.net/pape/policies/2007/06/multi-factor-physical'
AUTH_MULTI_FACTOR = \
    'http://schemas.openid.net/pape/policies/2007/06/multi-factor'
AUTH_PHISHING_RESISTANT = \
    'http://schemas.openid.net/pape/policies/2007/06/phishing-resistant'

# Accepts timestamps of the exact form YYYY-MM-DDThh:mm:ssZ.
# Fix: use a raw string — '\d' in a plain string is an invalid escape
# sequence (DeprecationWarning since 3.6, SyntaxError in 3.12).
TIME_VALIDATOR = re.compile(r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ$')
class Request(Extension):
    """A Provider Authentication Policy request, sent from a relying
    party to a provider

    @ivar preferred_auth_policies: The authentication policies that
        the relying party prefers
    @type preferred_auth_policies: [str]

    @ivar max_auth_age: The maximum time, in seconds, that the relying
        party wants to allow to have elapsed before the user must
        re-authenticate
    @type max_auth_age: int or NoneType
    """
    ns_alias = 'pape'

    def __init__(self, preferred_auth_policies=None, max_auth_age=None):
        super(Request, self).__init__()
        # Per-instance default list (avoids the shared-mutable-default trap).
        if not preferred_auth_policies:
            preferred_auth_policies = []
        self.preferred_auth_policies = preferred_auth_policies
        self.max_auth_age = max_auth_age

    def __nonzero__(self):
        # Python 2 truth protocol: the request is truthy only when it
        # actually constrains the provider.
        return bool(self.preferred_auth_policies or
                    self.max_auth_age is not None)

    def addPolicyURI(self, policy_uri):
        """Add an acceptable authentication policy URI to this request

        This method is intended to be used by the relying party to add
        acceptable authentication types to the request.

        @param policy_uri: The identifier for the preferred type of
            authentication.
        @see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-01.html#auth_policies
        """
        # Duplicates are silently ignored.
        if policy_uri not in self.preferred_auth_policies:
            self.preferred_auth_policies.append(policy_uri)

    def getExtensionArgs(self):
        """@see: C{L{Extension.getExtensionArgs}}

        Policies are serialized as a single space-separated value;
        max_auth_age is only included when set.
        """
        ns_args = {
            'preferred_auth_policies': ' '.join(self.preferred_auth_policies)
        }

        if self.max_auth_age is not None:
            ns_args['max_auth_age'] = str(self.max_auth_age)

        return ns_args

    def fromOpenIDRequest(cls, request):
        """Instantiate a Request object from the arguments in a
        C{checkid_*} OpenID message

        Returns None when the message carries no PAPE arguments at all.
        """
        self = cls()
        args = request.message.getArgs(self.ns_uri)

        if args == {}:
            return None

        self.parseExtensionArgs(args)
        return self

    # Python 2-era classmethod declaration (decorators postdate this code).
    fromOpenIDRequest = classmethod(fromOpenIDRequest)

    def parseExtensionArgs(self, args):
        """Set the state of this request to be that expressed in these
        PAPE arguments

        @param args: The PAPE arguments without a namespace

        @rtype: None

        @raises ValueError: When the max_auth_age is not parseable as
            an integer

        NOTE(review): despite the docstring, an unparseable max_auth_age is
        silently ignored below (the ValueError is swallowed).
        """
        # preferred_auth_policies is a space-separated list of policy URIs
        self.preferred_auth_policies = []

        policies_str = args.get('preferred_auth_policies')
        if policies_str:
            for uri in policies_str.split(' '):
                if uri not in self.preferred_auth_policies:
                    self.preferred_auth_policies.append(uri)

        # max_auth_age is base-10 integer number of seconds
        max_auth_age_str = args.get('max_auth_age')
        self.max_auth_age = None

        if max_auth_age_str:
            try:
                self.max_auth_age = int(max_auth_age_str)
            except ValueError:
                pass

    def preferredTypes(self, supported_types):
        """Given a list of authentication policy URIs that a provider
        supports, this method returns the subsequence of those types
        that are preferred by the relying party.

        @param supported_types: A sequence of authentication policy
            type URIs that are supported by a provider

        @returns: The sub-sequence of the supported types that are
            preferred by the relying party. This list will be ordered
            in the order that the types appear in the supported_types
            sequence, and may be empty if the provider does not prefer
            any of the supported authentication types.

        @returntype: [str]
        """
        # Python 2 filter() returns a list here.
        return filter(self.preferred_auth_policies.__contains__,
                      supported_types)

Request.ns_uri = ns_uri
class Response(Extension):
    """A Provider Authentication Policy response, sent from a provider
    to a relying party
    """
    ns_alias = 'pape'

    def __init__(self, auth_policies=None, auth_time=None,
                 nist_auth_level=None):
        super(Response, self).__init__()
        # Per-instance default list (avoids the shared-mutable-default trap).
        if auth_policies:
            self.auth_policies = auth_policies
        else:
            self.auth_policies = []
        self.auth_time = auth_time
        self.nist_auth_level = nist_auth_level

    def addPolicyURI(self, policy_uri):
        """Add a authentication policy to this response

        This method is intended to be used by the provider to add a
        policy that the provider conformed to when authenticating the user.

        @param policy_uri: The identifier for the preferred type of
            authentication.
        @see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-01.html#auth_policies
        """
        # Duplicates are silently ignored.
        if policy_uri not in self.auth_policies:
            self.auth_policies.append(policy_uri)

    def fromSuccessResponse(cls, success_response):
        """Create a C{L{Response}} object from a successful OpenID
        library response
        (C{L{openid.consumer.consumer.SuccessResponse}}) response
        message

        @param success_response: A SuccessResponse from consumer.complete()

        @type success_response: C{L{openid.consumer.consumer.SuccessResponse}}

        @rtype: Response or None
        @returns: A provider authentication policy response from the
            data that was supplied with the C{id_res} response or None
            if the provider sent no signed PAPE response arguments.
        """
        self = cls()

        # PAPE requires that the args be signed.
        args = success_response.getSignedNS(self.ns_uri)

        # Only try to construct a PAPE response if the arguments were
        # signed in the OpenID response. If not, return None.
        if args is not None:
            self.parseExtensionArgs(args)
            return self
        else:
            return None

    def parseExtensionArgs(self, args, strict=False):
        """Parse the provider authentication policy arguments into the
        internal state of this object

        @param args: unqualified provider authentication policy
            arguments

        @param strict: Whether to raise an exception when bad data is
            encountered

        @returns: None. The data is parsed into the internal fields of
            this object.
        """
        # 'none' is the spec's explicit "no policies" marker.
        policies_str = args.get('auth_policies')
        if policies_str and policies_str != 'none':
            self.auth_policies = policies_str.split(' ')

        nist_level_str = args.get('nist_auth_level')
        if nist_level_str:
            try:
                nist_level = int(nist_level_str)
            except ValueError:
                if strict:
                    raise ValueError('nist_auth_level must be an integer between '
                                     'zero and four, inclusive')
                else:
                    self.nist_auth_level = None
            else:
                # NOTE(review): an out-of-range level (e.g. 7) is silently
                # dropped even when strict=True — self.nist_auth_level keeps
                # its previous value in that case.
                if 0 <= nist_level < 5:
                    self.nist_auth_level = nist_level

        auth_time = args.get('auth_time')
        if auth_time:
            if TIME_VALIDATOR.match(auth_time):
                self.auth_time = auth_time
            elif strict:
                raise ValueError("auth_time must be in RFC3339 format")

    # Python 2-era classmethod declaration (decorators postdate this code).
    fromSuccessResponse = classmethod(fromSuccessResponse)

    def getExtensionArgs(self):
        """@see: C{L{Extension.getExtensionArgs}}

        Serializes policies as a space-separated list (or 'none'),
        validating nist_auth_level and auth_time before emitting them.
        """
        if len(self.auth_policies) == 0:
            ns_args = {
                'auth_policies': 'none',
            }
        else:
            ns_args = {
                'auth_policies': ' '.join(self.auth_policies),
            }

        if self.nist_auth_level is not None:
            if self.nist_auth_level not in range(0, 5):
                raise ValueError('nist_auth_level must be an integer between '
                                 'zero and four, inclusive')
            ns_args['nist_auth_level'] = str(self.nist_auth_level)

        if self.auth_time is not None:
            if not TIME_VALIDATOR.match(self.auth_time):
                raise ValueError('auth_time must be in RFC3339 format')

            ns_args['auth_time'] = self.auth_time

        return ns_args

Response.ns_uri = ns_uri
| mit |
hwroitzsch/BikersLifeSaver | lib/python3.5/site-packages/requests/packages/chardet/sbcharsetprober.py | 2927 | 4793 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
# Number of most-frequent character orders tracked by the sequence model.
SAMPLE_SIZE = 64
# Minimum number of observed sequences before confidence shortcuts apply.
SB_ENOUGH_REL_THRESHOLD = 1024
# Confidence above which the prober declares a winner early.
POSITIVE_SHORTCUT_THRESHOLD = 0.95
# Confidence below which the prober rules itself out early.
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
# Orders at or above this value are symbols/control chars, not letters.
SYMBOL_CAT_ORDER = 250
# The precedence matrix buckets sequence likelihood into this many categories.
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
    """Detects a single-byte charset by scoring adjacent-character
    (bigram) frequencies against a per-charset language model."""

    def __init__(self, model, reversed=False, nameProber=None):
        CharSetProber.__init__(self)
        self._mModel = model
        # TRUE if we need to reverse every pair in the model lookup
        self._mReversed = reversed
        # Optional auxiliary prober for name decision
        self._mNameProber = nameProber
        self.reset()

    def reset(self):
        CharSetProber.reset(self)
        # char order of last character
        self._mLastOrder = 255
        # One counter per likelihood category from the precedence matrix.
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        # characters that fall in our sampling range
        self._mFreqChar = 0

    def get_charset_name(self):
        # Defer to the name prober (e.g. for Hebrew logical/visual) if present.
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        """Feed a chunk of bytes; update bigram statistics and possibly
        settle on a verdict (eFoundIt / eNotMe)."""
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            # Map the byte to its frequency order in this charset's model.
            order = self._mModel['charToOrderMap'][wrap_ord(c)]
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                if self._mLastOrder < SAMPLE_SIZE:
                    # Both chars are in the sampled range: score the bigram.
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        i = (self._mLastOrder * SAMPLE_SIZE) + order
                        model = self._mModel['precedenceMatrix'][i]
                    else:  # reverse the order of the letters in the lookup
                        i = (order * SAMPLE_SIZE) + self._mLastOrder
                        model = self._mModel['precedenceMatrix'][i]
                    self._mSeqCounters[model] += 1
            self._mLastOrder = order

        if self.get_state() == constants.eDetecting:
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a'
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative'
                                         'shortcut threshhold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe

        return self.get_state()

    def get_confidence(self):
        """Ratio of positive-category bigrams, normalized by the model's
        typical positive ratio and scaled by sampled-character coverage."""
        r = 0.01
        if self._mTotalSeqs > 0:
            r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
                 / self._mModel['mTypicalPositiveRatio'])
            r = r * self._mFreqChar / self._mTotalChar
        # Never report certainty; cap just below 1.0.
        if r >= 1.0:
            r = 0.99
        return r
| mit |
x303597316/hue | desktop/core/ext-py/requests-2.6.0/requests/api.py | 160 | 5280 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How long to wait for the server to send data
        before giving up, as a float, or a (`connect timeout, read timeout
        <user/advanced.html#timeouts>`_) tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      <Response [200]>
    """
    session = sessions.Session()
    try:
        return session.request(method=method, url=url, **kwargs)
    finally:
        # By explicitly closing the session, we avoid leaving sockets open
        # (which can trigger a ResourceWarning and look like a memory leak).
        # BUG FIX: closing in ``finally`` also releases the pooled sockets
        # when session.request() raises; previously an exception skipped the
        # close() call and leaked the connection pool.
        session.close()
def get(url, **kwargs):
    r"""Sends a GET request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # GET follows redirects unless the caller explicitly opted out.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('get', url, **kwargs)
def options(url, **kwargs):
    r"""Sends a OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Redirects are followed by default, as for GET.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('options', url, **kwargs)
def head(url, **kwargs):
    r"""Sends a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Unlike the other verbs, HEAD does not follow redirects by default.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = False
    return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
    r"""Sends a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Fold the explicit body parameters back into kwargs and delegate.
    kwargs.update(data=data, json=json)
    return request('post', url, **kwargs)
def put(url, data=None, **kwargs):
    r"""Sends a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Fold the explicit body parameter back into kwargs and delegate.
    kwargs.update(data=data)
    return request('put', url, **kwargs)
def patch(url, data=None, **kwargs):
    r"""Sends a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Fold the explicit body parameter back into kwargs and delegate.
    kwargs.update(data=data)
    return request('patch', url, **kwargs)
def delete(url, **kwargs):
    r"""Sends a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('delete', url, **kwargs)
| apache-2.0 |
stefanweller/ansible-modules-extras | cloud/azure/azure_rm_deployment.py | 3 | 27691 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: azure_rm_deployment
short_description: Create or destroy Azure Resource Manager template deployments
version_added: "2.1"
description:
- "Create or destroy Azure Resource Manager template deployments via the Azure SDK for Python.
You can find some quick start templates in GitHub here https://github.com/azure/azure-quickstart-templates.
 For more information on Azure resource manager templates see https://azure.microsoft.com/en-us/documentation/articles/resource-group-template-deploy/."
options:
resource_group_name:
description:
- The resource group name to use or create to host the deployed template
required: true
location:
description:
- The geo-locations in which the resource group will be located.
required: false
default: westus
deployment_mode:
description:
- In incremental mode, resources are deployed without deleting existing resources that are not included in the template.
In complete mode resources are deployed and existing resources in the resource group not included in the template are deleted.
required: false
default: complete
choices:
- complete
- incremental
state:
description:
- If state is "present", template will be created. If state is "present" and if deployment exists, it will be
updated. If state is "absent", stack will be removed.
default: present
required: false
choices:
- present
- absent
template:
description:
- A hash containing the templates inline. This parameter is mutually exclusive with 'template_link'.
Either one of them is required if "state" parameter is "present".
required: false
default: null
template_link:
description:
- Uri of file containing the template body. This parameter is mutually exclusive with 'template'. Either one
of them is required if "state" parameter is "present".
required: false
default: null
parameters:
description:
- A hash of all the required template variables for the deployment template. This parameter is mutually exclusive
with 'parameters_link'. Either one of them is required if "state" parameter is "present".
required: false
default: null
parameters_link:
description:
- Uri of file containing the parameters body. This parameter is mutually exclusive with 'parameters'. Either
one of them is required if "state" parameter is "present".
required: false
default: null
deployment_mode:
description:
- Specifies whether the deployment template should delete resources not specified in the template (complete)
or ignore them (incremental).
default: complete
choices:
- complete
- incremental
deployment_name:
description:
- The name of the deployment to be tracked in the resource group deployment history. Re-using a deployment name
will overwrite the previous value in the resource group's deployment history.
default: ansible-arm
wait_for_deployment_completion:
description:
- Whether or not to block until the deployment has completed.
default: yes
choices: ['yes', 'no']
wait_for_deployment_polling_period:
description:
- Time (in seconds) to wait between polls when waiting for deployment completion.
default: 10
extends_documentation_fragment:
- azure
author:
- David Justice (@devigned)
- Laurent Mazuel (@lmazuel)
- Andre Price (@obsoleted)
'''
EXAMPLES = '''
# Destroy a template deployment
- name: Destroy Azure Deploy
azure_rm_deployment:
state: absent
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
# Create or update a template deployment based on uris using parameter and template links
- name: Create Azure Deploy
azure_rm_deployment:
state: present
resource_group_name: dev-ops-cle
template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.json'
parameters_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.parameters.json'
# Create or update a template deployment based on a uri to the template and parameters specified inline.
# This deploys a VM with SSH support for a given public key, then stores the result in 'azure_vms'. The result is then
# used to create a new host group. This host group is then used to wait for each instance to respond to the public IP SSH.
---
- hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Destroy Azure Deploy
azure_rm_deployment:
state: absent
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
- name: Create Azure Deploy
azure_rm_deployment:
state: present
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
parameters:
newStorageAccountName:
value: devopsclestorage1
adminUsername:
value: devopscle
dnsNameForPublicIP:
value: devopscleazure
location:
value: West US
vmSize:
value: Standard_A2
vmName:
value: ansibleSshVm
sshKeyData:
value: YOUR_SSH_PUBLIC_KEY
template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-sshkey/azuredeploy.json'
register: azure
- name: Add new instance to host group
add_host: hostname={{ item['ips'][0].public_ip }} groupname=azure_vms
with_items: azure.deployment.instances
- hosts: azure_vms
user: devopscle
tasks:
- name: Wait for SSH to come up
wait_for: port=22 timeout=2000 state=started
- name: echo the hostname of the vm
shell: hostname
# Deploy an Azure WebApp running a hello world'ish node app
- name: Create Azure WebApp Deployment at http://devopscleweb.azurewebsites.net/hello.js
azure_rm_deployment:
state: present
subscription_id: cbbdaed0-fea9-4693-bf0c-d446ac93c030
resource_group_name: dev-ops-cle-webapp
parameters:
repoURL:
value: 'https://github.com/devigned/az-roadshow-oss.git'
siteName:
value: devopscleweb
hostingPlanName:
value: someplan
siteLocation:
value: westus
sku:
value: Standard
template_link: 'https://raw.githubusercontent.com/azure/azure-quickstart-templates/master/201-web-app-github-deploy/azuredeploy.json'
# Create or update a template deployment based on an inline template and parameters
- name: Create Azure Deploy
 azure_rm_deployment:
state: present
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
template:
$schema: "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#"
contentVersion: "1.0.0.0"
parameters:
newStorageAccountName:
type: "string"
metadata:
description: "Unique DNS Name for the Storage Account where the Virtual Machine's disks will be placed."
adminUsername:
type: "string"
metadata:
description: "User name for the Virtual Machine."
adminPassword:
type: "securestring"
metadata:
description: "Password for the Virtual Machine."
dnsNameForPublicIP:
type: "string"
metadata:
description: "Unique DNS Name for the Public IP used to access the Virtual Machine."
ubuntuOSVersion:
type: "string"
defaultValue: "14.04.2-LTS"
allowedValues:
- "12.04.5-LTS"
- "14.04.2-LTS"
- "15.04"
metadata:
description: "The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version. Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04."
variables:
location: "West US"
imagePublisher: "Canonical"
imageOffer: "UbuntuServer"
OSDiskName: "osdiskforlinuxsimple"
nicName: "myVMNic"
addressPrefix: "10.0.0.0/16"
subnetName: "Subnet"
subnetPrefix: "10.0.0.0/24"
storageAccountType: "Standard_LRS"
publicIPAddressName: "myPublicIP"
publicIPAddressType: "Dynamic"
vmStorageAccountContainerName: "vhds"
vmName: "MyUbuntuVM"
vmSize: "Standard_D1"
virtualNetworkName: "MyVNET"
vnetID: "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]"
subnetRef: "[concat(variables('vnetID'),'/subnets/',variables('subnetName'))]"
resources:
-
type: "Microsoft.Storage/storageAccounts"
name: "[parameters('newStorageAccountName')]"
apiVersion: "2015-05-01-preview"
location: "[variables('location')]"
properties:
accountType: "[variables('storageAccountType')]"
-
apiVersion: "2015-05-01-preview"
type: "Microsoft.Network/publicIPAddresses"
name: "[variables('publicIPAddressName')]"
location: "[variables('location')]"
properties:
publicIPAllocationMethod: "[variables('publicIPAddressType')]"
dnsSettings:
domainNameLabel: "[parameters('dnsNameForPublicIP')]"
-
type: "Microsoft.Network/virtualNetworks"
apiVersion: "2015-05-01-preview"
name: "[variables('virtualNetworkName')]"
location: "[variables('location')]"
properties:
addressSpace:
addressPrefixes:
- "[variables('addressPrefix')]"
subnets:
-
name: "[variables('subnetName')]"
properties:
addressPrefix: "[variables('subnetPrefix')]"
-
type: "Microsoft.Network/networkInterfaces"
apiVersion: "2015-05-01-preview"
name: "[variables('nicName')]"
location: "[variables('location')]"
dependsOn:
- "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]"
- "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
properties:
ipConfigurations:
-
name: "ipconfig1"
properties:
privateIPAllocationMethod: "Dynamic"
publicIPAddress:
id: "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]"
subnet:
id: "[variables('subnetRef')]"
-
type: "Microsoft.Compute/virtualMachines"
apiVersion: "2015-06-15"
name: "[variables('vmName')]"
location: "[variables('location')]"
dependsOn:
- "[concat('Microsoft.Storage/storageAccounts/', parameters('newStorageAccountName'))]"
- "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]"
properties:
hardwareProfile:
vmSize: "[variables('vmSize')]"
osProfile:
computername: "[variables('vmName')]"
adminUsername: "[parameters('adminUsername')]"
adminPassword: "[parameters('adminPassword')]"
storageProfile:
imageReference:
publisher: "[variables('imagePublisher')]"
offer: "[variables('imageOffer')]"
sku: "[parameters('ubuntuOSVersion')]"
version: "latest"
osDisk:
name: "osdisk"
vhd:
uri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',variables('OSDiskName'),'.vhd')]"
caching: "ReadWrite"
createOption: "FromImage"
networkProfile:
networkInterfaces:
-
id: "[resourceId('Microsoft.Network/networkInterfaces',variables('nicName'))]"
diagnosticsProfile:
bootDiagnostics:
enabled: "true"
storageUri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net')]"
parameters:
newStorageAccountName:
value: devopsclestorage
adminUsername:
value: devopscle
adminPassword:
value: Password1!
dnsNameForPublicIP:
value: devopscleazure
'''
RETURN = '''
deployment:
description: Deployment details
type: dict
returned: always
sample:
group_name:
description: Name of the resource group
type: string
returned: always
id:
description: The Azure ID of the deployment
type: string
returned: always
instances:
description: Provides the public IP addresses for each VM instance.
type: list
returned: always
name:
description: Name of the deployment
type: string
returned: always
outputs:
description: Dictionary of outputs received from the deployment
type: dict
returned: always
'''
# Record (rather than crash on) missing prerequisites so exec_module() can
# report them through Ansible's normal fail path.
PREREQ_IMPORT_ERROR = None
try:
    import time
    import yaml
except ImportError as exc:
    # BUG FIX: this was previously assigned to a different name
    # (IMPORT_ERROR), so the PREREQ_IMPORT_ERROR check in exec_module()
    # could never fire and a missing PyYAML crashed later with NameError.
    PREREQ_IMPORT_ERROR = "Error importing module prerequisites: %s" % exc
from ansible.module_utils.azure_rm_common import *
try:
from itertools import chain
from azure.common.credentials import ServicePrincipalCredentials
from azure.common.exceptions import CloudError
from azure.mgmt.resource.resources.models import (DeploymentProperties,
ParametersLink,
TemplateLink,
Deployment,
ResourceGroup,
Dependency)
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMDeploymentManager(AzureRMModuleBase):
    """Ansible module implementation for Azure Resource Manager template
    deployments: creates/updates a deployment or deletes its resource group.
    """

    def __init__(self):
        # Module-specific arguments, merged by the base class with the
        # common Azure auth options.
        self.module_arg_spec = dict(
            resource_group_name=dict(type='str', required=True, aliases=['resource_group']),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            template=dict(type='dict', default=None),
            parameters=dict(type='dict', default=None),
            template_link=dict(type='str', default=None),
            parameters_link=dict(type='str', default=None),
            location=dict(type='str', default="westus"),
            deployment_mode=dict(type='str', default='complete', choices=['complete', 'incremental']),
            deployment_name=dict(type='str', default="ansible-arm"),
            wait_for_deployment_completion=dict(type='bool', default=True),
            wait_for_deployment_polling_period=dict(type='int', default=10)
        )
        # A template/parameters body and a link to one are alternatives.
        mutually_exclusive = [('template', 'template_link'),
                              ('parameters', 'parameters_link')]
        # Placeholders; exec_module() copies the validated argument values
        # onto these attributes before doing any work.
        self.resource_group_name = None
        self.state = None
        self.template = None
        self.parameters = None
        self.template_link = None
        self.parameters_link = None
        self.location = None
        self.deployment_mode = None
        self.deployment_name = None
        self.wait_for_deployment_completion = None
        self.wait_for_deployment_polling_period = None
        self.tags = None
        # Result dict returned to Ansible.
        self.results = dict(
            deployment=dict(),
            changed=False,
            msg=""
        )
        # NOTE: running the module happens inside the base-class __init__,
        # which eventually calls exec_module().
        super(AzureRMDeploymentManager, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                       mutually_exclusive=mutually_exclusive,
                                                       supports_check_mode=False)
def exec_module(self, **kwargs):
if PREREQ_IMPORT_ERROR:
self.fail(PREREQ_IMPORT_ERROR)
for key in self.module_arg_spec.keys() + ['tags']:
setattr(self, key, kwargs[key])
if self.state == 'present':
deployment = self.deploy_template()
self.results['deployment'] = dict(
name=deployment.name,
group_name=self.resource_group_name,
id=deployment.id,
outputs=deployment.properties.outputs,
instances=self._get_instances(deployment)
)
self.results['changed'] = True
self.results['msg'] = 'deployment succeeded'
else:
if self.resource_group_exists(self.resource_group_name):
self.destroy_resource_group()
self.results['changed'] = True
self.results['msg'] = "deployment deleted"
return self.results
    def deploy_template(self):
        """
        Deploy the configured template/parameters into the resource group.

        Creates the resource group if needed, submits the deployment, and
        (optionally) polls until Azure reports a terminal provisioning state.

        :return: the Azure deployment result object (terminal state when
            wait_for_deployment_completion is set); calls self.fail() on error.
        """
        deploy_parameter = DeploymentProperties()
        deploy_parameter.mode = self.deployment_mode
        # Inline body and link are mutually exclusive (enforced in __init__),
        # so exactly one of each pair is populated here.
        if not self.parameters_link:
            deploy_parameter.parameters = self.parameters
        else:
            deploy_parameter.parameters_link = ParametersLink(
                uri=self.parameters_link
            )
        if not self.template_link:
            deploy_parameter.template = self.template
        else:
            deploy_parameter.template_link = TemplateLink(
                uri=self.template_link
            )
        # Ensure the target resource group exists (idempotent).
        params = ResourceGroup(location=self.location, tags=self.tags)
        try:
            self.rm_client.resource_groups.create_or_update(self.resource_group_name, params)
        except CloudError as exc:
            self.fail("Resource group create_or_update failed with status code: %s and message: %s" %
                      (exc.status_code, exc.message))
        try:
            result = self.rm_client.deployments.create_or_update(self.resource_group_name,
                                                                 self.deployment_name,
                                                                 deploy_parameter)
            deployment_result = self.get_poller_result(result)
            if self.wait_for_deployment_completion:
                # Poll until Azure reports a terminal provisioning state.
                while deployment_result.properties is None or deployment_result.properties.provisioning_state not in ['Canceled', 'Failed', 'Deleted',
                                                                                                                      'Succeeded']:
                    time.sleep(self.wait_for_deployment_polling_period)
                    deployment_result = self.rm_client.deployments.get(self.resource_group_name, self.deployment_name)
        except CloudError as exc:
            # Surface the per-resource failures so the user sees the root cause.
            failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
            self.log("Deployment failed %s: %s" % (exc.status_code, exc.message))
            self.fail("Deployment failed with status code: %s and message: %s" % (exc.status_code, exc.message),
                      failed_deployment_operations=failed_deployment_operations)
        # A terminal state other than 'Succeeded' is still a module failure.
        if self.wait_for_deployment_completion and deployment_result.properties.provisioning_state != 'Succeeded':
            self.log("provisioning state: %s" % deployment_result.properties.provisioning_state)
            failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
            self.fail('Deployment failed. Deployment id: %s' % deployment_result.id,
                      failed_deployment_operations=failed_deployment_operations)
        return deployment_result
    def destroy_resource_group(self):
        """
        Destroy the targeted resource group (and everything in it),
        blocking until the delete completes.  A 404/204 from Azure means
        the group is already gone and is treated as success.
        """
        try:
            result = self.rm_client.resource_groups.delete(self.resource_group_name)
            result.wait() # Blocking wait till the delete is finished
        except CloudError as e:
            if e.status_code == 404 or e.status_code == 204:
                # Already deleted (or delete raced us) -- nothing to do.
                return
            else:
                self.fail("Delete resource group and deploy failed with status code: %s and message: %s" %
                          (e.status_code, e.message))
def resource_group_exists(self, resource_group):
'''
Return True/False based on existence of requested resource group.
:param resource_group: string. Name of a resource group.
:return: boolean
'''
try:
self.rm_client.resource_groups.get(resource_group)
except CloudError:
return False
return True
def _get_failed_nested_operations(self, current_operations):
new_operations = []
for operation in current_operations:
if operation.properties.provisioning_state == 'Failed':
new_operations.append(operation)
if operation.properties.target_resource and \
'Microsoft.Resources/deployments' in operation.properties.target_resource.id:
nested_deployment = operation.properties.target_resource.resource_name
try:
nested_operations = self.rm_client.deployment_operations.list(self.resource_group_name,
nested_deployment)
except CloudError as exc:
self.fail("List nested deployment operations failed with status code: %s and message: %s" %
(e.status_code, e.message))
new_nested_operations = self._get_failed_nested_operations(nested_operations)
new_operations += new_nested_operations
return new_operations
def _get_failed_deployment_operations(self, deployment_name):
results = []
# time.sleep(15) # there is a race condition between when we ask for deployment status and when the
# # status is available.
try:
operations = self.rm_client.deployment_operations.list(self.resource_group_name, deployment_name)
except CloudError as exc:
self.fail("Get deployment failed with status code: %s and message: %s" %
(exc.status_code, exc.message))
try:
results = [
dict(
id=op.id,
operation_id=op.operation_id,
status_code=op.properties.status_code,
status_message=op.properties.status_message,
target_resource=dict(
id=op.properties.target_resource.id,
resource_name=op.properties.target_resource.resource_name,
resource_type=op.properties.target_resource.resource_type
) if op.properties.target_resource else None,
provisioning_state=op.properties.provisioning_state,
)
for op in self._get_failed_nested_operations(operations)
]
except:
# If we fail here, the original error gets lost and user receives wrong error message/stacktrace
pass
self.log(dict(failed_deployment_operations=results), pretty_print=True)
return results
    def _get_instances(self, deployment):
        """Walk the deployment's dependency graph and return, for every
        deployed VM that has at least one public IP, a dict of
        ``{vm_name, ips}`` (ips as produced by _get_ip_dict)."""
        dep_tree = self._build_hierarchy(deployment.properties.dependencies)
        vms = self._get_dependencies(dep_tree, resource_type="Microsoft.Compute/virtualMachines")
        # Pair each VM with the NIC nodes found beneath it in the tree...
        vms_and_nics = [(vm, self._get_dependencies(vm['children'], "Microsoft.Network/networkInterfaces"))
                        for vm in vms]
        # ...then resolve those NICs to their public IP address objects.
        vms_and_ips = [(vm['dep'], self._nic_to_public_ips_instance(nics))
                       for vm, nics in vms_and_nics]
        return [dict(vm_name=vm.resource_name, ips=[self._get_ip_dict(ip)
                                                    for ip in ips]) for vm, ips in vms_and_ips if len(ips) > 0]
def _get_dependencies(self, dep_tree, resource_type):
matches = [value for value in dep_tree.values() if value['dep'].resource_type == resource_type]
for child_tree in [value['children'] for value in dep_tree.values()]:
matches += self._get_dependencies(child_tree, resource_type)
return matches
    def _build_hierarchy(self, dependencies, tree=None):
        """Turn Azure's flat dependency list into a nested tree of
        ``{resource_name: {'dep': <Dependency>, 'children': {...}}}``.

        The top-level call re-parents any node that is also listed as a
        child of another node, so each resource appears once, under its
        dependant.
        """
        tree = dict(top=True) if tree is None else tree
        for dep in dependencies:
            if dep.resource_name not in tree:
                tree[dep.resource_name] = dict(dep=dep, children=dict())
            if isinstance(dep, Dependency) and dep.depends_on is not None and len(dep.depends_on) > 0:
                # Recurse to register this dependency's own prerequisites
                # as its children.
                self._build_hierarchy(dep.depends_on, tree[dep.resource_name]['children'])
        # Only the outermost call carries the 'top' marker; the fix-up below
        # must run exactly once, on the completed tree.
        if 'top' in tree:
            tree.pop('top', None)
            keys = list(tree.keys())
            # Move nodes that appear as another node's child out of the top
            # level and attach the fully-built subtree in their place.
            for key1 in keys:
                for key2 in keys:
                    if key2 in tree and key1 in tree[key2]['children'] and key1 in tree:
                        tree[key2]['children'][key1] = tree[key1]
                        tree.pop(key1)
        return tree
def _get_ip_dict(self, ip):
ip_dict = dict(name=ip.name,
id=ip.id,
public_ip=ip.ip_address,
public_ip_allocation_method=str(ip.public_ip_allocation_method)
)
if ip.dns_settings:
ip_dict['dns_settings'] = {
'domain_name_label':ip.dns_settings.domain_name_label,
'fqdn':ip.dns_settings.fqdn
}
return ip_dict
def _nic_to_public_ips_instance(self, nics):
return [self.network_client.public_ip_addresses.get(self.resource_group_name, public_ip_id.split('/')[-1])
for nic_obj in [self.network_client.network_interfaces.get(self.resource_group_name,
nic['dep'].resource_name) for nic in nics]
for public_ip_id in [ip_conf_instance.public_ip_address.id
for ip_conf_instance in nic_obj.ip_configurations
if ip_conf_instance.public_ip_address]]
def main():
    # Instantiating the manager runs the whole module (the base class
    # drives exec_module() from its __init__).
    AzureRMDeploymentManager()
# Ansible modules of this era star-import module_utils at the *bottom* of the
# file by convention, so the boilerplate is injected before main() runs.
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
CraigHarris/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/crashrecovery/commit_create_tests/trigger_sql/test_triggersqls.py | 36 | 1128 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mpp.models import SQLTestCase
'''
Trigger sqls for create_commit tests
'''
class TestTriggerSQLClass(SQLTestCase):
    '''
    This class contains all the sqls that are part of the trigger phase
    The sqls in here will get suspended by one of the faults that are triggered in the main run
    @gucs gp_create_table_random_default_distribution=off
    @gpdiff False
    '''
    # Directories (relative to this module) that SQLTestCase scans for the
    # trigger .sql files, their expected answers, and the run output.
    # NOTE: the @gucs/@gpdiff tags in the docstring above are parsed by the
    # tinc framework -- do not reword them.
    sql_dir = 'sql/'
    ans_dir = 'expected/'
    out_dir = 'output/'
| apache-2.0 |
glaubitz/fs-uae-debian | launcher/launcher/netplay/netplay_panel.py | 2 | 5735 | import fsui
from launcher.i18n import gettext
from launcher.netplay.irc import LOBBY_CHANNEL
from launcher.netplay.irc_broadcaster import IRCBroadcaster
from launcher.netplay.netplay import Netplay
from launcher.ui.skin import Skin
class NetplayPanel(fsui.Panel):
    """Launcher panel for IRC-based net play: channel list, nick list,
    chat log and a command input field."""

    def __init__(self, parent, header=True):
        fsui.Panel.__init__(self, parent)
        Skin.set_background_color(self)
        self.layout = fsui.VerticalLayout()
        if header:
            hori_layout = fsui.HorizontalLayout()
            self.layout.add(hori_layout, fill=True)
            self.layout.add_spacer(0)
            label = fsui.HeadingLabel(self, gettext("Net Play"))
            hori_layout.add(label, margin=10)
            hori_layout.add_spacer(0, expand=True)
        # TODO
        # These strings are not used yet; calling gettext() here keeps them
        # registered for translation extraction (presumably for the planned
        # nick/connect controls below -- confirm before removing).
        gettext("Nick:")
        gettext("Connect")
        gettext("Disconnect")
        # self.nick_label = fsui.Label(self, _("Nick:"))
        # hori_layout.add(self.nick_label,
        #                 margin=10, margin_top=0, margin_bottom=0)
        #
        # self.nick_field = fsui.TextField(self, Settings.get("irc_nick"))
        # self.nick_field.set_min_width(130)
        # hori_layout.add(self.nick_field, margin_right=10)
        # #self.nick_field.on_changed = self.on_nick_change
        #
        # self.connect_button = fsui.Button(self, _("Connect"))
        # hori_layout.add(self.connect_button, margin_right=10)
        # #self.connect_button.activated.connect(self.on_connect_button)
        #
        # self.disconnect_button = fsui.Button(self, _("Disconnect"))
        # hori_layout.add(self.disconnect_button, margin_right=10)
        # #self.disconnect_button.activated.connect(self.on_disconnect_button)
        # Main area: channel + nick lists on the left, chat log on the right.
        hori_layout = fsui.HorizontalLayout()
        self.layout.add(hori_layout, fill=True, expand=True)
        ver_layout = fsui.VerticalLayout()
        hori_layout.add(ver_layout, fill=True)
        self.channel_list = fsui.ListView(self)
        self.channel_list.set_min_width(212)
        self.channel_list.on_select_item = self.on_select_channel
        ver_layout.add(self.channel_list, fill=True, expand=True, margin=10)
        self.nick_list = fsui.ListView(self)
        ver_layout.add(self.nick_list, fill=True, expand=True, margin=10)
        self.text_area = fsui.TextArea(self, font_family="monospace")
        hori_layout.add(
            self.text_area, fill=True, expand=True, margin=10, margin_left=0
        )
        # Command/chat input along the bottom of the panel.
        self.input_field = fsui.TextField(self)
        self.input_field.activated.connect(self.on_input)
        self.layout.add(self.input_field, fill=True, margin=10, margin_top=0)
        self.active_channel = LOBBY_CHANNEL
        self.input_field.focus()
        self.netplay = Netplay()
        # Listen for IRC broadcast events (see on_irc); removed in on_destroy.
        IRCBroadcaster.add_listener(self)
    def on_destroy(self):
        # Unregister from IRC broadcasts before dropping the connection so
        # no events arrive for a half-destroyed panel.
        print("NetplayPanel.on_destroy")
        IRCBroadcaster.remove_listener(self)
        self.netplay.disconnect()
    def on_show(self):
        # Connect lazily, the first time the panel is shown, so IRC is never
        # opened for users who do not visit the net play page.  (A leftover
        # early-return disabling this panel used to live here.)
        if not self.netplay.is_connected():
            self.netplay.connect()
        self.input_field.focus()
    def on_select_channel(self, index):
        # ListView callback: make the clicked channel the active one.  The
        # actual UI switch happens when the resulting IRC "active_channel"
        # event comes back through on_irc.
        if index is None:
            return
        channel = self.channel_list.get_item(index)
        self.netplay.irc.set_active_channel_name(channel)
        self.input_field.focus()
def on_input(self):
command = self.input_field.get_text().strip()
if not command:
return
if self.netplay.handle_command(command):
pass
else:
self.netplay.irc.handle_command(command)
self.input_field.set_text("")
    def set_active_channel(self, channel):
        # Switch the UI to *channel*: replay its backlog into the text area,
        # refresh the nick list, and highlight it in the channel list.
        if channel == self.active_channel:
            return
        self.text_area.set_text("")
        # self.text_area.append_text(IRC.channel(channel).get_text())
        ch = self.netplay.irc.channel(channel)
        # Replay line-by-line so each line keeps its stored color.
        for i, line in enumerate(ch.lines):
            self.text_area.append_text(line, ch.colors[i])
        self.active_channel = channel
        self.update_nick_list()
        for i in range(self.channel_list.get_item_count()):
            if self.channel_list.get_item(i) == channel:
                self.channel_list.set_index(i)
def update_channel_list(self):
items = sorted(self.netplay.irc.channels.keys())
# items[0] = "IRC ({0})".format(Settings.get_irc_server())
# items[0] = Settings.get_irc_server()
self.channel_list.set_items(items)
def update_nick_list(self):
items = self.netplay.irc.channel(self.active_channel).get_nick_list()
self.nick_list.set_items(items)
    def on_irc(self, key, args):
        """IRCBroadcaster callback: route IRC events to the matching UI update."""
        if key == "active_channel":
            self.set_active_channel(args["channel"])
        elif key == "nick_list":
            # Only redraw when the update concerns the channel being shown.
            if args["channel"] == self.active_channel:
                self.update_nick_list()
        elif key == "channel_list":
            self.update_channel_list()
        elif key == "message":
            if args["channel"] == self.active_channel:
                self.text_area.append_text(
                    args["message"], color=args["color"]
                )
            # Flash/alert the window on any incoming message.
            # NOTE(review): original indentation was ambiguous in this copy;
            # confirm whether alert() is gated on the active-channel check.
            self.window.alert()
| gpl-2.0 |
sodafree/backend | build/lib.linux-i686-2.7/django/core/management/commands/flush.py | 81 | 3662 | from optparse import make_option
from django.conf import settings
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.core.management import call_command
from django.core.management.base import NoArgsCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import sql_flush, emit_post_sync_signal
from django.utils.importlib import import_module
class Command(NoArgsCommand):
    option_list = NoArgsCommand.option_list + (
        make_option('--noinput', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.'),
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database to flush. '
                'Defaults to the "default" database.'),
    )
    help = ('Returns the database to the state it was in immediately after '
            'syncdb was executed. This means that all data will be removed '
            'from the database, any post-synchronization handlers will be '
            're-executed, and the initial_data fixture will be re-installed.')

    def handle_noargs(self, **options):
        """Flush the selected database, re-emit post-sync signals, and
        reinstall the initial_data fixture (after optional confirmation)."""
        db = options.get('database')
        connection = connections[db]
        verbosity = int(options.get('verbosity'))
        interactive = options.get('interactive')

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError:
                pass

        # SQL statements that truncate only Django-managed tables.
        sql_list = sql_flush(self.style, connection, only_django=True)

        if interactive:
            confirm = raw_input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to the state it was in after syncdb.
Are you sure you want to do this?

    Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
        else:
            confirm = 'yes'

        if confirm == 'yes':
            try:
                cursor = connection.cursor()
                for sql in sql_list:
                    cursor.execute(sql)
            except Exception, e:
                # Roll back so the connection is left in a usable state.
                transaction.rollback_unless_managed(using=db)
                raise CommandError("""Database %s couldn't be flushed. Possible reasons:
  * The database isn't running or isn't configured correctly.
  * At least one of the expected database tables doesn't exist.
  * The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.
The full error: %s""" % (connection.settings_dict['NAME'], e))
            transaction.commit_unless_managed(using=db)

            # Emit the post sync signal. This allows individual
            # applications to respond as if the database had been
            # sync'd from scratch.
            all_models = []
            for app in models.get_apps():
                all_models.extend([
                    m for m in models.get_models(app, include_auto_created=True)
                    if router.allow_syncdb(db, m)
                ])
            emit_post_sync_signal(set(all_models), verbosity, interactive, db)

            # Reinstall the initial_data fixture.
            kwargs = options.copy()
            kwargs['database'] = db
            call_command('loaddata', 'initial_data', **kwargs)
        else:
            print "Flush cancelled."
| bsd-3-clause |
mycFelix/heron | heron/tools/ui/src/python/handlers/__init__.py | 4 | 1938 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' handler module '''
__all__ = ['handlers']
from heron.tools.ui.src.python.handlers import api
from heron.tools.ui.src.python.handlers.base import BaseHandler
from heron.tools.ui.src.python.handlers.mainhandler import MainHandler
from heron.tools.ui.src.python.handlers.notfound import NotFoundHandler
################################################################################
# Handlers for topology related requests
################################################################################
from heron.tools.ui.src.python.handlers.topology import ContainerFileDataHandler
from heron.tools.ui.src.python.handlers.topology import ContainerFileDownloadHandler
from heron.tools.ui.src.python.handlers.topology import ContainerFileHandler
from heron.tools.ui.src.python.handlers.topology import ContainerFileStatsHandler
from heron.tools.ui.src.python.handlers.topology import ListTopologiesHandler
from heron.tools.ui.src.python.handlers.topology import TopologyPlanHandler
from heron.tools.ui.src.python.handlers.topology import TopologyConfigHandler
from heron.tools.ui.src.python.handlers.topology import TopologyExceptionsPageHandler
| apache-2.0 |
lasko2112/legend-of-hondo | MMOCoreORB/utils/gmock-1.6.0/gtest/test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# BUG FIX: the original read "IS_WINDOWS = os.name = 'nt'" -- a chained
# assignment that *rebound* os.name to 'nt' on every platform instead of
# testing it.  An equality comparison is what was intended.
IS_WINDOWS = os.name == 'nt'

COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets environment variable env_var to value; removes it when value is None."""

  if value is None:
    # pop with a default is a no-op when the variable is absent, matching
    # the original membership-checked delete.
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code.

  term: value for the TERM environment variable (None unsets it).
  color_env_var: value for GTEST_COLOR (None unsets it).
  color_flag: value for --gtest_color (None omits the flag).
  """
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)

  if color_flag is None:
    args = []
  else:
    args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
  p = gtest_test_utils.Subprocess([COMMAND] + args)
  # A child that did not exit normally is treated as truthy; otherwise the
  # child's exit code is returned.  NOTE(review): the child presumably
  # signals "used color" via a nonzero exit code -- confirm against
  # gtest_color_test_.cc.
  return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """Exercises combinations of TERM, GTEST_COLOR and --gtest_color."""

  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    # The TERM-based heuristics only apply on non-Windows platforms.
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    self.assert_(UsesColor('linux', None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))

  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      # 'auto' defers to the TERM heuristic.
      self.assert_(not UsesColor('emacs', None, 'auto'))
    self.assert_(UsesColor('xterm', None, 'auto'))
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))

  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
      self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))

  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The command-line flag takes precedence over the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))

  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))

    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
| agpl-3.0 |
Omegaphora/external_chromium_org_tools_grit | grit/xtb_reader.py | 60 | 4925 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Fast and efficient parser for XTB files.
'''
import sys
import xml.sax
import xml.sax.handler
import grit.node.base
class XtbContentHandler(xml.sax.handler.ContentHandler):
  '''A content handler that calls a given callback function for each
  translation in the XTB file.
  '''

  def __init__(self, callback, defs=None, debug=False, target_platform=None):
    self.callback = callback
    self.debug = debug
    # 0 if we are not currently parsing a translation, otherwise the message
    # ID of that translation.
    self.current_id = 0
    # Empty if we are not currently parsing a translation, otherwise the
    # parts we have for that translation - a list of tuples
    # (is_placeholder, text)
    self.current_structure = []
    # Set to the language ID when we see the <translationbundle> node.
    self.language = ''
    # Keep track of the if block we're inside.  We can't nest ifs.
    self.if_expr = None
    # Root defines to be used with if expr.
    if defs:
      self.defines = defs
    else:
      self.defines = {}
    # Target platform for build; defaults to the host platform.
    if target_platform:
      self.target_platform = target_platform
    else:
      self.target_platform = sys.platform

  def startElement(self, name, attrs):
    """SAX hook: records translation IDs, placeholders, bundle language
    and <if expr="..."> conditions as elements open."""
    if name == 'translation':
      assert self.current_id == 0 and len(self.current_structure) == 0, (
          "Didn't expect a <translation> element here.")
      self.current_id = attrs.getValue('id')
    elif name == 'ph':
      assert self.current_id != 0, "Didn't expect a <ph> element here."
      self.current_structure.append((True, attrs.getValue('name')))
    elif name == 'translationbundle':
      self.language = attrs.getValue('lang')
    elif name in ('if', 'then', 'else'):
      assert self.if_expr is None, "Can't nest <if> or use <else> in xtb files"
      self.if_expr = attrs.getValue('expr')

  def endElement(self, name):
    """SAX hook: on </translation>, fires the callback (subject to any
    enclosing <if> condition) and resets the per-translation state."""
    if name == 'translation':
      assert self.current_id != 0
      defs = self.defines
      # NOTE(review): pp_ifdef/pp_if appear unused here -- presumably
      # leftovers from an older expression-evaluation scheme; confirm
      # before removing.
      def pp_ifdef(define):
        return define in defs
      def pp_if(define):
        return define in defs and defs[define]
      # If we're in an if block, only call the callback (add the translation)
      # if the expression is True.
      should_run_callback = True
      if self.if_expr:
        should_run_callback = grit.node.base.Node.EvaluateExpression(
            self.if_expr, self.defines, self.target_platform)
      if should_run_callback:
        self.callback(self.current_id, self.current_structure)
      self.current_id = 0
      self.current_structure = []
    elif name == 'if':
      assert self.if_expr is not None
      self.if_expr = None

  def characters(self, content):
    """SAX hook: accumulates literal text while inside a <translation>."""
    if self.current_id != 0:
      # We are inside a <translation> node so just add the characters to our
      # structure.
      #
      # This naive way of handling characters is OK because in the XTB format,
      # <ph> nodes are always empty (always <ph name="XXX"/>) and whitespace
      # inside the <translation> node should be preserved.
      self.current_structure.append((False, content))
class XtbErrorHandler(xml.sax.handler.ErrorHandler):
  """SAX error handler that ignores recoverable problems and warnings but
  aborts parsing on fatal errors."""

  def error(self, exception):
    """Recoverable parse errors are deliberately ignored."""
    pass

  def warning(self, exception):
    """Parser warnings are deliberately ignored."""
    pass

  def fatalError(self, exception):
    """Fatal parse errors abort parsing by re-raising the exception."""
    raise exception
def Parse(xtb_file, callback_function, defs=None, debug=False,
          target_platform=None):
  '''Parse xtb_file, making a call to callback_function for every translation
  in the XTB file.

  The callback function must have the signature as described below. The 'parts'
  parameter is a list of tuples (is_placeholder, text). The 'text' part is
  either the raw text (if is_placeholder is False) or the name of the placeholder
  (if is_placeholder is True).

  Args:
    xtb_file: open('fr.xtb')
    callback_function: def Callback(msg_id, parts): pass
    defs: None, or a dictionary of preprocessor definitions.
    debug: Default False. Set True for verbose debug output.
    target_platform: None, or a sys.platform-like identifier of the build
                     target platform.

  Return:
    The language of the XTB, e.g. 'fr'
  '''
  # Start by advancing the file pointer past the DOCTYPE thing, as the TC
  # uses a path to the DTD that only works in Unix.
  # TODO(joi) Remove this ugly hack by getting the TC gang to change the
  # XTB files somehow?
  front_of_file = xtb_file.read(1024)
  # NOTE(review): assumes '<translationbundle' appears within the first
  # 1024 bytes of the file -- verify for unusually long prologues.
  xtb_file.seek(front_of_file.find('<translationbundle'))

  handler = XtbContentHandler(callback=callback_function, defs=defs,
                              debug=debug, target_platform=target_platform)
  xml.sax.parse(xtb_file, handler)
  # The handler records the bundle language from <translationbundle lang=...>.
  assert handler.language != ''
  return handler.language
| bsd-2-clause |
ericdill/scikit-xray | skbeam/io/tests/test_fit2d_save.py | 2 | 3310 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module is for test output.py saving integrated powder
x-ray diffraction intensities into different file formats.
(Output into different file formats, .chi, .dat, .xye, gsas)
Added a test to check the GSAS file reader and file writer
"""
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from skbeam.io.fit2d import fit2d_save, read_fit2d_msk
from numpy.testing import assert_array_equal
def test_save_output_fit2d():
    """Round-trip a random boolean mask through fit2d_save/read_fit2d_msk."""
    filename = "function_values"
    # np.random.random_integers is deprecated and removed in NumPy >= 1.25.
    # randint's upper bound is exclusive, so randint(0, N + 1) reproduces the
    # old inclusive random_integers(0, N) behavior exactly.
    msk = np.random.randint(
        0, 2, (np.random.randint(0, 201),
               np.random.randint(0, 201))).astype(bool)
    fit2d_save(msk, filename, dir_path=None)
    msk2 = read_fit2d_msk(filename)
    assert_array_equal(msk2, msk)
    os.remove("function_values.msk")
| bsd-3-clause |
torqie/esportscms | node_modules/node-gyp/gyp/pylib/gyp/common.py | 64 | 20173 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
# A minimal memoizing decorator.  It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
  """Decorator that caches return values keyed on the positional args."""

  def __init__(self, func):
    self.func = func
    self.cache = {}

  def __call__(self, *args):
    # Only invoke the wrapped function on a cache miss.
    if args not in self.cache:
      self.cache[args] = self.func(*args)
    return self.cache[args]
class GypError(Exception):
  """Error class representing an error, which is to be presented
  to the user.  The main entry point catches this and displays the
  message without a traceback.
  """
  pass
def ExceptionAppend(e, msg):
  """Appends |msg| to exception |e|'s first message argument, in place."""
  if not e.args:
    e.args = (msg,)
  else:
    # Fold the message into the first arg; any remaining args are preserved.
    e.args = ('%s %s' % (e.args[0], msg),) + tuple(e.args[1:])
def FindQualifiedTargets(target, qualified_list):
  """
  Given a list of qualified targets, return the qualified targets whose
  target name equals |target|.
  """
  matches = []
  for qualified in qualified_list:
    if ParseQualifiedTarget(qualified)[1] == target:
      matches.append(qualified)
  return matches
def ParseQualifiedTarget(target):
  """Splits a qualified target into [build_file, target, toolset].

  Missing components come back as None.
  """
  # rpartition on the *last* separator disambiguates the Windows drive
  # letter colon, exactly like the original rsplit(..., 1).
  build_file = None
  if ':' in target:
    build_file, _, target = target.rpartition(':')

  toolset = None
  if '#' in target:
    target, _, toolset = target.rpartition('#')

  return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
  """Resolves |target| into a canonical [build_file, target, toolset] triple.

  |build_file| is the file relative to which |target| is defined, |target|
  is the qualified target, and |toolset| is the default toolset for that
  target.
  """
  # This function resolves a target into a canonical form:
  # - a fully defined build file, either absolute or relative to the current
  # directory
  # - a target name
  # - a toolset
  #
  # build_file is the file relative to which 'target' is defined.
  # target is the qualified target.
  # toolset is the default toolset for that target.
  [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)

  if parsed_build_file:
    if build_file:
      # If a relative path, parsed_build_file is relative to the directory
      # containing build_file.  If build_file is not in the current directory,
      # parsed_build_file is not a usable path as-is.  Resolve it by
      # interpreting it as relative to build_file.  If parsed_build_file is
      # absolute, it is usable as a path regardless of the current directory,
      # and os.path.join will return it as-is.
      build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
                                                 parsed_build_file))
      # Further (to handle cases like ../cwd), make it relative to cwd)
      if not os.path.isabs(build_file):
        build_file = RelativePath(build_file, '.')
    else:
      build_file = parsed_build_file

  if parsed_toolset:
    toolset = parsed_toolset

  return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
  """Extracts the build file path from a fully qualified target string."""
  # Extracts the build file from the fully qualified target.
  return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
  """Look up a key in the environment, with fallback to secondary keys
  and finally falling back to a default value."""
  # Environment values are always strings, so a None from .get() means
  # the variable is genuinely unset.
  for candidate in var_list:
    value = os.environ.get(candidate)
    if value is not None:
      return value
  return default
def QualifiedTarget(build_file, target, toolset):
  """Builds the fully-qualified form /path/to/file.gyp:target_name#toolset.

  The '#toolset' suffix is appended only when |toolset| is non-empty.
  """
  qualified = '%s:%s' % (build_file, target)
  if toolset:
    qualified = '%s#%s' % (qualified, toolset)
  return qualified
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
  """Returns a relative path identifying |path| relative to |relative_to|,
  both assumed relative to the current directory.

  If |follow_path_symlink| is true (default) and |path| is a symlink, the
  result refers to the real file behind the symlink; otherwise it refers to
  the symlink itself.
  """
  # Assuming both |path| and |relative_to| are relative to the current
  # directory, returns a relative path that identifies path relative to
  # relative_to.
  # If |follow_symlink_path| is true (default) and |path| is a symlink, then
  # this method returns a path to the real file represented by |path|. If it is
  # false, this method returns a path to the symlink. If |path| is not a
  # symlink, this option has no effect.

  # Convert to normalized (and therefore absolute paths).
  if follow_path_symlink:
    path = os.path.realpath(path)
  else:
    path = os.path.abspath(path)
  relative_to = os.path.realpath(relative_to)

  # On Windows, we can't create a relative path to a different drive, so just
  # use the absolute path.
  if sys.platform == 'win32':
    if (os.path.splitdrive(path)[0].lower() !=
        os.path.splitdrive(relative_to)[0].lower()):
      return path

  # Split the paths into components.
  path_split = path.split(os.path.sep)
  relative_to_split = relative_to.split(os.path.sep)

  # Determine how much of the prefix the two paths share.
  prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))

  # Put enough ".." components to back up out of relative_to to the common
  # prefix, and then append the part of path_split after the common prefix.
  relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
                   path_split[prefix_len:]

  if len(relative_split) == 0:
    # The paths were the same.
    return ''

  # Turn it back into a string and we're done.
  return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
  """Given a path like foo/bar that is relative to toplevel_dir, return
  the inverse relative path back to the toplevel_dir.

  E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
  should always produce the empty string, unless the path contains symlinks.
  """
  if not path:
    return path
  # A missing toplevel_dir means the current directory.
  toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
  return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
  """Like RelativePath but returns |path| unchanged if it is absolute."""
  if os.path.isabs(path):
    return path
  return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
  """Resolves |path|, which is relative to the directory containing
  |relative_to| (itself relative to the current directory), into a path
  relative to the current directory."""
  return os.path.normpath(
      os.path.join(os.path.dirname(relative_to), path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
  """Encodes |argument| suitably for consumption by POSIX shells.

  The result is quoted and backslash-escaped so a POSIX shell sees a literal
  representing |argument|, except that $-prefixed parameter expansions are
  left intact so the shell can still expand them.
  """
  text = argument if isinstance(argument, str) else str(argument)
  # _quote decides whether double-quoting is needed; _escape marks the
  # characters that must be backslash-escaped either way.
  quote = '"' if _quote.search(text) else ''
  return '%s%s%s' % (quote, _escape.sub(r'\\\1', text), quote)
def EncodePOSIXShellList(list):
  """Encodes |list| suitably for consumption by POSIX shells.

  Each item is encoded with EncodePOSIXShellArgument and the results are
  joined with a space as the argument separator.
  """
  # NOTE: the parameter name shadows the builtin 'list'; kept unchanged for
  # backward compatibility with keyword callers.
  return ' '.join(EncodePOSIXShellArgument(argument) for argument in list)
def DeepDependencyTargets(target_dicts, roots):
  """Returns the recursive list of target dependencies (roots excluded)."""
  visited = set()
  pending = set(roots)
  while pending:
    current = pending.pop()
    if current in visited:
      continue
    visited.add(current)
    # Queue both regular and original (pre-rewrite) dependency lists.
    spec = target_dicts[current]
    for key in ('dependencies', 'dependencies_original'):
      pending.update(spec.get(key, []))
  return list(visited - set(roots))
def BuildFileTargets(target_list, build_file):
  """From a target_list, returns the subset defined in |build_file|.
  """
  return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
  """Returns all targets (direct and dependencies) for the specified build_file.
  """
  # Direct targets first, followed by their transitive dependencies.
  bftargets = BuildFileTargets(target_list, build_file)
  deptargets = DeepDependencyTargets(target_dicts, bftargets)
  return bftargets + deptargets
def WriteOnDiff(filename):
  """Write to a file only if the new contents differ.

  Arguments:
    filename: name of the file to potentially write to.
  Returns:
    A file like object which will write to temporary file and only overwrite
    the target if it differs (on close).
  """

  class Writer(object):
    """Wrapper around file which only covers the target if it differs."""

    def __init__(self):
      # Pick temporary file in the same directory as the target so the final
      # os.rename is an atomic same-filesystem move.
      tmp_fd, self.tmp_path = tempfile.mkstemp(
          suffix='.tmp',
          prefix=os.path.split(filename)[1] + '.gyp.',
          dir=os.path.split(filename)[0])
      try:
        self.tmp_file = os.fdopen(tmp_fd, 'wb')
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise

    def __getattr__(self, attrname):
      # Delegate everything else to self.tmp_file
      return getattr(self.tmp_file, attrname)

    def close(self):
      try:
        # Close tmp file.
        self.tmp_file.close()
        # Determine if different.
        same = False
        try:
          same = filecmp.cmp(self.tmp_path, filename, False)
        except OSError, e:
          # ENOENT simply means the target doesn't exist yet.
          if e.errno != errno.ENOENT:
            raise
        if same:
          # The new file is identical to the old one, just get rid of the new
          # one.
          os.unlink(self.tmp_path)
        else:
          # The new file is different from the old one, or there is no old one.
          # Rename the new file to the permanent name.
          #
          # tempfile.mkstemp uses an overly restrictive mode, resulting in a
          # file that can only be read by the owner, regardless of the umask.
          # There's no reason to not respect the umask here, which means that
          # an extra hoop is required to fetch it and reset the new file's mode.
          #
          # No way to get the umask without setting a new one?  Set a safe one
          # and then set it back to the old value.
          umask = os.umask(077)
          os.umask(umask)
          os.chmod(self.tmp_path, 0666 & ~umask)
          if sys.platform == 'win32' and os.path.exists(filename):
            # NOTE: on windows (but not cygwin) rename will not replace an
            # existing file, so it must be preceded with a remove.  Sadly there
            # is no way to make the switch atomic.
            os.remove(filename)
          os.rename(self.tmp_path, filename)
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise

  # The diff-and-swap happens when the caller close()s the wrapper.
  return Writer()
def EnsureDirExists(path):
  """Create the directory that will contain |path|, if it is missing."""
  directory = os.path.dirname(path)
  try:
    os.makedirs(directory)
  except OSError:
    # Either the directory already exists or it cannot be created; in both
    # cases the eventual write to |path| surfaces any real problem.
    pass
def GetFlavor(params):
  """Returns |params.flavor| if it's set, the system's default flavor else."""
  # An explicit flavor in the params always wins.
  if 'flavor' in params:
    return params['flavor']

  # Exact sys.platform values that map directly to a flavor.
  exact = {
      'cygwin': 'win',
      'win32': 'win',
      'darwin': 'mac',
  }
  if sys.platform in exact:
    return exact[sys.platform]

  # Platforms matched by prefix (e.g. 'freebsd8', 'sunos5').
  prefixed = (
      ('sunos', 'solaris'),
      ('freebsd', 'freebsd'),
      ('openbsd', 'openbsd'),
      ('netbsd', 'netbsd'),
      ('aix', 'aix'),
      ('zos', 'zos'),
      ('os390', 'zos'),
  )
  for prefix, flavor in prefixed:
    if sys.platform.startswith(prefix):
      return flavor

  return 'linux'
def CopyTool(flavor, out_path):
  """Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
  to |out_path|."""
  # aix and solaris just need flock emulation. mac and win use more complicated
  # support scripts.
  prefix = {
      'aix': 'flock',
      'solaris': 'flock',
      'mac': 'mac',
      'win': 'win'
  }.get(flavor, None)
  if not prefix:
    # Flavors without a support script (e.g. linux) need nothing copied.
    return

  # Slurp input file.
  source_path = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
  with open(source_path) as source_file:
    source = source_file.readlines()

  # Add header and write it out.
  tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
  with open(tool_path, 'w') as tool_file:
    tool_file.write(
        ''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))

  # Make file executable.  (0755 is a Python 2 octal literal, rwxr-xr-x.)
  os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
  """Return seq's items with duplicates removed, preserving first-seen order.

  |idfun| optionally maps each item to the key used for duplicate detection;
  by default the item itself is the key.
  """
  if idfun is None:
    idfun = lambda x: x
  seen = set()
  unique_items = []
  for item in seq:
    marker = idfun(item)
    if marker not in seen:
      seen.add(marker)
      unique_items.append(item)
  return unique_items
# Based on http://code.activestate.com/recipes/576694/.
# collections.MutableSet moved to collections.abc in Python 3.3 and the old
# alias was removed in Python 3.10; import with a fallback so this class keeps
# working on both old and new interpreters.
try:
  from collections.abc import MutableSet as _MutableSet
except ImportError:  # Python 2
  from collections import MutableSet as _MutableSet


class OrderedSet(_MutableSet):
  """A set remembering insertion order (doubly linked list over a dict)."""

  def __init__(self, iterable=None):
    self.end = end = []
    end += [None, end, end]  # sentinel node for doubly linked list
    self.map = {}            # key --> [key, prev, next]
    if iterable is not None:
      self |= iterable

  def __len__(self):
    return len(self.map)

  def __contains__(self, key):
    return key in self.map

  def add(self, key):
    # Append at the tail (just before the sentinel); no-op for known keys.
    if key not in self.map:
      end = self.end
      curr = end[1]
      curr[2] = end[1] = self.map[key] = [key, curr, end]

  def discard(self, key):
    # Unlink the node from the list; no-op for unknown keys.
    if key in self.map:
      key, prev_item, next_item = self.map.pop(key)
      prev_item[2] = next_item
      next_item[1] = prev_item

  def __iter__(self):
    end = self.end
    curr = end[2]
    while curr is not end:
      yield curr[0]
      curr = curr[2]

  def __reversed__(self):
    end = self.end
    curr = end[1]
    while curr is not end:
      yield curr[0]
      curr = curr[1]

  # The second argument is an addition that causes a pylint warning.
  def pop(self, last=True):  # pylint: disable=W0221
    if not self:
      raise KeyError('set is empty')
    key = self.end[1][0] if last else self.end[2][0]
    self.discard(key)
    return key

  def __repr__(self):
    if not self:
      return '%s()' % (self.__class__.__name__,)
    return '%s(%r)' % (self.__class__.__name__, list(self))

  def __eq__(self, other):
    # Order matters only when comparing against another OrderedSet.
    if isinstance(other, OrderedSet):
      return len(self) == len(other) and list(self) == list(other)
    return set(self) == set(other)

  # Extensions to the recipe.
  def update(self, iterable):
    for i in iterable:
      if i not in self:
        self.add(i)
class CycleError(Exception):
  """An exception raised when an unexpected cycle is detected."""

  def __init__(self, nodes):
    # The collection of nodes that were in flight when the cycle was found.
    self.nodes = nodes

  def __str__(self):
    return 'CycleError: cycle involving: %s' % (self.nodes,)
def TopologicallySorted(graph, get_edges):
  r"""Topologically sort based on a user provided edge definition.

  Args:
    graph: A list of node names.
    get_edges: A function mapping from node name to a hashable collection
               of node names which this node has outgoing edges to.
  Returns:
    A list containing all of the node in graph in topological order.
    It is assumed that calling get_edges once for each node and caching is
    cheaper than repeatedly calling get_edges.
  Raises:
    CycleError in the event of a cycle.
  """
  get_edges = memoize(get_edges)
  visited = set()
  in_progress = set()
  post_order = []

  def Visit(node):
    # Depth-first traversal; hitting a node that is still on the current
    # path means the graph contains a cycle.
    if node in in_progress:
      raise CycleError(in_progress)
    if node in visited:
      return
    visited.add(node)
    in_progress.add(node)
    for neighbor in get_edges(node):
      Visit(neighbor)
    in_progress.remove(node)
    post_order.append(node)

  for node in sorted(graph):
    Visit(node)
  # Reversed depth-first post-order is a valid topological order.
  post_order.reverse()
  return post_order
def CrossCompileRequested():
  # TODO: figure out how to not build extra host objects in the
  # non-cross-compile case when this is enabled, and enable unconditionally.
  result = None
  for var in ('GYP_CROSSCOMPILE',
              'AR_host', 'CC_host', 'CXX_host',
              'AR_target', 'CC_target', 'CXX_target'):
    result = os.environ.get(var)
    if result:
      break
  return result
| lgpl-3.0 |
u55/PyIOTech | examples/DaqTimer.py | 1 | 1941 | """
This program will run for 30 seconds on a python-based timer. Input will be
taken every 0.5 seconds and the output will change every 5 seconds.
Two timer functions will be set up to run at the same time.
"""
from __future__ import print_function, division
import time
from PyIOTech import daq, daqh
# Device name as registered with the Windows driver.
device_name = b'DaqBoard3K0'

# Total time in seconds.
runtime = 30

# Input
inputChan = 0
inputGain = daqh.DgainX1  # No gain
inputFlags = daqh.DafAnalog | daqh.DafUnsigned | daqh.DafBipolar | daqh.DafDifferential
inputFreq = 2  # Twice per second

# Output
outChan = 0  # DAC0 channel
outType = daqh.DddtLocal  # DAC channel location
outData = 3.3  # In Volts
outSwitch = True  # Toggled by daqOutput() between outData and 0 V
outFreq = 0.2  # Once per 5 seconds

# Our device's bipolar voltage range is -10.0 V to +10.0 V with gain of 1.
max_voltage = 10.0
# Our device is a 16 bit ADC.
bit_depth = 16

# Data to read; daqInput() appends one raw ADC sample per call.
data = []
# Timer functions
def daqOutput(dev, outSwitch):
    """Toggle DAC0 between outData volts and 0 V; return the flipped state."""
    level = outData if outSwitch else 0.0
    dev.DacWt(outType, outChan, level)
    return not outSwitch
def daqInput(dev):
    """Read one sample from the configured analog channel into `data`."""
    sample = dev.AdcRd(inputChan, inputGain, inputFlags)
    # The data list is a global variable and can be accessed anywhere.
    data.append(sample)
dev = None
try:
    # Setup the device.
    dev = daq.daqDevice(device_name)

    # Outer loop toggles the output once per 1/outFreq seconds.
    for _ in range(int(runtime * outFreq)):
        outSwitch = daqOutput(dev, outSwitch)
        # Execute 10 times, or for 5 seconds, then switch the output.
        for _ in range(int(inputFreq / outFreq)):
            daqInput(dev)
            time.sleep(1.0 / inputFreq)

    for sample in data:
        # Convert sample from unsigned integer value to bipolar voltage.
        print(sample * max_voltage * 2 / (2 ** bit_depth) - max_voltage)
finally:
    # BUGFIX: `dev` was unbound here when daqDevice() raised, so the original
    # error got replaced by a NameError.  Only close a device that was opened.
    if dev is not None:
        dev.Close()
bbc/kamaelia | Code/Python/Apps/Europython09/App/MiniAxon-2.py | 3 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class microprocess(object):
    """Base class for generator-backed tasks driven by a scheduler."""

    def __init__(self):
        super(microprocess, self).__init__()

    def main(self):
        """Default generator body: hand control back once, then finish."""
        yield 1
class printer(microprocess):
    """A microprocess that prints its tag each time it is scheduled."""

    def __init__(self, tag):
        super(printer, self).__init__()
        self.tag = tag

    def main(self):
        # BUGFIX: use the print() function form instead of the Python-2-only
        # print statement; with a single argument it behaves identically on
        # Python 2 and also works on Python 3.
        while 1:
            yield 1
            print(self.tag)
class scheduler(microprocess):
    """Round-robin runner making up to 100 passes over active microthreads."""

    def __init__(self):
        super(scheduler, self).__init__()
        self.active = []    # generators run during the current pass
        self.newqueue = []  # generators still runnable for the next pass

    def main(self):
        for i in range(100):  # BUGFIX: range() instead of Python-2-only xrange()
            for current in self.active:
                yield 1
                try:
                    # BUGFIX: next(current) works on Python 2.6+ and 3.x,
                    # unlike the removed Python-2 generator method .next().
                    result = next(current)
                    # BUGFIX: compare the -1 sentinel by value; `is not -1`
                    # relied on CPython's small-integer caching.
                    if result != -1:
                        self.newqueue.append(current)
                except StopIteration:
                    # Finished generators simply drop out of the queue.
                    pass
            self.active = self.newqueue
            self.newqueue = []

    def activateMicroprocess(self, someprocess):
        """Turn `someprocess` into a microthread and queue it for the next pass."""
        microthread = someprocess.main()
        self.newqueue.append(microthread)
# Create two printers and hand their generators to a fresh scheduler.
X = printer("Hello World")
Y = printer("Game Over") # :-)
myscheduler = scheduler()
myscheduler.activateMicroprocess(X)
myscheduler.activateMicroprocess(Y)
# Drive the scheduler to completion; each yielded value is ignored.
for _ in myscheduler.main():
    pass
| apache-2.0 |
fxtentacle/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/ensurebugisopenandassigned.py | 135 | 2061 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.steps.abstractstep import AbstractStep
class EnsureBugIsOpenAndAssigned(AbstractStep):
    """Make sure the bug we are about to post a patch to is open and assigned.

    Unassigned bugs are reassigned and closed bugs are reopened via the
    tool's bug tracker interface."""

    def run(self, state):
        bug = self.cached_lookup(state, "bug")
        tracker = self._tool.bugs
        if bug.is_unassigned():
            tracker.reassign_bug(bug.id())
        if bug.is_closed():
            # This step currently only runs before PostDiff steps, so a fixed
            # reopen message is good enough (see the original FIXME).
            tracker.reopen_bug(bug.id(), "Reopening to attach new patch.")
ShinySide/HispAsian_Lollipop_G6 | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}   # tid --> futex address the thread is currently waiting on
thread_blocktime = {}  # tid --> timestamp (ns) when the thread started waiting
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
# NOTE(review): process_names is initialised twice (also four lines above);
# the first assignment is redundant and could be dropped.
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    """Record which thread starts waiting on which futex, and when."""
    if (op & FUTEX_CMD_MASK) != FUTEX_WAIT:
        return # we don't care about originators of WAKE events
    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
                             nr, ret):
    """On return from futex(), account how long a tracked waiter was blocked."""
    # BUGFIX: dict.has_key() only exists on Python 2; the `in` operator is
    # equivalent and works on both Python 2 and Python 3 perf builds.
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]
def trace_begin():
    """Called by perf before event processing starts."""
    # BUGFIX: use the print() function form, valid on both Python 2 (single
    # argument) and Python 3, instead of the py2-only print statement.
    print("Press control+C to stop and show the summary")
def trace_end():
    """Called by perf after the last event: print per-(tid, lock) wait stats."""
    # BUGFIX: converted from the Python-2-only print statement to the print()
    # function so the script also runs under a Python 3 perf build.
    for (tid, lock) in lock_waits:
        min, max, avg, count = lock_waits[tid, lock]
        print("%s[%d] lock %x contended %d times, %d avg ns" %
              (process_names[tid], tid, lock, count, avg))
| gpl-2.0 |
jeblair/GitPython | git/util.py | 1 | 32349 | # utils.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import contextlib
from functools import wraps
import getpass
import logging
import os
import platform
import subprocess
import re
import shutil
import stat
import time
try:
from unittest import SkipTest
except ImportError:
from unittest2 import SkipTest
from gitdb.util import (# NOQA @IgnorePep8
make_sha,
LockedFD, # @UnusedImport
file_contents_ro, # @UnusedImport
file_contents_ro_filepath, # @UnusedImport
LazyMixin, # @UnusedImport
to_hex_sha, # @UnusedImport
to_bin_sha, # @UnusedImport
bin_to_hex, # @UnusedImport
hex_to_bin, # @UnusedImport
)
from git.compat import is_win
import os.path as osp
from .compat import (
MAXSIZE,
defenc,
PY3
)
from .exc import InvalidGitRepositoryError
# NOTE: Some of the unused imports might be used/imported by others.
# Handle once test-cases are back up and running.
# Most of these are unused here, but are for use by git-python modules so these
# don't see gitdb all the time. Flake of course doesn't like it.
# Explicit public API of this module.
__all__ = ("stream_copy", "join_path", "to_native_path_windows", "to_native_path_linux",
           "join_path_native", "Stats", "IndexFileSHA1Writer", "Iterable", "IterableList",
           "BlockingLockFile", "LockFile", 'Actor', 'get_user_id', 'assure_directory_exists',
           'RemoteProgress', 'CallableRemoteProgress', 'rmtree', 'unbare_repo',
           'HIDE_WINDOWS_KNOWN_ERRORS')

log = logging.getLogger(__name__)

#: We need an easy way to see if Appveyor TCs start failing,
#: so the errors marked with this var are considered "acknowledged" ones, awaiting remedy,
#: till then, we wish to hide them.
#: NOTE(review): any non-empty value of the env var (even "0") is truthy
#: here - confirm that is the intended way to disable the flag.
HIDE_WINDOWS_KNOWN_ERRORS = is_win and os.environ.get('HIDE_WINDOWS_KNOWN_ERRORS', True)
HIDE_WINDOWS_FREEZE_ERRORS = is_win and os.environ.get('HIDE_WINDOWS_FREEZE_ERRORS', True)
#{ Utility Methods
def unbare_repo(func):
    """Methods with this decorator raise InvalidGitRepositoryError if they
    encounter a bare repository"""
    @wraps(func)
    def checked(self, *args, **kwargs):
        if not self.repo.bare:
            return func(self, *args, **kwargs)
        raise InvalidGitRepositoryError(
            "Method '%s' cannot operate on bare repositories" % func.__name__)
    # END checked method
    return checked
@contextlib.contextmanager
def cwd(new_dir):
    """Context manager temporarily switching the process working directory.

    Yields `new_dir` and always restores the previous directory on exit."""
    previous_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield new_dir
    finally:
        os.chdir(previous_dir)
def rmtree(path):
    """Remove the given directory tree recursively.

    :note: we use shutil rmtree but adjust its behaviour to see whether files that
        couldn't be deleted are read-only. Windows will not remove them in that case"""

    def handler(func, path, exc_info):
        # Deletion failed; assume a read-only entry and make it writable
        # before retrying exactly once.
        os.chmod(path, stat.S_IWUSR)
        try:
            func(path)  # Will scream if still not possible to delete.
        except Exception as ex:
            if HIDE_WINDOWS_KNOWN_ERRORS:
                raise SkipTest("FIXME: fails with: PermissionError\n  %s", ex)
            raise

    return shutil.rmtree(path, False, handler)
def rmfile(path):
    """Ensure file deleted also on *Windows* where read-only files need special treatment."""
    if not osp.isfile(path):
        return
    if is_win:
        # Windows refuses to delete read-only files; make it writable first.
        os.chmod(path, 0o777)
    os.remove(path)
def stream_copy(source, destination, chunk_size=512 * 1024):
    """Copy all data from the source stream into the destination stream in chunks
    of size chunk_size

    :return: amount of bytes written"""
    bytes_written = 0
    while True:
        chunk = source.read(chunk_size)
        destination.write(chunk)
        bytes_written += len(chunk)
        # A short read signals the end of the source stream.
        if len(chunk) < chunk_size:
            return bytes_written
def join_path(a, *p):
    """Join path tokens together similar to osp.join, but always use
    '/' instead of possibly '\' on windows."""
    result = a
    for token in p:
        if not token:
            # Skip empty tokens entirely.
            continue
        if token.startswith('/'):
            # A leading slash on the token is dropped before appending.
            result += token[1:]
        elif result == '' or result.endswith('/'):
            result += token
        else:
            result += '/' + token
    # END for each path token to add
    return result
# Platform-dependent separator conversion.  NOTE: on non-Windows platforms
# only to_native_path_linux is defined; to_native_path_windows does not exist.
if is_win:
    def to_native_path_windows(path):
        # Convert '/' separators to '\' for native Windows consumption.
        return path.replace('/', '\\')

    def to_native_path_linux(path):
        # Convert '\' separators back to posix-style '/'.
        return path.replace('\\', '/')

    to_native_path = to_native_path_windows
else:
    # no need for any work on linux
    def to_native_path_linux(path):
        return path

    to_native_path = to_native_path_linux
def join_path_native(a, *p):
    """
    As join path, but makes sure an OS native path is returned. This is only
    needed to play it safe on my dear windows and to assure nice paths that only
    use '\'"""
    # Delegates to the platform-appropriate converter selected at import time.
    return to_native_path(join_path(a, *p))
def assure_directory_exists(path, is_file=False):
    """Assure that the directory pointed to by path exists.

    :param is_file: If True, path is assumed to be a file and handled correctly.
        Otherwise it must be a directory
    :return: True if the directory was created, False if it already existed"""
    if is_file:
        path = osp.dirname(path)
    # END handle file
    if osp.isdir(path):
        return False
    try:
        os.makedirs(path)
    except OSError:
        # BUGFIX: another process may create the directory between our isdir()
        # check and makedirs() (TOCTOU race); treat that as "already existed"
        # instead of failing, and re-raise genuine errors.
        if not osp.isdir(path):
            raise
        return False
    return True
def _get_exe_extensions():
try:
winprog_exts = tuple(p.upper() for p in os.environ['PATHEXT'].split(os.pathsep))
except:
winprog_exts = ('.BAT', 'COM', '.EXE')
return winprog_exts
def py_where(program, path=None):
    """Return all matches of `program` on `path` (defaults to $PATH),
    honouring Windows executable extensions from %PATHEXT%.

    From: http://stackoverflow.com/a/377028/548792
    """
    try:
        winprog_exts = tuple(p.upper() for p in os.environ['PATHEXT'].split(os.pathsep))
    except KeyError:
        # BUGFIX: narrowed the bare `except` to KeyError and fixed the missing
        # dot in the fallback ('COM' -> '.COM'), matching _get_exe_extensions().
        winprog_exts = is_win and ('.BAT', '.COM', '.EXE') or ()

    def is_exec(fpath):
        # Executable file; on Windows additionally require a known extension.
        return osp.isfile(fpath) and os.access(fpath, os.X_OK) and (
            os.name != 'nt' or not winprog_exts or any(fpath.upper().endswith(ext)
                                                       for ext in winprog_exts))

    progs = []
    if not path:
        path = os.environ["PATH"]
    for folder in path.split(os.pathsep):
        folder = folder.strip('"')
        if folder:
            exe_path = osp.join(folder, program)
            # Try the bare name plus each Windows executable extension.
            for f in [exe_path] + ['%s%s' % (exe_path, e) for e in winprog_exts]:
                if is_exec(f):
                    progs.append(f)
    return progs
def _cygexpath(drive, path):
    """Helper for the cygpath parser table: convert a (drive, path) pair into
    a cygwin-style path ('/cygdrive/<d>/...', forward slashes only)."""
    if osp.isabs(path) and not drive:
        ## Invoked from `cygpath()` directly with `D:Apps\123`?
        # It's an error, leave it alone just slashes)
        p = path
    else:
        p = path and osp.normpath(osp.expandvars(osp.expanduser(path)))
        if osp.isabs(p):
            if drive:
                # Confusing, maybe a remote system should expand vars.
                p = path
            else:
                p = cygpath(p)
        elif drive:
            p = '/cygdrive/%s/%s' % (drive.lower(), p)
    # Normalise every remaining backslash to a forward slash.
    return p.replace('\\', '/')
# (regex, parser-callable, recurse-into-cygpath?) triplets tried in order by
# cygpath(); the first matching regex wins.
_cygpath_parsers = (
    ## See: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
    ## and: https://www.cygwin.com/cygwin-ug-net/using.html#unc-paths
    # Extended UNC path: \\?\UNC\server\share\rest -> //server/share/rest
    (re.compile(r"\\\\\?\\UNC\\([^\\]+)\\([^\\]+)(?:\\(.*))?"),
     (lambda server, share, rest_path: '//%s/%s/%s' % (server, share, rest_path.replace('\\', '/'))),
     False
     ),
    # Extended drive path: \\?\C:\rest
    (re.compile(r"\\\\\?\\(\w):[/\\](.*)"),
     _cygexpath,
     False
     ),
    # Plain drive path: C:\rest or C:/rest
    (re.compile(r"(\w):[/\\](.*)"),
     _cygexpath,
     False
     ),
    # file: URL; the remainder is re-run through cygpath() (recurse=True).
    (re.compile(r"file:(.*)", re.I),
     (lambda rest_path: rest_path),
     True),
    (re.compile(r"(\w{2,}:.*)"),  # remote URL, do nothing
     (lambda url: url),
     False),
)
def cygpath(path):
    """Use :meth:`git.cmd.Git.polish_url()` instead, that works on any environment."""
    # Paths already in cygwin form ('/cygdrive/...' or UNC '//...') pass
    # through untouched.
    if not path.startswith(('/cygdrive', '//')):
        # Try each known Windows path shape in turn; the first matching
        # parser wins, and `recurse` re-runs its result through cygpath().
        for regex, parser, recurse in _cygpath_parsers:
            match = regex.match(path)
            if match:
                path = parser(*match.groups())
                if recurse:
                    path = cygpath(path)
                break
        else:
            # No known prefix matched: treat it as a plain relative path.
            path = _cygexpath(None, path)

    return path
_decygpath_regex = re.compile(r"/cygdrive/(\w)(/.*)?")


def decygpath(path):
    """Convert a cygwin '/cygdrive/<d>/...' path back into Windows form."""
    match = _decygpath_regex.match(path)
    if match:
        drive, rest = match.groups()
        path = '%s:%s' % (drive.upper(), rest or '')
    # Windows paths use backslash separators throughout.
    return path.replace('/', '\\')
#: Store boolean flags denoting if a specific Git executable
#: is from a Cygwin installation (since `cache_lru()` unsupported on PY2).
_is_cygwin_cache = {}


def is_cygwin_git(git_executable):
    """:return: True if the given git executable appears to belong to a
        Cygwin installation; always False on non-Windows platforms.

    Results are memoized per executable path in ``_is_cygwin_cache``."""
    if not is_win:
        return False

    #from subprocess import check_output
    is_cygwin = _is_cygwin_cache.get(git_executable)
    if is_cygwin is None:
        is_cygwin = False
        try:
            git_dir = osp.dirname(git_executable)
            if not git_dir:
                res = py_where(git_executable)
                git_dir = osp.dirname(res[0]) if res else None

            ## Just a name given, not a real path.
            # Cygwin ships a `uname` next to git; its output contains
            # 'CYGWIN' when run from a Cygwin installation.
            uname_cmd = osp.join(git_dir, 'uname')
            process = subprocess.Popen([uname_cmd], stdout=subprocess.PIPE,
                                       universal_newlines=True)
            uname_out, _ = process.communicate()
            #retcode = process.poll()
            is_cygwin = 'CYGWIN' in uname_out
        except Exception as ex:
            # Best-effort probe: any failure simply means "not cygwin".
            log.debug('Failed checking if running in CYGWIN due to: %r', ex)
        _is_cygwin_cache[git_executable] = is_cygwin

    return is_cygwin
def get_user_id():
    """:return: string identifying the currently active system user as name@node"""
    user = getpass.getuser()
    host = platform.node()
    return "{}@{}".format(user, host)
def finalize_process(proc, **kwargs):
    """Wait for the process (clone, fetch, pull or push) and handle its errors accordingly"""
    ## TODO: No close proc-streams??
    # kwargs are forwarded verbatim to proc.wait() (e.g. a timeout where the
    # process object supports one).
    proc.wait(**kwargs)
def expand_path(p, expand_vars=True):
    """Expand the user directory (~) and, optionally, environment variables in `p`.

    :param p: the path string to expand, may be None
    :param expand_vars: if True, also expand $VAR / ${VAR} references
    :return: the normalized absolute expanded path, or None if expansion
        failed (e.g. because `p` is None)"""
    try:
        p = osp.expanduser(p)
        if expand_vars:
            p = osp.expandvars(p)
        return osp.normpath(osp.abspath(p))
    # BUGFIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # Exception is broad enough for every expected failure (e.g. p=None).
    except Exception:
        return None
#} END utilities
#{ Classes
class RemoteProgress(object):
    """
    Handler providing an interface to parse progress information emitted by git-push
    and git-fetch and to dispatch callbacks allowing subclasses to react to the progress.
    """
    # One bit per operation, plus the two stage bits BEGIN/END below.
    _num_op_codes = 9
    BEGIN, END, COUNTING, COMPRESSING, WRITING, RECEIVING, RESOLVING, FINDING_SOURCES, CHECKING_OUT = \
        [1 << x for x in range(_num_op_codes)]
    STAGE_MASK = BEGIN | END
    OP_MASK = ~STAGE_MASK

    DONE_TOKEN = 'done.'
    TOKEN_SEPARATOR = ', '

    __slots__ = ('_cur_line',
                 '_seen_ops',
                 'error_lines',  # Lines that started with 'error:' or 'fatal:'.
                 'other_lines')  # Lines not denoting progress (i.e.g. push-infos).
    re_op_absolute = re.compile(r"(remote: )?([\w\s]+):\s+()(\d+)()(.*)")
    re_op_relative = re.compile(r"(remote: )?([\w\s]+):\s+(\d+)% \((\d+)/(\d+)\)(.*)")

    def __init__(self):
        self._seen_ops = list()
        self._cur_line = None
        self.error_lines = []
        self.other_lines = []

    def _parse_progress_line(self, line):
        """Parse progress information from the given line as retrieved by git-push
        or git-fetch.

        - Lines that do not contain progress info are stored in :attr:`other_lines`.
        - Lines that seem to contain an error (i.e. start with error: or fatal:) are stored
          in :attr:`error_lines`.

        :return: list(line, ...) list of lines that could not be processed"""
        # handle
        # Counting objects: 4, done.
        # Compressing objects:  50% (1/2)   \rCompressing objects: 100% (2/2)   \rCompressing objects: 100% (2/2), done.
        self._cur_line = line
        # Once an error was seen, every subsequent line is treated as part of it.
        if len(self.error_lines) > 0 or self._cur_line.startswith(('error:', 'fatal:')):
            self.error_lines.append(self._cur_line)
            return []

        sub_lines = line.split('\r')
        failed_lines = list()
        for sline in sub_lines:
            # find escape characters and cut them away - regex will not work with
            # them as they are non-ascii. As git might expect a tty, it will send them
            last_valid_index = None
            for i, c in enumerate(reversed(sline)):
                if ord(c) < 32:
                    # its a slice index
                    last_valid_index = -i - 1
                # END character was non-ascii
            # END for each character in sline
            if last_valid_index is not None:
                sline = sline[:last_valid_index]
            # END cut away invalid part
            sline = sline.rstrip()

            cur_count, max_count = None, None
            match = self.re_op_relative.match(sline)
            if match is None:
                match = self.re_op_absolute.match(sline)

            if not match:
                self.line_dropped(sline)
                failed_lines.append(sline)
                continue
            # END could not get match

            op_code = 0
            remote, op_name, percent, cur_count, max_count, message = match.groups()  # @UnusedVariable

            # get operation id
            if op_name == "Counting objects":
                op_code |= self.COUNTING
            elif op_name == "Compressing objects":
                op_code |= self.COMPRESSING
            elif op_name == "Writing objects":
                op_code |= self.WRITING
            elif op_name == 'Receiving objects':
                op_code |= self.RECEIVING
            elif op_name == 'Resolving deltas':
                op_code |= self.RESOLVING
            elif op_name == 'Finding sources':
                op_code |= self.FINDING_SOURCES
            elif op_name == 'Checking out files':
                op_code |= self.CHECKING_OUT
            else:
                # Note: On windows it can happen that partial lines are sent
                # Hence we get something like "CompreReceiving objects", which is
                # a blend of "Compressing objects" and "Receiving objects".
                # This can't really be prevented, so we drop the line verbosely
                # to make sure we get informed in case the process spits out new
                # commands at some point.
                self.line_dropped(sline)
                # Note: Don't add this line to the failed lines, as we have to silently
                # drop it
                # NOTE(review): this returns from inside the loop, so any
                # remaining sub-lines of this chunk are discarded as well.
                self.other_lines.extend(failed_lines)
                return failed_lines
            # END handle op code

            # figure out stage
            # The first time an op-code is seen, flag it with BEGIN.
            if op_code not in self._seen_ops:
                self._seen_ops.append(op_code)
                op_code |= self.BEGIN
            # END begin opcode

            if message is None:
                message = ''
            # END message handling

            message = message.strip()
            if message.endswith(self.DONE_TOKEN):
                op_code |= self.END
                message = message[:-len(self.DONE_TOKEN)]
            # END end message handling
            message = message.strip(self.TOKEN_SEPARATOR)

            self.update(op_code,
                        cur_count and float(cur_count),
                        max_count and float(max_count),
                        message)
        # END for each sub line
        self.other_lines.extend(failed_lines)
        return failed_lines

    def new_message_handler(self):
        """
        :return:
            a progress handler suitable for handle_process_output(), passing lines on to this Progress
            handler in a suitable format"""
        def handler(line):
            return self._parse_progress_line(line.rstrip())
        # end
        return handler

    def line_dropped(self, line):
        """Called whenever a line could not be understood and was therefore dropped."""
        pass

    def update(self, op_code, cur_count, max_count=None, message=''):
        """Called whenever the progress changes

        :param op_code:
            Integer allowing to be compared against Operation IDs and stage IDs.

            Stage IDs are BEGIN and END. BEGIN will only be set once for each Operation
            ID as well as END. It may be that BEGIN and END are set at once in case only
            one progress message was emitted due to the speed of the operation.
            Between BEGIN and END, none of these flags will be set

            Operation IDs are all held within the OP_MASK. Only one Operation ID will
            be active per call.
        :param cur_count: Current absolute count of items

        :param max_count:
            The maximum count of items we expect. It may be None in case there is
            no maximum number of items or if it is (yet) unknown.

        :param message:
            In case of the 'WRITING' operation, it contains the amount of bytes
            transferred. It may possibly be used for other purposes as well.

        You may read the contents of the current line in self._cur_line"""
        pass
class CallableRemoteProgress(RemoteProgress):
    """A RemoteProgress implementation forwarding every update to a callable."""
    __slots__ = ('_callable')

    def __init__(self, fn):
        # Store the sink first so it is set even while the base class
        # initialises its own bookkeeping state.
        self._callable = fn
        super(CallableRemoteProgress, self).__init__()

    def update(self, *args, **kwargs):
        self._callable(*args, **kwargs)
class Actor(object):
    """Actors hold information about a person acting on the repository. They
    can be committers and authors or anything with a name and an email as
    mentioned in the git log entries."""
    # PRECOMPILED REGEX
    name_only_regex = re.compile(r'<(.+)>')
    name_email_regex = re.compile(r'(.*) <(.+?)>')

    # ENVIRONMENT VARIABLES
    # read when creating new commits
    env_author_name = "GIT_AUTHOR_NAME"
    env_author_email = "GIT_AUTHOR_EMAIL"
    env_committer_name = "GIT_COMMITTER_NAME"
    env_committer_email = "GIT_COMMITTER_EMAIL"

    # CONFIGURATION KEYS
    conf_name = 'name'
    conf_email = 'email'

    __slots__ = ('name', 'email')

    def __init__(self, name, email):
        self.name = name
        self.email = email

    def __eq__(self, other):
        # NOTE(review): assumes `other` has .name/.email attributes; comparing
        # against an unrelated type raises AttributeError rather than
        # returning NotImplemented - confirm whether that is acceptable.
        return self.name == other.name and self.email == other.email

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.name, self.email))

    def __str__(self):
        return self.name

    def __repr__(self):
        return u'<git.Actor "%s <%s>">' % (self.name, self.email)

    @classmethod
    def _from_string(cls, string):
        """Create an Actor from a string.
        :param string: is the string, which is expected to be in regular git format

                John Doe <jdoe@example.com>

        :return: Actor """
        m = cls.name_email_regex.search(string)
        if m:
            name, email = m.groups()
            return Actor(name, email)
        else:
            m = cls.name_only_regex.search(string)
            if m:
                return Actor(m.group(1), None)
            else:
                # assume best and use the whole string as name
                return Actor(string, None)
            # END special case name
        # END handle name/email matching

    @classmethod
    def _main_actor(cls, env_name, env_email, config_reader=None):
        actor = Actor('', '')
        default_email = get_user_id()
        default_name = default_email.split('@')[0]

        # Resolution order per attribute: environment variable first, then
        # the configuration file, finally the user@host derived default.
        for attr, evar, cvar, default in (('name', env_name, cls.conf_name, default_name),
                                          ('email', env_email, cls.conf_email, default_email)):
            try:
                val = os.environ[evar]
                if not PY3:
                    val = val.decode(defenc)
                # end assure we don't get 'invalid strings'
                setattr(actor, attr, val)
            except KeyError:
                if config_reader is not None:
                    setattr(actor, attr, config_reader.get_value('user', cvar, default))
                # END config-reader handling
                if not getattr(actor, attr):
                    setattr(actor, attr, default)
            # END handle name
        # END for each item to retrieve
        return actor

    @classmethod
    def committer(cls, config_reader=None):
        """
        :return: Actor instance corresponding to the configured committer. It behaves
            similar to the git implementation, such that the environment will override
            configuration values of config_reader. If no value is set at all, it will be
            generated
        :param config_reader: ConfigReader to use to retrieve the values from in case
            they are not set in the environment"""
        return cls._main_actor(cls.env_committer_name, cls.env_committer_email, config_reader)

    @classmethod
    def author(cls, config_reader=None):
        """Same as committer(), but defines the main author. It may be specified in the environment,
        but defaults to the committer"""
        return cls._main_actor(cls.env_author_name, cls.env_author_email, config_reader)
class Stats(object):
    """Aggregated diff statistics as git reports them at the end of a merge.

    Created from the output of a diff operation.

    Attributes:
        total: dict with keys 'insertions', 'deletions', 'lines' and 'files'
            summed over all changed files.
        files: dict mapping file path to a per-file dict with keys
            'insertions', 'deletions' and 'lines'.
    """
    __slots__ = ("total", "files")

    def __init__(self, total, files):
        self.total = total
        self.files = files

    @classmethod
    def _list_from_string(cls, repo, text):
        """Create a Stats object from ``git diff --numstat``-style output.

        :param repo: unused, kept for interface compatibility
        :param text: lines of the form '<insertions>\\t<deletions>\\t<path>',
            where binary files report '-' in the numeric columns
        :return: git.Stats"""
        totals = {'insertions': 0, 'deletions': 0, 'lines': 0, 'files': 0}
        per_file = {}
        for line in text.splitlines():
            raw_insertions, raw_deletions, filename = line.split("\t")
            # '-' marks a binary file for which git reports no line counts.
            insertions = int(raw_insertions) if raw_insertions != '-' else 0
            deletions = int(raw_deletions) if raw_deletions != '-' else 0
            totals['insertions'] += insertions
            totals['deletions'] += deletions
            totals['lines'] += insertions + deletions
            totals['files'] += 1
            per_file[filename.strip()] = {'insertions': insertions,
                                          'deletions': deletions,
                                          'lines': insertions + deletions}
        return Stats(totals, per_file)
class IndexFileSHA1Writer(object):
    """File-like wrapper that mirrors every written byte into a running SHA1.

    The digest is appended to the stream when close() is called, or earlier
    via write_sha().  Only useful to the indexfile.

    :note: Based on the dulwich project"""
    __slots__ = ("f", "sha1")

    def __init__(self, f):
        self.f = f
        self.sha1 = make_sha(b"")

    def write(self, data):
        self.sha1.update(data)
        return self.f.write(data)

    def write_sha(self):
        digest = self.sha1.digest()
        self.f.write(digest)
        return digest

    def close(self):
        digest = self.write_sha()
        self.f.close()
        return digest

    def tell(self):
        return self.f.tell()
class LockFile(object):
    """Provides methods to obtain, check for, and release a file based lock which
    should be used to handle concurrent access to the same file.

    As we are a utility class to be derived from, we only use protected methods.

    Locks will automatically be released on destruction"""
    __slots__ = ("_file_path", "_owns_lock")

    def __init__(self, file_path):
        self._file_path = file_path
        self._owns_lock = False

    def __del__(self):
        # Best-effort release so a forgotten lock does not outlive us.
        self._release_lock()

    def _lock_file_path(self):
        """:return: Path to lockfile"""
        return "%s.lock" % (self._file_path)

    def _has_lock(self):
        """:return: True if we have a lock and if the lockfile still exists
        :raise AssertionError: if our lock-file does not exist"""
        return self._owns_lock

    def _obtain_lock_or_raise(self):
        """Create a lock file as flag for other instances, mark our instance as lock-holder

        :raise IOError: if a lock was already present or a lock file could not be written"""
        if self._has_lock():
            return
        lock_file = self._lock_file_path()
        if osp.isfile(lock_file):
            raise IOError("Lock for file %r did already exist, delete %r in case the lock is illegal" %
                          (self._file_path, lock_file))

        try:
            # O_EXCL makes creation atomic: of several competing processes,
            # exactly one open() succeeds, which makes this usable as a lock.
            flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
            if is_win:
                flags |= os.O_SHORT_LIVED
            fd = os.open(lock_file, flags, 0)
            os.close(fd)
        except OSError as e:
            raise IOError(str(e))

        self._owns_lock = True

    def _obtain_lock(self):
        """The default implementation will raise if a lock cannot be obtained.
        Subclasses may override this method to provide a different implementation"""
        return self._obtain_lock_or_raise()

    def _release_lock(self):
        """Release our lock if we have one"""
        if not self._has_lock():
            return

        # if someone removed our file beforhand, lets just flag this issue
        # instead of failing, to make it more usable.
        lfp = self._lock_file_path()
        try:
            rmfile(lfp)
        except OSError:
            pass
        self._owns_lock = False
class BlockingLockFile(LockFile):
    """The lock file will block until a lock could be obtained, or fail after
    a specified timeout.

    :note: If the directory containing the lock was removed, an exception will
        be raised during the blocking period, preventing hangs as the lock
        can never be obtained."""
    __slots__ = ("_check_interval", "_max_block_time")
    def __init__(self, file_path, check_interval_s=0.3, max_block_time_s=MAXSIZE):
        """Configure the instance

        :param check_interval_s:
            Period of time to sleep until the lock is checked the next time.
        :param max_block_time_s:
            Maximum amount of seconds we may block waiting for the lock.
            By default (MAXSIZE) it waits a nearly unlimited time."""
        super(BlockingLockFile, self).__init__(file_path)
        self._check_interval = check_interval_s
        self._max_block_time = max_block_time_s
    def _obtain_lock(self):
        """This method blocks until it obtained the lock, or raises IOError if
        it ran out of time or if the parent directory was not available anymore.
        If this method returns, you are guaranteed to own the lock"""
        starttime = time.time()
        maxtime = starttime + float(self._max_block_time)
        while True:
            try:
                super(BlockingLockFile, self)._obtain_lock()
            except IOError:
                # sanity check: if the directory leading to the lockfile is not
                # readable anymore, raise an exception
                curtime = time.time()
                if not osp.isdir(osp.dirname(self._lock_file_path())):
                    msg = "Directory containing the lockfile %r was not readable anymore after waiting %g seconds" % (
                        self._lock_file_path(), curtime - starttime)
                    raise IOError(msg)
                # END handle missing directory
                if curtime >= maxtime:
                    msg = "Waited %g seconds for lock at %r" % (maxtime - starttime, self._lock_file_path())
                    raise IOError(msg)
                # END abort if we wait too long
                time.sleep(self._check_interval)
            else:
                # lock acquired without error - we are done
                break
        # END endless loop
class IterableList(list):
    """A list that additionally resolves its members by their identifying
    attribute, either via attribute access or string subscript::

        heads = repo.heads
        heads.master
        heads['master']
        heads[0]

    The attribute used for identification is given as ``id_attr`` at
    construction time. An optional ``prefix`` is prepended to every name
    lookup, so callers may omit a common prefix carried by all item ids."""
    __slots__ = ('_id_attr', '_prefix')

    def __new__(cls, id_attr, prefix=''):
        # list.__new__ takes no extra args - swallow ours here
        return super(IterableList, cls).__new__(cls)

    def __init__(self, id_attr, prefix=''):
        self._id_attr = id_attr
        self._prefix = prefix

    def __contains__(self, attr):
        # cheap path: plain list membership (identity / equality)
        if list.__contains__(self, attr):
            return True
        # slow path: treat the argument as a name and look it up
        try:
            getattr(self, attr)
        except (AttributeError, TypeError):
            return False
        return True

    def __getattr__(self, attr):
        wanted = self._prefix + attr
        for member in self:
            if getattr(member, self._id_attr) == wanted:
                return member
        # END for each member
        # no match - defer to the default lookup, which raises AttributeError
        return list.__getattribute__(self, attr)

    def __getitem__(self, index):
        if isinstance(index, int):
            return list.__getitem__(self, index)
        try:
            return getattr(self, index)
        except AttributeError:
            raise IndexError("No item found with id %r" % (self._prefix + index))
        # END handle getattr

    def __delitem__(self, index):
        target = index
        if not isinstance(index, int):
            # resolve the name to a positional index first
            target = -1
            name = self._prefix + index
            for pos, member in enumerate(self):
                if getattr(member, self._id_attr) == name:
                    target = pos
                    break
            # END for each member
            if target == -1:
                raise IndexError("Item with name %s not found" % name)
            # END handle not found
        # END resolve name to index
        list.__delitem__(self, target)
class Iterable(object):
    """Defines an interface for iterable items which is to assure a uniform
    way to retrieve and iterate items within the git repository"""
    __slots__ = tuple()
    # Subclasses override this with the attribute name that best identifies
    # their instances; it is handed to IterableList as the lookup key.
    _id_attribute_ = "attribute that most suitably identifies your instance"
    @classmethod
    def list_items(cls, repo, *args, **kwargs):
        """
        Find all items of this type - subclasses can specify args and kwargs
        differently, and are obliged to return all items if no additional
        arguments are given.

        :note: Favor the iter_items method as it will avoid materializing
            every item in a list at once.
        :return: IterableList(Item, ...) list of item instances"""
        out_list = IterableList(cls._id_attribute_)
        out_list.extend(cls.iter_items(repo, *args, **kwargs))
        return out_list
    @classmethod
    def iter_items(cls, repo, *args, **kwargs):
        """For more information about the arguments, see list_items
        :return: iterator yielding Items"""
        raise NotImplementedError("To be implemented by Subclass")
#} END classes
class NullHandler(logging.Handler):
    """A logging handler that silently discards every record."""
    def emit(self, record):
        # intentionally a no-op - records routed here are dropped
        pass
# In Python 2.6, there is no NullHandler yet. Let's monkey-patch it for a workaround.
if not hasattr(logging, 'NullHandler'):
    logging.NullHandler = NullHandler
| bsd-3-clause |
kxliugang/edx-platform | cms/djangoapps/contentstore/views/tests/test_textbooks.py | 163 | 13515 | import json
from unittest import TestCase
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from contentstore.views.course import (
validate_textbooks_json, validate_textbook_json, TextbookValidationError)
class TextbookIndexTestCase(CourseTestCase):
    "Test cases for the textbook index page"
    def setUp(self):
        "Set the URL for tests"
        super(TextbookIndexTestCase, self).setUp()
        self.url = reverse_course_url('textbooks_list_handler', self.course.id)
    def test_view_index(self):
        "Basic check that the textbook index page responds correctly"
        resp = self.client.get(self.url)
        self.assertEqual(resp.status_code, 200)
        # we don't have resp.context right now,
        # due to bugs in our testing harness :(
        if resp.context:
            self.assertEqual(resp.context['course'], self.course)
    def test_view_index_xhr(self):
        "Check that we get a JSON response when requested via AJAX"
        resp = self.client.get(
            self.url,
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertEqual(resp.status_code, 200)
        obj = json.loads(resp.content)
        self.assertEqual(self.course.pdf_textbooks, obj)
    def test_view_index_xhr_content(self):
        "Check that the response maps to the content of the modulestore"
        # representative payload: one single-URL textbook and one with chapters
        content = [
            {
                "tab_title": "my textbook",
                "url": "/abc.pdf",
                "id": "992"
            }, {
                "tab_title": "pineapple",
                "id": "0pineapple",
                "chapters": [
                    {
                        "title": "The Fruit",
                        "url": "/a/b/fruit.pdf",
                    }, {
                        "title": "The Legend",
                        "url": "/b/c/legend.pdf",
                    }
                ]
            }
        ]
        self.course.pdf_textbooks = content
        self.save_course()
        resp = self.client.get(
            self.url,
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertEqual(resp.status_code, 200)
        obj = json.loads(resp.content)
        self.assertEqual(content, obj)
    def test_view_index_xhr_put(self):
        "Check that you can save information to the server"
        textbooks = [
            {"tab_title": "Hi, mom!"},
            {"tab_title": "Textbook 2"},
        ]
        resp = self.client.put(
            self.url,
            data=json.dumps(textbooks),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertEqual(resp.status_code, 200)
        # should be the same, except for added ID
        no_ids = []
        self.reload_course()
        for textbook in self.course.pdf_textbooks:
            del textbook["id"]
            no_ids.append(textbook)
        self.assertEqual(no_ids, textbooks)
    def test_view_index_xhr_put_invalid(self):
        "Check that you can't save invalid JSON"
        resp = self.client.put(
            self.url,
            data="invalid",
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        # malformed body must be rejected with a 400 and an "error" key
        self.assertEqual(resp.status_code, 400)
        obj = json.loads(resp.content)
        self.assertIn("error", obj)
class TextbookCreateTestCase(CourseTestCase):
    "Test cases for creating a new PDF textbook"
    def setUp(self):
        "Set up a url and some textbook content for tests"
        super(TextbookCreateTestCase, self).setUp()
        self.url = reverse_course_url('textbooks_list_handler', self.course.id)
        self.textbook = {
            "tab_title": "Economics",
            "chapters": {
                "title": "Chapter 1",
                "url": "/a/b/c/ch1.pdf",
            }
        }
    def test_happy_path(self):
        "Test that you can create a textbook"
        resp = self.client.post(
            self.url,
            data=json.dumps(self.textbook),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        # 201 Created plus a Location header pointing at the new resource
        self.assertEqual(resp.status_code, 201)
        self.assertIn("Location", resp)
        textbook = json.loads(resp.content)
        # the response carries a generated "id"; strip it before comparing
        self.assertIn("id", textbook)
        del textbook["id"]
        self.assertEqual(self.textbook, textbook)
    def test_valid_id(self):
        "Textbook IDs must begin with a number; try a valid one"
        self.textbook["id"] = "7x5"
        resp = self.client.post(
            self.url,
            data=json.dumps(self.textbook),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(resp.status_code, 201)
        textbook = json.loads(resp.content)
        self.assertEqual(self.textbook, textbook)
    def test_invalid_id(self):
        "Textbook IDs must begin with a number; try an invalid one"
        self.textbook["id"] = "xxx"
        resp = self.client.post(
            self.url,
            data=json.dumps(self.textbook),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        # rejected: no resource is created, so no Location header either
        self.assertEqual(resp.status_code, 400)
        self.assertNotIn("Location", resp)
class TextbookDetailTestCase(CourseTestCase):
    "Test cases for the `textbook_detail_handler` view"
    def setUp(self):
        "Set some useful content and URLs for tests"
        super(TextbookDetailTestCase, self).setUp()
        self.textbook1 = {
            "tab_title": "Economics",
            "id": 1,
            "chapters": {
                "title": "Chapter 1",
                "url": "/a/b/c/ch1.pdf",
            }
        }
        self.url1 = self.get_details_url("1")
        self.textbook2 = {
            "tab_title": "Algebra",
            "id": 2,
            "chapters": {
                "title": "Chapter 11",
                "url": "/a/b/ch11.pdf",
            }
        }
        self.url2 = self.get_details_url("2")
        self.course.pdf_textbooks = [self.textbook1, self.textbook2]
        # Save the data that we've just changed to the underlying
        # MongoKeyValueStore before we update the mongo datastore.
        self.save_course()
        # a detail URL whose id matches no stored textbook
        self.url_nonexist = self.get_details_url("1=20")
    def get_details_url(self, textbook_id):
        """
        Returns the URL for textbook detail handler.
        """
        return reverse_course_url(
            'textbooks_detail_handler',
            self.course.id,
            kwargs={'textbook_id': textbook_id}
        )
    def test_get_1(self):
        "Get the first textbook"
        resp = self.client.get(self.url1)
        self.assertEqual(resp.status_code, 200)
        compare = json.loads(resp.content)
        self.assertEqual(compare, self.textbook1)
    def test_get_2(self):
        "Get the second textbook"
        resp = self.client.get(self.url2)
        self.assertEqual(resp.status_code, 200)
        compare = json.loads(resp.content)
        self.assertEqual(compare, self.textbook2)
    def test_get_nonexistant(self):
        "Get a nonexistent textbook"
        resp = self.client.get(self.url_nonexist)
        self.assertEqual(resp.status_code, 404)
    def test_delete(self):
        "Delete a textbook by ID"
        resp = self.client.delete(self.url1)
        self.assertEqual(resp.status_code, 204)
        # the other textbook must survive the delete
        self.reload_course()
        self.assertEqual(self.course.pdf_textbooks, [self.textbook2])
    def test_delete_nonexistant(self):
        "Delete a textbook by ID, when the ID doesn't match an existing textbook"
        resp = self.client.delete(self.url_nonexist)
        self.assertEqual(resp.status_code, 404)
        # a failed delete must leave the course contents untouched
        self.reload_course()
        self.assertEqual(self.course.pdf_textbooks, [self.textbook1, self.textbook2])
    def test_create_new_by_id(self):
        "Create a textbook by ID"
        textbook = {
            "tab_title": "a new textbook",
            "url": "supercool.pdf",
            "id": "1supercool",
        }
        url = self.get_details_url("1supercool")
        resp = self.client.post(
            url,
            data=json.dumps(textbook),
            content_type="application/json",
        )
        self.assertEqual(resp.status_code, 201)
        # the new textbook must be retrievable at the same URL ...
        resp2 = self.client.get(url)
        self.assertEqual(resp2.status_code, 200)
        compare = json.loads(resp2.content)
        self.assertEqual(compare, textbook)
        # ... and appended to the existing list in the modulestore
        self.reload_course()
        self.assertEqual(
            self.course.pdf_textbooks,
            [self.textbook1, self.textbook2, textbook]
        )
    def test_replace_by_id(self):
        "Create a textbook by ID, overwriting an existing textbook ID"
        replacement = {
            "tab_title": "You've been replaced!",
            "url": "supercool.pdf",
            "id": "2",
        }
        resp = self.client.post(
            self.url2,
            data=json.dumps(replacement),
            content_type="application/json",
        )
        self.assertEqual(resp.status_code, 201)
        resp2 = self.client.get(self.url2)
        self.assertEqual(resp2.status_code, 200)
        compare = json.loads(resp2.content)
        self.assertEqual(compare, replacement)
        # textbook2 is replaced in place; textbook1 is untouched
        course = self.store.get_item(self.course.location)
        self.assertEqual(
            course.pdf_textbooks,
            [self.textbook1, replacement]
        )
class TextbookValidationTestCase(TestCase):
    "Tests for the code to validate the structure of a PDF textbook"
    def setUp(self):
        "Set some useful content for tests"
        super(TextbookValidationTestCase, self).setUp()
        # tb1: single-URL textbook; tb2: textbook made of chapters
        self.tb1 = {
            "tab_title": "Hi, mom!",
            "url": "/mom.pdf"
        }
        self.tb2 = {
            "tab_title": "Hi, dad!",
            "chapters": [
                {
                    "title": "Baseball",
                    "url": "baseball.pdf",
                }, {
                    "title": "Basketball",
                    "url": "crazypants.pdf",
                }
            ]
        }
        self.textbooks = [self.tb1, self.tb2]
    def test_happy_path_plural(self):
        "Test that the plural validator works properly"
        result = validate_textbooks_json(json.dumps(self.textbooks))
        self.assertEqual(self.textbooks, result)
    def test_happy_path_singular_1(self):
        "Test that the singular validator works properly"
        result = validate_textbook_json(json.dumps(self.tb1))
        self.assertEqual(self.tb1, result)
    def test_happy_path_singular_2(self):
        "Test that the singular validator works properly, with different data"
        result = validate_textbook_json(json.dumps(self.tb2))
        self.assertEqual(self.tb2, result)
    def test_valid_id(self):
        "Test that a valid ID doesn't trip the validator, and comes out unchanged"
        self.tb1["id"] = 1
        result = validate_textbook_json(json.dumps(self.tb1))
        self.assertEqual(self.tb1, result)
    def test_invalid_id(self):
        "Test that an invalid ID trips the validator"
        self.tb1["id"] = "abc"
        with self.assertRaises(TextbookValidationError):
            validate_textbook_json(json.dumps(self.tb1))
    def test_invalid_json_plural(self):
        "Test that invalid JSON trips the plural validator"
        with self.assertRaises(TextbookValidationError):
            validate_textbooks_json("[{'abc'}]")
    def test_invalid_json_singular(self):
        "Test that invalid JSON trips the singluar validator"
        with self.assertRaises(TextbookValidationError):
            validate_textbook_json("[{1]}")
    def test_wrong_json_plural(self):
        "Test that a JSON object trips the plural validators (requires a list)"
        with self.assertRaises(TextbookValidationError):
            validate_textbooks_json('{"tab_title": "Hi, mom!"}')
    def test_wrong_json_singular(self):
        "Test that a JSON list trips the plural validators (requires an object)"
        with self.assertRaises(TextbookValidationError):
            validate_textbook_json('[{"tab_title": "Hi, mom!"}, {"tab_title": "Hi, dad!"}]')
    def test_no_tab_title_plural(self):
        "Test that `tab_title` is required for the plural validator"
        with self.assertRaises(TextbookValidationError):
            validate_textbooks_json('[{"url": "/textbook.pdf"}]')
    def test_no_tab_title_singular(self):
        "Test that `tab_title` is required for the singular validator"
        with self.assertRaises(TextbookValidationError):
            validate_textbook_json('{"url": "/textbook.pdf"}')
    def test_duplicate_ids(self):
        "Test that duplicate IDs in the plural validator trips the validator"
        textbooks = [{
            "tab_title": "name one",
            "url": "one.pdf",
            "id": 1,
        }, {
            "tab_title": "name two",
            "url": "two.pdf",
            "id": 1,
        }]
        with self.assertRaises(TextbookValidationError):
            validate_textbooks_json(json.dumps(textbooks))
| agpl-3.0 |
tchernomax/ansible | lib/ansible/modules/database/misc/riak.py | 50 | 7213 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, James Martin <jmartin@basho.com>, Drew Kerrigan <dkerrigan@basho.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: riak
short_description: This module handles some common Riak operations
description:
- This module can be used to join nodes to a cluster, check
the status of the cluster.
version_added: "1.2"
author:
- "James Martin (@jsmartin)"
- "Drew Kerrigan (@drewkerrigan)"
options:
command:
description:
- The command you would like to perform against the cluster.
choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
config_dir:
description:
- The path to the riak configuration directory
default: /etc/riak
http_conn:
description:
- The ip address and port that is listening for Riak HTTP queries
default: 127.0.0.1:8098
target_node:
description:
- The target node for certain operations (join, ping)
default: riak@127.0.0.1
wait_for_handoffs:
description:
- Number of seconds to wait for handoffs to complete.
wait_for_ring:
description:
- Number of seconds to wait for all nodes to agree on the ring.
wait_for_service:
description:
- Waits for a riak service to come online before continuing.
choices: ['kv']
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
version_added: 1.5.1
'''
EXAMPLES = '''
# Join's a Riak node to another node
- riak:
command: join
target_node: riak@10.1.1.1
# Wait for handoffs to finish. Use with async and poll.
- riak:
wait_for_handoffs: yes
# Wait for riak_kv service to startup
- riak:
wait_for_service: kv
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def ring_check(module, riak_admin_bin):
    """Return True when `riak-admin ringready` reports all nodes agree.

    :param module: AnsibleModule instance, used to run the external command
    :param riak_admin_bin: path to the riak-admin executable
    :return: bool - True only on a zero exit status with the agreement
        banner present in stdout
    """
    cmd = '%s ringready' % riak_admin_bin
    rc, out, err = module.run_command(cmd)
    # return the boolean expression directly instead of the verbose
    # `if cond: return True / else: return False` form
    return rc == 0 and 'TRUE All nodes agree on the ring' in out
def main():
    """Entry point: run the requested Riak operation and exit via the module.

    Flow: fetch node stats over HTTP (needed by every command), dispatch on
    the ``command`` parameter, then honour the ``wait_for_*`` options before
    reporting the collected result with ``module.exit_json``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            command=dict(required=False, default=None, choices=[
                'ping', 'kv_test', 'join', 'plan', 'commit']),
            config_dir=dict(default='/etc/riak', type='path'),
            http_conn=dict(required=False, default='127.0.0.1:8098'),
            target_node=dict(default='riak@127.0.0.1', required=False),
            wait_for_handoffs=dict(default=False, type='int'),
            wait_for_ring=dict(default=False, type='int'),
            wait_for_service=dict(
                required=False, default=None, choices=['kv']),
            validate_certs=dict(default='yes', type='bool'))
    )
    command = module.params.get('command')
    http_conn = module.params.get('http_conn')
    target_node = module.params.get('target_node')
    wait_for_handoffs = module.params.get('wait_for_handoffs')
    wait_for_ring = module.params.get('wait_for_ring')
    wait_for_service = module.params.get('wait_for_service')
    # make sure riak commands are on the path
    riak_bin = module.get_bin_path('riak')
    riak_admin_bin = module.get_bin_path('riak-admin')
    # poll the HTTP stats endpoint for up to two minutes, 5s between tries
    timeout = time.time() + 120
    while True:
        if time.time() > timeout:
            module.fail_json(msg='Timeout, could not fetch Riak stats.')
        (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
        if info['status'] == 200:
            stats_raw = response.read()
            break
        time.sleep(5)
    # here we attempt to load those stats,
    try:
        stats = json.loads(stats_raw)
    except ValueError:
        # only catch JSON parse errors - the former bare `except:` would also
        # swallow SystemExit/KeyboardInterrupt and hide unrelated bugs
        module.fail_json(msg='Could not parse Riak stats.')
    node_name = stats['nodename']
    nodes = stats['ring_members']
    ring_size = stats['ring_creation_size']
    rc, out, err = module.run_command([riak_bin, 'version'])
    version = out.strip()
    # base result payload; command branches below add to it
    result = dict(node_name=node_name,
                  nodes=nodes,
                  ring_size=ring_size,
                  version=version)
    if command == 'ping':
        cmd = '%s ping %s' % (riak_bin, target_node)
        rc, out, err = module.run_command(cmd)
        if rc == 0:
            result['ping'] = out
        else:
            module.fail_json(msg=out)
    elif command == 'kv_test':
        cmd = '%s test' % riak_admin_bin
        rc, out, err = module.run_command(cmd)
        if rc == 0:
            result['kv_test'] = out
        else:
            module.fail_json(msg=out)
    elif command == 'join':
        # joining is idempotent: skip when this node is already a member
        if nodes.count(node_name) == 1 and len(nodes) > 1:
            result['join'] = 'Node is already in cluster or staged to be in cluster.'
        else:
            cmd = '%s cluster join %s' % (riak_admin_bin, target_node)
            rc, out, err = module.run_command(cmd)
            if rc == 0:
                result['join'] = out
                result['changed'] = True
            else:
                module.fail_json(msg=out)
    elif command == 'plan':
        cmd = '%s cluster plan' % riak_admin_bin
        rc, out, err = module.run_command(cmd)
        if rc == 0:
            result['plan'] = out
            # only staged changes mean the plan actually alters the cluster
            if 'Staged Changes' in out:
                result['changed'] = True
        else:
            module.fail_json(msg=out)
    elif command == 'commit':
        cmd = '%s cluster commit' % riak_admin_bin
        rc, out, err = module.run_command(cmd)
        if rc == 0:
            result['commit'] = out
            result['changed'] = True
        else:
            module.fail_json(msg=out)
    # this could take a while, recommend to run in async mode
    if wait_for_handoffs:
        timeout = time.time() + wait_for_handoffs
        while True:
            cmd = '%s transfers' % riak_admin_bin
            rc, out, err = module.run_command(cmd)
            if 'No transfers active' in out:
                result['handoffs'] = 'No transfers active.'
                break
            time.sleep(10)
            if time.time() > timeout:
                module.fail_json(msg='Timeout waiting for handoffs.')
    if wait_for_service:
        cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name]
        rc, out, err = module.run_command(cmd)
        result['service'] = out
    if wait_for_ring:
        timeout = time.time() + wait_for_ring
        while True:
            if ring_check(module, riak_admin_bin):
                break
            time.sleep(10)
            if time.time() > timeout:
                module.fail_json(msg='Timeout waiting for nodes to agree on ring.')
    result['ring_ready'] = ring_check(module, riak_admin_bin)
    module.exit_json(**result)
| gpl-3.0 |
mandeepdhami/horizon | openstack_dashboard/dashboards/project/volumes/volumes/tests.py | 7 | 69956 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import django
from django.core.urlresolvers import reverse
from django.forms import widgets
from django import http
from django.test.utils import override_settings
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.project.volumes \
.volumes import tables
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
# URLs for the project volumes panel, resolved once at import time.
VOLUME_INDEX_URL = reverse('horizon:project:volumes:index')
VOLUME_VOLUMES_TAB_URL = reverse('horizon:project:volumes:volumes_tab')
# default search filter used by the stubbed cinder list calls below:
# restrict results to volumes in the "available" state
SEARCH_OPTS = dict(status=api.cinder.VOLUME_STATE_AVAILABLE)
class VolumeViewTests(test.TestCase):
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume(self):
volume = self.cinder_volumes.first()
volume_type = self.volume_types.first()
az = self.cinder_availability_zones.first().zoneName
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
formData['type'],
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=formData['availability_zone'],
source_volid=None)\
.AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_without_name(self):
volume = self.cinder_volumes.first()
volume_type = self.volume_types.first()
az = self.cinder_availability_zones.first().zoneName
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': '',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
formData['type'],
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=formData['availability_zone'],
source_volid=None)\
.AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_dropdown(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'no_source_type',
'snapshot_source': self.cinder_volume_snapshots.first().id,
'image_source': self.images.first().id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_get',
'volume_get',
'volume_type_list'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_snapshot(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'snapshot_source': snapshot.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
cinder.volume_get(IsA(http.HttpRequest), snapshot.volume_id).\
AndReturn(self.cinder_volumes.first())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=snapshot.id,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get snapshot from url
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_get',
'volume_list',
'volume_type_list',
'availability_zone_list',
'volume_snapshot_get',
'volume_snapshot_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_volume(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A copy of a volume',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'volume_source',
'volume_source': volume.id}
cinder.volume_list(IsA(http.HttpRequest), search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volumes.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_get(IsA(http.HttpRequest),
volume.id).AndReturn(self.cinder_volumes.first())
cinder.extension_supported(IsA(http.HttpRequest),
'AvailabilityZones').AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=None,
source_volid=volume.id).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
redirect_url = VOLUME_VOLUMES_TAB_URL
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
self.assertRedirectsNoFollow(res, redirect_url)
    @test.create_stubs({cinder: ('volume_create',
                                 'volume_snapshot_list',
                                 'volume_snapshot_get',
                                 'volume_get',
                                 'volume_list',
                                 'volume_type_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_list_detailed',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_from_snapshot_dropdown(self):
        """Creating a volume from a snapshot picked in the source dropdown
        posts successfully and redirects to the volumes tab.
        """
        volume = self.cinder_volumes.first()
        usage_limit = {'maxTotalVolumeGigabytes': 250,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        snapshot = self.cinder_volume_snapshots.first()
        # 'volume_source_type' selects the snapshot branch of the form.
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 50,
                    'type': '',
                    'volume_source_type': 'snapshot_source',
                    'snapshot_source': snapshot.id}
        # Record the expected API calls; mox replays them in this order.
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SEARCH_OPTS).\
            AndReturn(self.cinder_volume_snapshots.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        cinder.volume_list(IsA(
            http.HttpRequest),
            search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        cinder.volume_snapshot_get(IsA(http.HttpRequest),
                                   str(snapshot.id)).AndReturn(snapshot)
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        # The create call must carry the snapshot id and no image/volume id.
        cinder.volume_create(IsA(http.HttpRequest),
                             formData['size'],
                             formData['name'],
                             formData['description'],
                             '',
                             metadata={},
                             snapshot_id=snapshot.id,
                             image_id=None,
                             availability_zone=None,
                             source_volid=None).AndReturn(volume)
        self.mox.ReplayAll()
        # get snapshot from dropdown list
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post(url, formData)
        redirect_url = VOLUME_VOLUMES_TAB_URL
        self.assertRedirectsNoFollow(res, redirect_url)
    @test.create_stubs({cinder: ('volume_snapshot_get',
                                 'volume_type_list',
                                 'volume_get'),
                        api.glance: ('image_list_detailed',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_from_snapshot_invalid_size(self):
        """A requested size smaller than the source snapshot (40GB fixture)
        is rejected with a form error and no redirect.
        """
        usage_limit = {'maxTotalVolumeGigabytes': 100,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        snapshot = self.cinder_volume_snapshots.first()
        # size 20 < snapshot size (40GB) triggers the validation error.
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 20, 'snapshot_source': snapshot.id}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        cinder.volume_snapshot_get(IsA(http.HttpRequest),
                                   str(snapshot.id)).AndReturn(snapshot)
        cinder.volume_get(IsA(http.HttpRequest), snapshot.volume_id).\
            AndReturn(self.cinder_volumes.first())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post("?".join([url,
                                         "snapshot_id=" + str(snapshot.id)]),
                               formData, follow=True)
        # No redirect happened: the form re-rendered with the error.
        self.assertEqual(res.redirect_chain, [])
        self.assertFormError(res, 'form', None,
                             "The volume size cannot be less than the "
                             "snapshot size (40GB)")
    @test.create_stubs({cinder: ('volume_create',
                                 'volume_type_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_get',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_from_image(self):
        """Creating a volume from an image passed via the ``image_id``
        query parameter succeeds and redirects to the volumes tab.
        """
        volume = self.cinder_volumes.first()
        usage_limit = {'maxTotalVolumeGigabytes': 200,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        image = self.images.first()
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 40,
                    'type': '',
                    'image_source': image.id}
        # Record expected API calls; mox replays them in this order.
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        api.glance.image_get(IsA(http.HttpRequest),
                             str(image.id)).AndReturn(image)
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        # The create call must carry the image id and no snapshot/volume id.
        cinder.volume_create(IsA(http.HttpRequest),
                             formData['size'],
                             formData['name'],
                             formData['description'],
                             '',
                             metadata={},
                             snapshot_id=None,
                             image_id=image.id,
                             availability_zone=None,
                             source_volid=None).AndReturn(volume)
        self.mox.ReplayAll()
        # get image from url
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post("?".join([url,
                                         "image_id=" + str(image.id)]),
                               formData)
        redirect_url = VOLUME_VOLUMES_TAB_URL
        self.assertRedirectsNoFollow(res, redirect_url)
    @test.create_stubs({cinder: ('volume_create',
                                 'volume_type_list',
                                 'volume_list',
                                 'volume_snapshot_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_get',
                                     'image_list_detailed'),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_from_image_dropdown(self):
        """Creating a volume from an image picked in the source dropdown
        uses the image id even though a snapshot id is also in the POST.
        """
        volume = self.cinder_volumes.first()
        usage_limit = {'maxTotalVolumeGigabytes': 200,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        image = self.images.first()
        # Both snapshot_source and image_source are posted;
        # 'volume_source_type' decides which one the form honors.
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 30,
                    'type': '',
                    'volume_source_type': 'image_source',
                    'snapshot_source': self.cinder_volume_snapshots.first().id,
                    'image_source': image.id}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SEARCH_OPTS).\
            AndReturn(self.cinder_volume_snapshots.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        cinder.volume_list(IsA(
            http.HttpRequest),
            search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)) \
            .AndReturn(usage_limit)
        api.glance.image_get(IsA(http.HttpRequest),
                             str(image.id)).AndReturn(image)
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        # Only image_id may be set; the posted snapshot_source is ignored.
        cinder.volume_create(IsA(http.HttpRequest),
                             formData['size'],
                             formData['name'],
                             formData['description'],
                             '',
                             metadata={},
                             snapshot_id=None,
                             image_id=image.id,
                             availability_zone=None,
                             source_volid=None).AndReturn(volume)
        self.mox.ReplayAll()
        # get image from dropdown list
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post(url, formData)
        redirect_url = VOLUME_VOLUMES_TAB_URL
        self.assertRedirectsNoFollow(res, redirect_url)
    @test.create_stubs({cinder: ('volume_type_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_get',
                                     'image_list_detailed'),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_from_image_under_image_size(self):
        """A requested size smaller than the source image (20GB fixture)
        is rejected with a form error and no redirect.
        """
        usage_limit = {'maxTotalVolumeGigabytes': 100,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        image = self.images.first()
        # size 1 < image size (20GB) triggers the validation error.
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 1, 'image_source': image.id}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        api.glance.image_get(IsA(http.HttpRequest),
                             str(image.id)).AndReturn(image)
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post("?".join([url,
                                         "image_id=" + str(image.id)]),
                               formData, follow=True)
        self.assertEqual(res.redirect_chain, [])
        # in django 1.6 filesizeformat replaces all spaces with
        # non-breaking space characters
        if django.VERSION >= (1, 6):
            msg = (u"The volume size cannot be less than the "
                   u"image size (20.0\xa0GB)")
        else:
            msg = (u"The volume size cannot be less than the "
                   u"image size (20.0 GB)")
        self.assertFormError(res, 'form', None, msg)
    @test.create_stubs({cinder: ('volume_type_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_get',
                                     'image_list_detailed'),
                        quotas: ('tenant_limit_usages',)})
    def _test_create_volume_from_image_under_image_min_disk_size(self, image):
        """Shared helper: posting a size below the image's min_disk (30GB,
        set by the callers) must produce a form error and no redirect.
        """
        usage_limit = {'maxTotalVolumeGigabytes': 100,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        # size 5 < min_disk (30GB) triggers the validation error.
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 5, 'image_source': image.id}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        api.glance.image_get(IsA(http.HttpRequest),
                             str(image.id)).AndReturn(image)
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post("?".join([url,
                                         "image_id=" + str(image.id)]),
                               formData, follow=True)
        self.assertEqual(res.redirect_chain, [])
        self.assertFormError(res, 'form', None,
                             "The volume size cannot be less than the "
                             "image minimum disk size (30GB)")
def test_create_volume_from_image_under_image_min_disk_size(self):
image = self.images.get(name="protected_images")
image.min_disk = 30
self._test_create_volume_from_image_under_image_min_disk_size(image)
def test_create_volume_from_image_under_image_property_min_disk_size(self):
image = self.images.get(name="protected_images")
image.min_disk = 0
image.properties['min_disk'] = 30
self._test_create_volume_from_image_under_image_min_disk_size(image)
    @test.create_stubs({cinder: ('volume_snapshot_list',
                                 'volume_type_list',
                                 'volume_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_list_detailed',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_gb_used_over_alloted_quota(self):
        """Requesting more gigabytes than the remaining quota (100 - 80 =
        20GB available) yields a non-field form error.
        """
        usage_limit = {'maxTotalVolumeGigabytes': 100,
                       'gigabytesUsed': 80,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        formData = {'name': u'This Volume Is Huge!',
                    'description': u'This is a volume that is just too big!',
                    'method': u'CreateForm',
                    'size': 5000}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SEARCH_OPTS).\
            AndReturn(self.cinder_volume_snapshots.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        cinder.volume_list(IsA(
            http.HttpRequest),
            search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post(url, formData)
        # The quota failure surfaces as a form-level (non-field) error.
        expected_error = [u'A volume of 5000GB cannot be created as you only'
                          ' have 20GB of your quota available.']
        self.assertEqual(res.context['form'].errors['__all__'], expected_error)
    @test.create_stubs({cinder: ('volume_snapshot_list',
                                 'volume_type_list',
                                 'volume_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_list_detailed',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_number_over_alloted_quota(self):
        """When the volume-count quota is exhausted (used == max), creating
        another volume yields a non-field form error.
        """
        # volumesUsed equals maxTotalVolumes -> no volumes left.
        usage_limit = {'maxTotalVolumeGigabytes': 100,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': len(self.cinder_volumes.list())}
        formData = {'name': u'Too Many...',
                    'description': u'We have no volumes left!',
                    'method': u'CreateForm',
                    'size': 10}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SEARCH_OPTS).\
            AndReturn(self.cinder_volume_snapshots.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        cinder.volume_list(IsA(
            http.HttpRequest),
            search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post(url, formData)
        expected_error = [u'You are already using all of your available'
                          ' volumes.']
        self.assertEqual(res.context['form'].errors['__all__'], expected_error)
    @test.create_stubs({cinder: ('tenant_absolute_limits',
                                 'volume_list',
                                 'volume_snapshot_list',
                                 'volume_backup_supported',
                                 'volume_delete',),
                        api.nova: ('server_list',)})
    def test_delete_volume(self):
        """Posting the table delete action calls volume_delete and shows
        the 'Scheduled deletion' message after the follow-up redirect.
        """
        volumes = self.cinder_volumes.list()
        volume = self.cinder_volumes.first()
        formData = {'action':
                    'volumes__delete__%s' % volume.id}
        cinder.volume_backup_supported(IsA(http.HttpRequest)). \
            MultipleTimes().AndReturn(True)
        # First listing renders the table, then the delete fires, then the
        # redirected index page lists everything again.
        cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn(volumes)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=None).\
            AndReturn([])
        cinder.volume_delete(IsA(http.HttpRequest), volume.id)
        api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn([self.servers.list(), False])
        cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn(volumes)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=None).\
            AndReturn([])
        api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn([self.servers.list(), False])
        cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
            AndReturn(self.cinder_limits['absolute'])
        self.mox.ReplayAll()
        url = VOLUME_INDEX_URL
        res = self.client.post(url, formData, follow=True)
        self.assertIn("Scheduled deletion of Volume: Volume name",
                      [m.message for m in res.context['messages']])
    @test.create_stubs({cinder: ('volume_get',
                                 'tenant_absolute_limits')})
    def test_delete_volume_with_snap_no_action_item(self):
        """A volume that has snapshots must not offer a Delete action in
        its row (checked via an AJAX row update).
        """
        volume = self.cinder_volumes.get(name='Volume name')
        setattr(volume, 'has_snapshot', True)
        limits = self.cinder_limits['absolute']
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        cinder.tenant_absolute_limits(IsA(http.HttpRequest)). \
            MultipleTimes('limits').AndReturn(limits)
        self.mox.ReplayAll()
        url = (VOLUME_INDEX_URL +
               "?action=row_update&table=volumes&obj_id=" + volume.id)
        res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(res.status_code, 200)
        self.assertNotContains(res, 'Delete Volume')
        self.assertNotContains(res, 'delete')
    @test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
    @override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
                                                      True})
    def test_edit_attachments(self):
        """The attach form for an already-attached volume lists the
        attachment and shows an editable (optional) device field when the
        hypervisor supports setting the mount point.
        """
        volume = self.cinder_volumes.first()
        servers = [s for s in self.servers.list()
                   if s.tenant_id == self.request.user.tenant_id]
        volume.attachments = [{'id': volume.id,
                               'volume_id': volume.id,
                               'volume_name': volume.name,
                               'instance': servers[0],
                               'device': '/dev/vdb',
                               'server_id': servers[0].id}]
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:attach',
                      args=[volume.id])
        res = self.client.get(url)
        msg = 'Volume %s on instance %s' % (volume.name, servers[0].name)
        self.assertContains(res, msg)
        # Asserting length of 2 accounts for the one instance option,
        # and the one 'Choose Instance' option.
        form = res.context['form']
        self.assertEqual(len(form.fields['instance']._choices),
                         1)
        self.assertEqual(res.status_code, 200)
        # Visible text input because can_set_mount_point is True.
        self.assertTrue(isinstance(form.fields['device'].widget,
                                   widgets.TextInput))
        self.assertFalse(form.fields['device'].required)
    @test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
    @override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
                                                      True})
    def test_edit_attachments_auto_device_name(self):
        """An attachment with an empty device name still renders an
        editable, optional device field.
        """
        volume = self.cinder_volumes.first()
        servers = [s for s in self.servers.list()
                   if s.tenant_id == self.request.user.tenant_id]
        # Empty 'device' simulates an auto-assigned device name.
        volume.attachments = [{'id': volume.id,
                               'volume_id': volume.id,
                               'volume_name': volume.name,
                               'instance': servers[0],
                               'device': '',
                               'server_id': servers[0].id}]
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:attach',
                      args=[volume.id])
        res = self.client.get(url)
        form = res.context['form']
        self.assertTrue(isinstance(form.fields['device'].widget,
                                   widgets.TextInput))
        self.assertFalse(form.fields['device'].required)
    @test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
    def test_edit_attachments_cannot_set_mount_point(self):
        """Without the can_set_mount_point hypervisor feature (default
        settings), the device field is rendered as a hidden input.
        """
        volume = self.cinder_volumes.first()
        servers = [s for s in self.servers.list()
                   if s.tenant_id == self.request.user.tenant_id]
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:attach',
                      args=[volume.id])
        res = self.client.get(url)
        # Assert the device field is hidden.
        form = res.context['form']
        self.assertTrue(isinstance(form.fields['device'].widget,
                                   widgets.HiddenInput))
    @test.create_stubs({cinder: ('volume_get',),
                        api.nova: ('server_list',)})
    def test_edit_attachments_attached_volume(self):
        """The instance dropdown offers the placeholder entry plus the
        tenant's one instance for a volume from the fixture list.
        """
        servers = [s for s in self.servers.list()
                   if s.tenant_id == self.request.user.tenant_id]
        server = servers[0]
        volume = self.cinder_volumes.list()[0]
        cinder.volume_get(IsA(http.HttpRequest), volume.id) \
            .AndReturn(volume)
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn([servers, False])
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:attach',
                      args=[volume.id])
        res = self.client.get(url)
        # Choice 0 is the placeholder, choice 1 is the real instance.
        self.assertEqual(res.context['form'].fields['instance']._choices[0][1],
                         "Select an instance")
        self.assertEqual(len(res.context['form'].fields['instance'].choices),
                         2)
        self.assertEqual(res.context['form'].fields['instance']._choices[1][0],
                         server.id)
        self.assertEqual(res.status_code, 200)
    @test.create_stubs({cinder: ('tenant_absolute_limits',
                                 'volume_get',)})
    def test_create_snapshot_button_disabled_when_quota_exceeded(self):
        """When the snapshot quota is used up, the row's Create Snapshot
        link renders disabled with a 'Quota exceeded' label.
        """
        limits = {'maxTotalSnapshots': 1}
        limits['totalSnapshotsUsed'] = limits['maxTotalSnapshots']
        volume = self.cinder_volumes.first()
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        cinder.tenant_absolute_limits(IsA(http.HttpRequest)).AndReturn(limits)
        self.mox.ReplayAll()
        create_link = tables.CreateSnapshot()
        url = reverse(create_link.get_link_url(), args=[volume.id])
        res_url = (VOLUME_INDEX_URL +
                   "?action=row_update&table=volumes&obj_id=" + volume.id)
        res = self.client.get(res_url, {},
                              HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        classes = (list(create_link.get_default_classes())
                   + list(create_link.classes))
        link_name = "%s (%s)" % (unicode(create_link.verbose_name),
                                 "Quota exceeded")
        # html=True makes the comparison attribute-order insensitive.
        expected_string = "<a href='%s' class=\"%s disabled\" "\
            "id=\"volumes__row_%s__action_snapshots\">%s</a>" \
            % (url, " ".join(classes), volume.id, link_name)
        self.assertContains(
            res, expected_string, html=True,
            msg_prefix="The create snapshot button is not disabled")
    @test.create_stubs({cinder: ('tenant_absolute_limits',
                                 'volume_list',
                                 'volume_snapshot_list',
                                 'volume_backup_supported',),
                        api.nova: ('server_list',)})
    def test_create_button_disabled_when_quota_exceeded(self):
        """When the volume-count quota is used up, the table's Create
        Volume button renders disabled with a 'Quota exceeded' label.
        """
        limits = self.cinder_limits['absolute']
        limits['totalVolumesUsed'] = limits['maxTotalVolumes']
        volumes = self.cinder_volumes.list()
        api.cinder.volume_backup_supported(IsA(http.HttpRequest)). \
            MultipleTimes().AndReturn(True)
        cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
            .AndReturn(volumes)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=None).\
            AndReturn([])
        api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
            .AndReturn([self.servers.list(), False])
        cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
            .MultipleTimes().AndReturn(limits)
        self.mox.ReplayAll()
        res = self.client.get(VOLUME_INDEX_URL)
        self.assertTemplateUsed(res, 'project/volumes/index.html')
        volumes = res.context['volumes_table'].data
        self.assertItemsEqual(volumes, self.cinder_volumes.list())
        create_link = tables.CreateVolume()
        url = create_link.get_link_url()
        classes = (list(create_link.get_default_classes())
                   + list(create_link.classes))
        link_name = "%s (%s)" % (unicode(create_link.verbose_name),
                                 "Quota exceeded")
        # html=True makes the comparison attribute-order insensitive.
        expected_string = "<a href='%s' title='%s' class='%s disabled' "\
            "id='volumes__action_create' data-update-url=" \
            "'/project/volumes/?action=create&table=volumes'> "\
            "<span class='fa fa-plus'></span>%s</a>" \
            % (url, link_name, " ".join(classes), link_name)
        self.assertContains(res, expected_string, html=True,
                            msg_prefix="The create button is not disabled")
    @test.create_stubs({cinder: ('tenant_absolute_limits',
                                 'volume_get',),
                        api.nova: ('server_get',)})
    def test_detail_view(self):
        """The volume detail page shows name, id, status, size and a link
        to the attached instance.
        """
        volume = self.cinder_volumes.first()
        server = self.servers.first()
        volume.attachments = [{"server_id": server.id}]
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
            .AndReturn(self.cinder_limits['absolute'])
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:detail',
                      args=[volume.id])
        res = self.client.get(url)
        self.assertContains(res, "<h1>Volume Details: Volume name</h1>",
                            1, 200)
        self.assertContains(res, "<dd>Volume name</dd>", 1, 200)
        self.assertContains(res, "<dd>%s</dd>" % volume.id, 1, 200)
        self.assertContains(res, "<dd>Available</dd>", 1, 200)
        self.assertContains(res, "<dd>40 GB</dd>", 1, 200)
        self.assertContains(res,
                            ("<a href=\"/project/instances/1/\">%s</a>"
                             % server.name),
                            1,
                            200)
        self.assertNoMessages()
    @test.create_stubs({cinder: ('volume_get',
                                 'volume_get_encryption_metadata'), })
    def test_encryption_detail_view_encrypted(self):
        """The encryption detail page of an encrypted volume lists the
        provider, control location, cipher and key size.
        """
        enc_meta = self.cinder_volume_encryption.first()
        volume = self.cinder_volumes.get(name='my_volume2')
        cinder.volume_get_encryption_metadata(
            IsA(http.HttpRequest), volume.id).AndReturn(enc_meta)
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:encryption_detail',
                      args=[volume.id])
        res = self.client.get(url)
        self.assertContains(res,
                            "<h1>Volume Encryption Details: "
                            "%s</h1>" % volume.name,
                            1, 200)
        self.assertContains(res, "<dd>%s</dd>" % volume.volume_type, 1, 200)
        self.assertContains(res, "<dd>%s</dd>" % enc_meta.provider, 1, 200)
        self.assertContains(res, "<dd>%s</dd>" % enc_meta.control_location, 1,
                            200)
        self.assertContains(res, "<dd>%s</dd>" % enc_meta.cipher, 1, 200)
        self.assertContains(res, "<dd>%s</dd>" % enc_meta.key_size, 1, 200)
        self.assertNoMessages()
    @test.create_stubs({cinder: ('volume_get',
                                 'volume_get_encryption_metadata'), })
    def test_encryption_detail_view_unencrypted(self):
        """The encryption detail page of an unencrypted volume shows the
        'Volume is Unencrypted' heading instead of metadata.
        """
        # Second fixture entry represents unencrypted metadata.
        enc_meta = self.cinder_volume_encryption.list()[1]
        volume = self.cinder_volumes.get(name='my_volume2')
        cinder.volume_get_encryption_metadata(
            IsA(http.HttpRequest), volume.id).AndReturn(enc_meta)
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:encryption_detail',
                      args=[volume.id])
        res = self.client.get(url)
        self.assertContains(res,
                            "<h1>Volume Encryption Details: "
                            "%s</h1>" % volume.name,
                            1, 200)
        self.assertContains(res, "<h3>Volume is Unencrypted</h3>", 1, 200)
        self.assertNoMessages()
    @test.create_stubs({cinder: ('tenant_absolute_limits',
                                 'volume_get',)})
    def test_get_data(self):
        """An AJAX row update for a volume with an empty name falls back
        to displaying the volume id as its name.
        """
        volume = self.cinder_volumes.get(name='v2_volume')
        volume._apiresource.name = ""
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
            .MultipleTimes().AndReturn(self.cinder_limits['absolute'])
        self.mox.ReplayAll()
        url = (VOLUME_INDEX_URL +
               "?action=row_update&table=volumes&obj_id=" + volume.id)
        res = self.client.get(url, {},
                              HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(res.status_code, 200)
        # The name property resolves to the id when the API name is blank.
        self.assertEqual(volume.name, volume.id)
    @test.create_stubs({cinder: ('volume_get',)})
    def test_detail_view_with_exception(self):
        """A cinder error while fetching the volume redirects the detail
        page back to the volume index.
        """
        volume = self.cinder_volumes.first()
        server = self.servers.first()
        volume.attachments = [{"server_id": server.id}]
        cinder.volume_get(IsA(http.HttpRequest), volume.id).\
            AndRaise(self.exceptions.cinder)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:detail',
                      args=[volume.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
    @test.create_stubs({cinder: ('volume_update',
                                 'volume_set_bootable',
                                 'volume_get',)})
    def test_update_volume(self):
        """Submitting the update form calls volume_update plus
        volume_set_bootable and redirects to the index.
        """
        volume = self.cinder_volumes.get(name="my_volume")
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        cinder.volume_update(IsA(http.HttpRequest),
                             volume.id,
                             volume.name,
                             volume.description)
        cinder.volume_set_bootable(IsA(http.HttpRequest),
                                   volume.id,
                                   False)
        self.mox.ReplayAll()
        formData = {'method': 'UpdateForm',
                    'name': volume.name,
                    'description': volume.description,
                    'bootable': False}
        url = reverse('horizon:project:volumes:volumes:update',
                      args=[volume.id])
        res = self.client.post(url, formData)
        self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
    @test.create_stubs({cinder: ('volume_update',
                                 'volume_set_bootable',
                                 'volume_get',)})
    def test_update_volume_without_name(self):
        """An empty name is accepted by the update form and passed through
        to volume_update as-is.
        """
        volume = self.cinder_volumes.get(name="my_volume")
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        cinder.volume_update(IsA(http.HttpRequest),
                             volume.id,
                             '',
                             volume.description)
        cinder.volume_set_bootable(IsA(http.HttpRequest),
                                   volume.id,
                                   False)
        self.mox.ReplayAll()
        formData = {'method': 'UpdateForm',
                    'name': '',
                    'description': volume.description,
                    'bootable': False}
        url = reverse('horizon:project:volumes:volumes:update',
                      args=[volume.id])
        res = self.client.post(url, formData)
        self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
    @test.create_stubs({cinder: ('volume_update',
                                 'volume_set_bootable',
                                 'volume_get',)})
    def test_update_volume_bootable_flag(self):
        """Checking the bootable box on the update form results in
        volume_set_bootable being called with True.
        """
        volume = self.cinder_bootable_volumes.get(name="my_volume")
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        cinder.volume_update(IsA(http.HttpRequest),
                             volume.id,
                             volume.name,
                             'update bootable flag')
        cinder.volume_set_bootable(IsA(http.HttpRequest),
                                   volume.id,
                                   True)
        self.mox.ReplayAll()
        formData = {'method': 'UpdateForm',
                    'name': volume.name,
                    'description': 'update bootable flag',
                    'bootable': True}
        url = reverse('horizon:project:volumes:volumes:update',
                      args=[volume.id])
        res = self.client.post(url, formData)
        self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
    @test.create_stubs({cinder: ('volume_upload_to_image',
                                 'volume_get')})
    def test_upload_to_image(self):
        """Uploading a volume to a Glance image succeeds, shows one info
        message and redirects to the index.
        """
        volume = self.cinder_volumes.get(name='v2_volume')
        # Canned API response for volume_upload_to_image.
        loaded_resp = {'container_format': 'bare',
                       'disk_format': 'raw',
                       'id': '741fe2ac-aa2f-4cec-82a9-4994896b43fb',
                       'image_id': '2faa080b-dd56-4bf0-8f0a-0d4627d8f306',
                       'image_name': 'test',
                       'size': '2',
                       'status': 'uploading'}
        form_data = {'id': volume.id,
                     'name': volume.name,
                     'image_name': 'testimage',
                     'force': True,
                     'container_format': 'bare',
                     'disk_format': 'raw'}
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        cinder.volume_upload_to_image(
            IsA(http.HttpRequest),
            form_data['id'],
            form_data['force'],
            form_data['image_name'],
            form_data['container_format'],
            form_data['disk_format']).AndReturn(loaded_resp)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:upload_to_image',
                      args=[volume.id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(info=1)
        redirect_url = VOLUME_INDEX_URL
        self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_get',
'volume_extend'),
quotas: ('tenant_limit_usages',)})
def test_extend_volume(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 120}
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndReturn(self.cinder_volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_extend(IsA(http.HttpRequest),
volume.id,
formData['new_size']).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
redirect_url = VOLUME_INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
    @test.create_stubs({cinder: ('volume_get',),
                        quotas: ('tenant_limit_usages',)})
    def test_extend_volume_with_wrong_size(self):
        """Extending to a size not greater than the current size produces
        a form error and no volume_extend call.
        """
        volume = self.cinder_volumes.first()
        usage_limit = {'maxTotalVolumeGigabytes': 100,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        # new_size 10 is smaller than the fixture volume's current size.
        formData = {'name': u'A Volume I Am Making',
                    'orig_size': volume.size,
                    'new_size': 10}
        cinder.volume_get(IsA(http.HttpRequest), volume.id).\
            AndReturn(self.cinder_volumes.first())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:extend',
                      args=[volume.id])
        res = self.client.post(url, formData)
        self.assertFormError(res, 'form', None,
                             "New size must be greater than "
                             "current size.")
    @test.create_stubs({cinder: ('volume_get',
                                 'tenant_absolute_limits')})
    def test_retype_volume_supported_action_item(self):
        """The row of a v2 volume offers the 'Change Volume Type' (retype)
        action (checked via an AJAX row update).
        """
        volume = self.cinder_volumes.get(name='v2_volume')
        limits = self.cinder_limits['absolute']
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
            .MultipleTimes('limits').AndReturn(limits)
        self.mox.ReplayAll()
        url = (VOLUME_INDEX_URL +
               "?action=row_update&table=volumes&obj_id=" + volume.id)
        res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(res.status_code, 200)
        self.assertContains(res, 'Change Volume Type')
        self.assertContains(res, 'retype')
    @test.create_stubs({cinder: ('volume_get',
                                 'volume_retype',
                                 'volume_type_list')})
    def test_retype_volume(self):
        """Submitting the retype form calls volume_retype with the chosen
        type and migration policy, then redirects to the index.
        """
        volume = self.cinder_volumes.get(name='my_volume2')
        volume_type = self.cinder_volume_types.get(name='vol_type_1')
        form_data = {'id': volume.id,
                     'name': volume.name,
                     'volume_type': volume_type.name,
                     'migration_policy': 'on-demand'}
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        cinder.volume_type_list(
            IsA(http.HttpRequest)).AndReturn(self.cinder_volume_types.list())
        cinder.volume_retype(
            IsA(http.HttpRequest),
            volume.id,
            form_data['volume_type'],
            form_data['migration_policy']).AndReturn(True)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:retype',
                      args=[volume.id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redirect_url = VOLUME_INDEX_URL
        self.assertRedirectsNoFollow(res, redirect_url)
def test_encryption_false(self):
    """Encryption column shows 'No' when all volumes are unencrypted."""
    self._test_encryption(False)

def test_encryption_true(self):
    """Encryption column shows 'Yes' when all volumes are encrypted."""
    self._test_encryption(True)
@test.create_stubs({cinder: ('volume_list',
                             'volume_snapshot_list',
                             'volume_backup_supported',
                             'tenant_absolute_limits'),
                    api.nova: ('server_list',)})
def _test_encryption(self, encryption):
    """Shared driver for the encryption-column tests.

    Forces every fixture volume's ``encrypted`` flag to ``encryption``,
    renders the volume index, and asserts the table's encryption cell
    matches in every row.
    """
    volumes = self.volumes.list()
    for volume in volumes:
        volume.encrypted = encryption
    limits = self.cinder_limits['absolute']

    cinder.volume_backup_supported(IsA(http.HttpRequest))\
        .MultipleTimes('backup_supported').AndReturn(False)
    cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
        .AndReturn(self.volumes.list())
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=None).\
        AndReturn(self.cinder_volume_snapshots.list())
    api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
        .AndReturn([self.servers.list(), False])
    cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
        .MultipleTimes('limits').AndReturn(limits)
    self.mox.ReplayAll()

    res = self.client.get(VOLUME_INDEX_URL)
    rows = res.context['volumes_table'].get_rows()

    if encryption:
        column_value = 'Yes'
    else:
        column_value = 'No'

    for row in rows:
        self.assertEqual(row.cells['encryption'].data, column_value)
@test.create_stubs({cinder: ('volume_get',),
                    quotas: ('tenant_limit_usages',)})
def test_extend_volume_with_size_out_of_quota(self):
    """Extending past the remaining gigabyte quota (100 - 20 = 80GB here)
    must fail validation on the new_size field."""
    volume = self.volumes.first()
    usage_limit = {'maxTotalVolumeGigabytes': 100,
                   'gigabytesUsed': 20,
                   'volumesUsed': len(self.volumes.list()),
                   'maxTotalVolumes': 6}
    formData = {'name': u'A Volume I Am Making',
                'orig_size': volume.size,
                'new_size': 1000}

    # tenant_limit_usages is recorded twice — the extend workflow
    # presumably queries quota usage on both form build and validation;
    # TODO confirm against the view before changing the ordering.
    quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
        AndReturn(usage_limit)
    cinder.volume_get(IsA(http.HttpRequest), volume.id).\
        AndReturn(self.volumes.first())
    quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
        AndReturn(usage_limit)
    self.mox.ReplayAll()

    url = reverse('horizon:project:volumes:volumes:extend',
                  args=[volume.id])
    res = self.client.post(url, formData)
    self.assertFormError(res, "form", "new_size",
                         "Volume cannot be extended to 1000GB as you only "
                         "have 80GB of your quota available.")
@test.create_stubs({cinder: ('volume_backup_supported',
                             'volume_list',
                             'volume_snapshot_list',
                             'tenant_absolute_limits'),
                    api.nova: ('server_list',)})
def test_create_transfer_availability(self):
    """The 'create transfer' row action must appear exactly for volumes
    whose status is 'available'."""
    limits = self.cinder_limits['absolute']

    cinder.volume_backup_supported(IsA(http.HttpRequest))\
        .MultipleTimes().AndReturn(False)
    cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
        .AndReturn(self.volumes.list())
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=None).\
        AndReturn([])
    api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
        .AndReturn([self.servers.list(), False])
    cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
        .MultipleTimes().AndReturn(limits)
    self.mox.ReplayAll()

    res = self.client.get(VOLUME_INDEX_URL)
    table = res.context['volumes_table']

    # Verify that the create transfer action is present if and only if
    # the volume is available
    for vol in table.data:
        actions = [a.name for a in table.get_row_actions(vol)]
        self.assertEqual('create_transfer' in actions,
                         vol.status == 'available')
@test.create_stubs({cinder: ('transfer_create',)})
def test_create_transfer(self):
    """Posting the create-transfer form calls cinder.transfer_create
    with the chosen volume id and transfer name."""
    volumes = self.volumes.list()
    volToTransfer = [v for v in volumes if v.status == 'available'][0]
    formData = {'volume_id': volToTransfer.id,
                'name': u'any transfer name'}

    cinder.transfer_create(IsA(http.HttpRequest),
                           formData['volume_id'],
                           formData['name']).AndReturn(
                               self.cinder_volume_transfers.first())
    self.mox.ReplayAll()

    # Create a transfer for the first available volume
    url = reverse('horizon:project:volumes:volumes:create_transfer',
                  args=[volToTransfer.id])
    res = self.client.post(url, formData)
    self.assertNoFormErrors(res)
@test.create_stubs({cinder: ('volume_backup_supported',
                             'volume_list',
                             'volume_snapshot_list',
                             'transfer_delete',
                             'tenant_absolute_limits'),
                    api.nova: ('server_list',)})
def test_delete_transfer(self):
    """Deleting a pending transfer from the table view calls
    cinder.transfer_delete and reports success to the user."""
    transfer = self.cinder_volume_transfers.first()

    volumes = []
    # Attach the volume transfer to the relevant volume
    for v in self.cinder_volumes.list():
        if v.id == transfer.volume_id:
            v.status = 'awaiting-transfer'
            v.transfer = transfer
        volumes.append(v)

    # Table batch-action POST payload: action name keyed by volume id.
    formData = {'action':
                'volumes__delete_transfer__%s' % transfer.volume_id}

    cinder.volume_backup_supported(IsA(http.HttpRequest))\
        .MultipleTimes().AndReturn(False)
    cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
        .AndReturn(volumes)
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=None).\
        AndReturn([])
    cinder.transfer_delete(IsA(http.HttpRequest), transfer.id)
    api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
        AndReturn([self.servers.list(), False])
    cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
        AndReturn(self.cinder_limits['absolute'])
    self.mox.ReplayAll()

    url = VOLUME_INDEX_URL
    res = self.client.post(url, formData, follow=True)
    self.assertNoFormErrors(res)
    self.assertIn('Successfully deleted volume transfer "test transfer"',
                  [m.message for m in res.context['messages']])
@test.create_stubs({cinder: ('transfer_accept',)})
def test_accept_transfer(self):
    """Posting the accept-transfer form calls cinder.transfer_accept
    with the transfer id and auth key."""
    transfer = self.cinder_volume_transfers.first()

    cinder.transfer_accept(IsA(http.HttpRequest), transfer.id,
                           transfer.auth_key)
    self.mox.ReplayAll()

    formData = {'transfer_id': transfer.id, 'auth_key': transfer.auth_key}
    url = reverse('horizon:project:volumes:volumes:accept_transfer')
    res = self.client.post(url, formData, follow=True)
    self.assertNoFormErrors(res)
| apache-2.0 |
cloudera/hue | desktop/core/ext-py/SQLAlchemy-1.3.17/test/base/test_except.py | 3 | 13032 | #! coding:utf-8
"""Tests exceptions and DB-API exception wrapping."""
from sqlalchemy import exc as sa_exceptions
from sqlalchemy.engine import default
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.util import compat
from sqlalchemy.util import u
class Error(Exception):
    # Root of the fake DB-API exception hierarchy these tests wrap.
    pass


class DatabaseError(Error):
    pass


class OperationalError(DatabaseError):
    pass


class ProgrammingError(DatabaseError):
    # Deliberately broken: __str__ reads an attribute that is never set,
    # so str() on an instance raises AttributeError.  Used by
    # WrapTest.test_db_error_busted_dbapi to exercise wrapping of a
    # DB-API whose exceptions can't even be stringified.
    def __str__(self):
        return "<%s>" % self.bogus
class OutOfSpec(DatabaseError):
    # Name outside the PEP-249 exception set; exercises fallback wrapping.
    pass


# exception with a totally different name...
class WrongNameError(DatabaseError):
    pass


# but they're going to call it their "IntegrityError"
IntegrityError = WrongNameError


# and they're going to subclass it!
class SpecificIntegrityError(WrongNameError):
    pass
class WrapTest(fixtures.TestBase):
    """Tests for ``DBAPIError.instance()``: wrapping of (fake) DB-API
    exceptions, stringification of statement/parameters, truncation of
    large parameter sets, and dialect-driven exception-name translation.
    """

    def _translating_dialect_fixture(self):
        # Dialect whose driver misnames its IntegrityError; the map lets
        # SQLAlchemy translate it back to the standard DB-API name.
        d = default.DefaultDialect()
        d.dbapi_exception_translation_map = {
            "WrongNameError": "IntegrityError"
        }
        return d

    def test_db_error_normal(self):
        try:
            raise sa_exceptions.DBAPIError.instance(
                "", [], OperationalError(), DatabaseError
            )
        except sa_exceptions.DBAPIError:
            self.assert_(True)

    def test_tostring(self):
        try:
            raise sa_exceptions.DBAPIError.instance(
                "this is a message", None, OperationalError(), DatabaseError
            )
        except sa_exceptions.DBAPIError as exc:
            eq_(
                str(exc),
                "(test.base.test_except.OperationalError) \n"
                "[SQL: this is a message]\n"
                "(Background on this error at: http://sqlalche.me/e/e3q8)",
            )

    def test_tostring_with_newlines(self):
        try:
            raise sa_exceptions.DBAPIError.instance(
                "this is a message\nthis is the next line\nthe last line",
                None,
                OperationalError(),
                DatabaseError,
            )
        except sa_exceptions.DBAPIError as exc:
            eq_(
                str(exc),
                "(test.base.test_except.OperationalError) \n"
                "[SQL: this is a message\nthis is the next line\n"
                "the last line]\n"
                "(Background on this error at: http://sqlalche.me/e/e3q8)",
            )

    def test_statement_error_no_code(self):
        try:
            raise sa_exceptions.DBAPIError.instance(
                "select * from table",
                [{"x": 1}],
                sa_exceptions.InvalidRequestError("hello"),
                DatabaseError,
            )
        except sa_exceptions.StatementError as err:
            eq_(
                str(err),
                "(sqlalchemy.exc.InvalidRequestError) hello\n"
                "[SQL: select * from table]\n[parameters: [{'x': 1}]]",
            )
            eq_(err.args, ("(sqlalchemy.exc.InvalidRequestError) hello",))

    def test_statement_error_w_code(self):
        # With a code, the "Background on this error" URL is appended.
        try:
            raise sa_exceptions.DBAPIError.instance(
                "select * from table",
                [{"x": 1}],
                sa_exceptions.InvalidRequestError("hello", code="abcd"),
                DatabaseError,
            )
        except sa_exceptions.StatementError as err:
            eq_(
                str(err),
                "(sqlalchemy.exc.InvalidRequestError) hello\n"
                "[SQL: select * from table]\n"
                "[parameters: [{'x': 1}]]\n"
                "(Background on this error at: http://sqlalche.me/e/abcd)",
            )
            eq_(err.args, ("(sqlalchemy.exc.InvalidRequestError) hello",))

    def test_wrap_multi_arg(self):
        # this is not supported by the API but oslo_db is doing it
        orig = sa_exceptions.DBAPIError(False, False, False)
        orig.args = [2006, "Test raise operational error"]
        eq_(
            str(orig),
            "(2006, 'Test raise operational error')\n"
            "(Background on this error at: http://sqlalche.me/e/dbapi)",
        )

    def test_wrap_unicode_arg(self):
        # this is not supported by the API but oslo_db is doing it
        orig = sa_exceptions.DBAPIError(False, False, False)
        orig.args = [u("méil")]
        eq_(
            compat.text_type(orig),
            compat.u(
                "méil\n(Background on this error at: "
                "http://sqlalche.me/e/dbapi)"
            ),
        )
        eq_(orig.args, (u("méil"),))

    def test_tostring_large_dict(self):
        try:
            raise sa_exceptions.DBAPIError.instance(
                "this is a message",
                {
                    "a": 1,
                    "b": 2,
                    "c": 3,
                    "d": 4,
                    "e": 5,
                    "f": 6,
                    "g": 7,
                    "h": 8,
                    "i": 9,
                    "j": 10,
                    "k": 11,
                },
                OperationalError(),
                DatabaseError,
            )
        except sa_exceptions.DBAPIError as exc:
            assert str(exc).startswith(
                "(test.base.test_except.OperationalError) \n"
                "[SQL: this is a message]\n"
                "[parameters: {"
            )

    def test_tostring_large_list(self):
        try:
            raise sa_exceptions.DBAPIError.instance(
                "this is a message",
                [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
                OperationalError(),
                DatabaseError,
            )
        except sa_exceptions.DBAPIError as ex:
            assert str(ex).startswith(
                "(test.base.test_except.OperationalError) \n"
                "[SQL: this is a message]\n[parameters: "
                "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]"
            )

    def test_tostring_large_executemany(self):
        # 10 parameter sets or fewer: rendered in full.
        try:
            raise sa_exceptions.DBAPIError.instance(
                "this is a message",
                [
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                ],
                OperationalError("sql error"),
                DatabaseError,
            )
        except sa_exceptions.DBAPIError as exc:
            eq_(
                str(exc),
                "(test.base.test_except.OperationalError) sql error\n"
                "[SQL: this is a message]\n"
                "[parameters: [{1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1},"
                " {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}]]\n"
                "(Background on this error at: http://sqlalche.me/e/e3q8)",
            )
            eq_(
                exc.args,
                ("(test.base.test_except.OperationalError) sql error",),
            )
        # More than 10 sets with ismulti=True: elided with a summary.
        try:
            raise sa_exceptions.DBAPIError.instance(
                "this is a message",
                [
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                    {1: 1},
                ],
                OperationalError(),
                DatabaseError,
                ismulti=True,
            )
        except sa_exceptions.DBAPIError as exc:
            eq_(
                str(exc),
                "(test.base.test_except.OperationalError) \n"
                "[SQL: this is a message]\n"
                "[parameters: [{1: 1}, "
                "{1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, "
                "{1: 1}, {1: 1} ... displaying 10 of 11 total "
                "bound parameter sets ... {1: 1}, {1: 1}]]\n"
                "(Background on this error at: http://sqlalche.me/e/e3q8)",
            )
        # Same two cases again with positional (tuple) parameter sets.
        try:
            raise sa_exceptions.DBAPIError.instance(
                "this is a message",
                [(1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,)],
                OperationalError(),
                DatabaseError,
            )
        except sa_exceptions.DBAPIError as exc:
            eq_(
                str(exc),
                "(test.base.test_except.OperationalError) \n"
                "[SQL: this is a message]\n"
                "[parameters: [(1,), "
                "(1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,)]]\n"
                "(Background on this error at: http://sqlalche.me/e/e3q8)",
            )
        try:
            raise sa_exceptions.DBAPIError.instance(
                "this is a message",
                [
                    (1,),
                    (1,),
                    (1,),
                    (1,),
                    (1,),
                    (1,),
                    (1,),
                    (1,),
                    (1,),
                    (1,),
                    (1,),
                ],
                OperationalError(),
                DatabaseError,
                ismulti=True,
            )
        except sa_exceptions.DBAPIError as exc:
            eq_(
                str(exc),
                "(test.base.test_except.OperationalError) \n"
                "[SQL: this is a message]\n"
                "[parameters: [(1,), "
                "(1,), (1,), (1,), (1,), (1,), (1,), (1,) "
                "... displaying 10 of 11 total bound "
                "parameter sets ... (1,), (1,)]]\n"
                "(Background on this error at: http://sqlalche.me/e/e3q8)",
            )

    def test_db_error_busted_dbapi(self):
        # ProgrammingError.__str__ raises; the wrapper must survive it.
        try:
            raise sa_exceptions.DBAPIError.instance(
                "", [], ProgrammingError(), DatabaseError
            )
        except sa_exceptions.DBAPIError as e:
            self.assert_(True)
            self.assert_("Error in str() of DB-API" in e.args[0])

    def test_db_error_noncompliant_dbapi(self):
        try:
            raise sa_exceptions.DBAPIError.instance(
                "", [], OutOfSpec(), DatabaseError
            )
        except sa_exceptions.DBAPIError as e:
            # OutOfSpec subclasses DatabaseError
            self.assert_(e.__class__ is sa_exceptions.DatabaseError)
        except OutOfSpec:
            self.assert_(False)

        # Non-DB-API exceptions fall back to plain DBAPIError.
        try:
            raise sa_exceptions.DBAPIError.instance(
                "", [], sa_exceptions.ArgumentError(), DatabaseError
            )
        except sa_exceptions.DBAPIError as e:
            self.assert_(e.__class__ is sa_exceptions.DBAPIError)
        except sa_exceptions.ArgumentError:
            self.assert_(False)

        # A translating dialect does not change that fallback.
        dialect = self._translating_dialect_fixture()
        try:
            raise sa_exceptions.DBAPIError.instance(
                "",
                [],
                sa_exceptions.ArgumentError(),
                DatabaseError,
                dialect=dialect,
            )
        except sa_exceptions.DBAPIError as e:
            self.assert_(e.__class__ is sa_exceptions.DBAPIError)
        except sa_exceptions.ArgumentError:
            self.assert_(False)

    def test_db_error_dbapi_uses_wrong_names(self):
        dialect = self._translating_dialect_fixture()

        # The dialect's translation map turns WrongNameError into
        # SQLAlchemy's IntegrityError...
        try:
            raise sa_exceptions.DBAPIError.instance(
                "", [], IntegrityError(), DatabaseError, dialect=dialect
            )
        except sa_exceptions.DBAPIError as e:
            self.assert_(e.__class__ is sa_exceptions.IntegrityError)

        # ...including for subclasses of the misnamed exception...
        try:
            raise sa_exceptions.DBAPIError.instance(
                "",
                [],
                SpecificIntegrityError(),
                DatabaseError,
                dialect=dialect,
            )
        except sa_exceptions.DBAPIError as e:
            self.assert_(e.__class__ is sa_exceptions.IntegrityError)

        try:
            raise sa_exceptions.DBAPIError.instance(
                "", [], SpecificIntegrityError(), DatabaseError
            )
        except sa_exceptions.DBAPIError as e:
            # doesn't work without a dialect
            self.assert_(e.__class__ is not sa_exceptions.IntegrityError)

    def test_db_error_keyboard_interrupt(self):
        # Control-flow exceptions must propagate unwrapped.
        try:
            raise sa_exceptions.DBAPIError.instance(
                "", [], KeyboardInterrupt(), DatabaseError
            )
        except sa_exceptions.DBAPIError:
            self.assert_(False)
        except KeyboardInterrupt:
            self.assert_(True)

    def test_db_error_system_exit(self):
        try:
            raise sa_exceptions.DBAPIError.instance(
                "", [], SystemExit(), DatabaseError
            )
        except sa_exceptions.DBAPIError:
            self.assert_(False)
        except SystemExit:
            self.assert_(True)
| apache-2.0 |
Shrulik/Open-Knesset | links/migrations/0006_auto__chg_field_linkedfile_link.py | 14 | 3052 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: make the ``LinkedFile.link`` foreign key
    nullable (auto-generated; the ``models`` dict below is a frozen ORM
    snapshot and should not be edited by hand)."""

    def forwards(self, orm):
        # Changing field 'LinkedFile.link'
        db.alter_column('links_linkedfile', 'link_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['links.Link'], null=True))

    def backwards(self, orm):
        # Changing field 'LinkedFile.link'
        db.alter_column('links_linkedfile', 'link_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['links.Link']))

    # Frozen model definitions used by South's fake ORM during migration.
    models = {
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'links.link': {
            'Meta': {'object_name': 'Link'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_link'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['links.LinkType']", 'null': 'True', 'blank': 'True'}),
            'object_pk': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
        },
        'links.linkedfile': {
            'Meta': {'object_name': 'LinkedFile'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'link': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['links.Link']", 'null': 'True', 'blank': 'True'}),
            'link_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'sha1': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
        },
        'links.linktype': {
            'Meta': {'object_name': 'LinkType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['links']
| bsd-3-clause |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python3.4/test/time_hashlib.py | 25 | 2895 | # It's intended that this script be run by hand. It runs speed tests on
# hashlib functions; it does not test for correctness.
import sys
import time
import hashlib
def creatorFunc():
    """Placeholder hash-object constructor.

    Every command-line branch below rebinds this global to a real
    constructor before any benchmark runs; reaching this body means the
    setup code was skipped.
    """
    message = "eek, creatorFunc not overridden"
    raise RuntimeError(message)
def test_scaled_msg(scale, name):
iterations = 106201//scale * 20
longStr = b'Z'*scale
localCF = creatorFunc
start = time.time()
for f in range(iterations):
x = localCF(longStr).digest()
end = time.time()
print(('%2.2f' % (end-start)), "seconds", iterations, "x", len(longStr), "bytes", name)
def test_create():
start = time.time()
for f in range(20000):
d = creatorFunc()
end = time.time()
print(('%2.2f' % (end-start)), "seconds", '[20000 creations]')
def test_zero():
start = time.time()
for f in range(20000):
x = creatorFunc().digest()
end = time.time()
print(('%2.2f' % (end-start)), "seconds", '[20000 "" digests]')
# Hash name to benchmark; raises IndexError if no argument was given.
hName = sys.argv[1]

#
# setup our creatorFunc to test the requested hash
#
# Each branch rebinds the module-level creatorFunc stub via exec/assignment.
if hName in ('_md5', '_sha'):
    # Legacy builtin modules (pre-hashlib interface).
    exec('import '+hName)
    exec('creatorFunc = '+hName+'.new')
    print("testing speed of old", hName, "legacy interface")
elif hName == '_hashlib' and len(sys.argv) > 3:
    # e.g. "_hashlib openssl_md5 fast": call the named constructor directly.
    import _hashlib
    exec('creatorFunc = _hashlib.%s' % sys.argv[2])
    print("testing speed of _hashlib.%s" % sys.argv[2], getattr(_hashlib, sys.argv[2]))
elif hName == '_hashlib' and len(sys.argv) == 3:
    # e.g. "_hashlib md5": go through _hashlib.new() each time.
    import _hashlib
    exec('creatorFunc = lambda x=_hashlib.new : x(%r)' % sys.argv[2])
    print("testing speed of _hashlib.new(%r)" % sys.argv[2])
elif hasattr(hashlib, hName) and hasattr(getattr(hashlib, hName), '__call__'):
    # A named constructor such as hashlib.md5 / hashlib.sha256.
    creatorFunc = getattr(hashlib, hName)
    print("testing speed of hashlib."+hName, getattr(hashlib, hName))
else:
    # Fall back to hashlib.new(name); may raise ValueError for bad names.
    exec("creatorFunc = lambda x=hashlib.new : x(%r)" % hName)
    print("testing speed of hashlib.new(%r)" % hName)

# A ValueError here means the requested hash name is unknown; print usage
# and re-raise.
try:
    test_create()
except ValueError:
    print()
    print("pass argument(s) naming the hash to run a speed test on:")
    print(" '_md5' and '_sha' test the legacy builtin md5 and sha")
    print(" '_hashlib' 'openssl_hName' 'fast' tests the builtin _hashlib")
    print(" '_hashlib' 'hName' tests builtin _hashlib.new(shaFOO)")
    print(" 'hName' tests the hashlib.hName() implementation if it exists")
    print(" otherwise it uses hashlib.new(hName).")
    print()
    raise

test_zero()
test_scaled_msg(scale=106201, name='[huge data]')
test_scaled_msg(scale=10620, name='[large data]')
test_scaled_msg(scale=1062, name='[medium data]')
test_scaled_msg(scale=424, name='[4*small data]')
test_scaled_msg(scale=336, name='[3*small data]')
test_scaled_msg(scale=212, name='[2*small data]')
test_scaled_msg(scale=106, name='[small data]')
test_scaled_msg(scale=creatorFunc().digest_size, name='[digest_size data]')
test_scaled_msg(scale=10, name='[tiny data]')
| gpl-2.0 |
AlkamiCode/snake-game | node_modules/node-gyp/gyp/pylib/gyp/__init__.py | 1524 | 22178 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
# Default debug modes for GYP
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
  """Print |message| (%-formatted with |args|) when debug mode |mode| or
  'all' is enabled in gyp.debug, prefixed with the caller's file:line.

  Python 2 only (print statement)."""
  if 'all' in gyp.debug or mode in gyp.debug:
    ctx = ('unknown', 0, 'unknown')
    try:
      # Second-to-top stack frame identifies the caller.
      f = traceback.extract_stack(limit=2)
      if f:
        ctx = f[0][:3]
    except:
      # Best-effort: fall back to the 'unknown' context on any failure.
      pass
    if args:
      message %= args
    print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
                              ctx[1], ctx[2], message)
def FindBuildFiles():
  """Return the names of all '.gyp' files in the current directory."""
  extension = '.gyp'
  return [entry for entry in os.listdir(os.getcwd())
          if entry.endswith(extension)]
def Load(build_files, format, default_variables={},
         includes=[], depth='.', params=None, check=False,
         circular_check=True, duplicate_basename_check=True):
  """
  Loads one or more specified build files.
  default_variables and includes will be copied before use.
  Returns the generator for the specified format and the
  data returned by loading the specified build files.
  """
  # NOTE: the mutable default arguments ({} and []) are safe here only
  # because default_variables is copied and includes is sliced below.
  if params is None:
    params = {}

  # "format-flavor" (e.g. "ninja-android") splits into format + flavor.
  if '-' in format:
    format, params['flavor'] = format.split('-', 1)

  default_variables = copy.copy(default_variables)

  # Default variables provided by this program and its modules should be
  # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
  # avoiding collisions with user and automatic variables.
  default_variables['GENERATOR'] = format
  default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '')

  # Format can be a custom python file, or by default the name of a module
  # within gyp.generator.
  if format.endswith('.py'):
    generator_name = os.path.splitext(format)[0]
    path, generator_name = os.path.split(generator_name)

    # Make sure the path to the custom generator is in sys.path
    # Don't worry about removing it once we are done.  Keeping the path
    # to each generator that is used in sys.path is likely harmless and
    # arguably a good idea.
    path = os.path.abspath(path)
    if path not in sys.path:
      sys.path.insert(0, path)
  else:
    generator_name = 'gyp.generator.' + format

  # These parameters are passed in order (as opposed to by key)
  # because ActivePython cannot handle key parameters to __import__.
  generator = __import__(generator_name, globals(), locals(), generator_name)
  for (key, val) in generator.generator_default_variables.items():
    default_variables.setdefault(key, val)

  # Give the generator the opportunity to set additional variables based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateVariables', None):
    generator.CalculateVariables(default_variables, params)

  # Give the generator the opportunity to set generator_input_info based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateGeneratorInputInfo', None):
    generator.CalculateGeneratorInputInfo(params)

  # Fetch the generator specific info that gets fed to input, we use getattr
  # so we can default things and the generators only have to provide what
  # they need.
  generator_input_info = {
    'non_configuration_keys':
        getattr(generator, 'generator_additional_non_configuration_keys', []),
    'path_sections':
        getattr(generator, 'generator_additional_path_sections', []),
    'extra_sources_for_rules':
        getattr(generator, 'generator_extra_sources_for_rules', []),
    'generator_supports_multiple_toolsets':
        getattr(generator, 'generator_supports_multiple_toolsets', False),
    'generator_wants_static_library_dependencies_adjusted':
        getattr(generator,
                'generator_wants_static_library_dependencies_adjusted', True),
    'generator_wants_sorted_dependencies':
        getattr(generator, 'generator_wants_sorted_dependencies', False),
    'generator_filelist_paths':
        getattr(generator, 'generator_filelist_paths', None),
  }

  # Process the input specific to this generator.
  result = gyp.input.Load(build_files, default_variables, includes[:],
                          depth, generator_input_info, check, circular_check,
                          duplicate_basename_check,
                          params['parallel'], params['root_targets'])
  return [generator] + result
def NameValueListToDict(name_value_list):
  """
  Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
  of the pairs.  If a string is simply NAME, then the value in the dictionary
  is set to True.  If VALUE can be converted to an integer, it is.
  """
  result = {}
  for item in name_value_list:
    name, sep, value = item.partition('=')
    if not sep:
      # Bare NAME with no '=': treat it as a boolean flag.
      result[name] = True
      continue
    # Prefer an integer value when the text parses as one.
    try:
      result[name] = int(value)
    except ValueError:
      result[name] = value
  return result
def ShlexEnv(env_name):
  """Split the environment variable |env_name| shell-style into a list.

  Returns [] when the variable is unset or empty.  (Always returns a
  list; previously an empty-but-set variable leaked out as the raw ''
  string, an inconsistent return type.)
  """
  value = os.environ.get(env_name)
  if not value:
    return []
  return shlex.split(value)
def FormatOpt(opt, value):
  """Rejoin a flag and its value: '--opt=value' for long options,
  'optVALUE' (no separator) for short ones."""
  if not opt.startswith('--'):
    return opt + value
  return '%s=%s' % (opt, value)
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
  """Regenerate a list of command line flags, for an option of action='append'.

  The |env_name|, if given, is checked in the environment and used to generate
  an initial list of options, then the options that were specified on the
  command line (given in |values|) are appended.  This matches the handling of
  environment variables and command line flags where command line flags override
  the environment, while not requiring the environment to be set when the flags
  are used again.
  """
  flags = []
  if options.use_environment and env_name:
    for flag_value in ShlexEnv(env_name):
      value = FormatOpt(flag, predicate(flag_value))
      # Move-to-end dedup: a repeated env value keeps only its last
      # occurrence, preserving the most recent relative ordering.
      if value in flags:
        flags.remove(value)
      flags.append(value)
  if values:
    for flag_value in values:
      # Command-line values are appended after (i.e. override) env values.
      flags.append(FormatOpt(flag, predicate(flag_value)))
  return flags
def RegenerateFlags(options):
  """Given a parsed options object, and taking the environment variables into
  account, returns a list of flags that should regenerate an equivalent options
  object (even in the absence of the environment variables.)

  Any path options will be normalized relative to depth.

  The format flag is not included, as it is assumed the calling generator will
  set that as appropriate.

  Python 2 only (dict.iteritems, print >> syntax).
  """
  def FixPath(path):
    # Normalize a path option relative to options.depth.
    path = gyp.common.FixIfRelativePath(path, options.depth)
    if not path:
      return os.path.curdir
    return path

  def Noop(value):
    # Identity predicate for non-path options.
    return value

  # We always want to ignore the environment when regenerating, to avoid
  # duplicate or changed flags in the environment at the time of regeneration.
  flags = ['--ignore-environment']
  for name, metadata in options._regeneration_metadata.iteritems():
    opt = metadata['opt']
    value = getattr(options, name)
    value_predicate = metadata['type'] == 'path' and FixPath or Noop
    action = metadata['action']
    env_name = metadata['env_name']
    if action == 'append':
      flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
                                        env_name, options))
    elif action in ('store', None):  # None is a synonym for 'store'.
      if value:
        flags.append(FormatOpt(opt, value_predicate(value)))
      elif options.use_environment and env_name and os.environ.get(env_name):
        flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
    elif action in ('store_true', 'store_false'):
      if ((action == 'store_true' and value) or
          (action == 'store_false' and not value)):
        flags.append(opt)
      elif options.use_environment and env_name:
        print >>sys.stderr, ('Warning: environment regeneration unimplemented '
                             'for %s flag %r env_name %r' % (action, opt,
                                                             env_name))
    else:
      print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
                           'flag %r' % (action, opt))

  return flags
class RegeneratableOptionParser(optparse.OptionParser):
  """OptionParser that records per-option regeneration metadata so
  RegenerateFlags() can later reconstruct an equivalent command line."""

  def __init__(self):
    # Maps option dest -> {'action', 'type', 'env_name', 'opt'}.
    self.__regeneratable_options = {}
    optparse.OptionParser.__init__(self)

  def add_option(self, *args, **kw):
    """Add an option to the parser.

    This accepts the same arguments as OptionParser.add_option, plus the
    following:
      regenerate: can be set to False to prevent this option from being included
        in regeneration.
      env_name: name of environment variable that additional values for this
        option come from.
      type: adds type='path', to tell the regenerator that the values of
        this option need to be made relative to options.depth
    """
    # Strip the extra keywords before delegating to optparse.
    env_name = kw.pop('env_name', None)
    if 'dest' in kw and kw.pop('regenerate', True):
      dest = kw['dest']

      # The path type is needed for regenerating, for optparse we can just treat
      # it as a string.
      type = kw.get('type')
      if type == 'path':
        kw['type'] = 'string'

      self.__regeneratable_options[dest] = {
          'action': kw.get('action'),
          'type': type,
          'env_name': env_name,
          'opt': args[0],
        }

    optparse.OptionParser.add_option(self, *args, **kw)

  def parse_args(self, *args):
    # Attach the collected metadata to the parsed values object so
    # RegenerateFlags(options) can find it.
    values, args = optparse.OptionParser.parse_args(self, *args)
    values._regeneration_metadata = self.__regeneratable_options
    return values, args
def gyp_main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('--build', dest='configs', action='append',
help='configuration for build after project generation')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
parser.add_option('--config-dir', dest='config_dir', action='store',
env_name='GYP_CONFIG_DIR', default=None,
help='The location for configuration files like '
'include.gypi.')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables", '
'"includes" and "general" or "all" for all of them.')
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
# --no-circular-check disables the check for circular relationships between
# .gyp files. These relationships should not exist, but they've only been
# observed to be harmful with the Xcode generator. Chromium's .gyp files
# currently have some circular relationships on non-Mac platforms, so this
# option allows the strict behavior to be used on Macs and the lenient
# behavior to be used elsewhere.
# TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
parser.add_option('--no-circular-check', dest='circular_check',
action='store_false', default=True, regenerate=False,
help="don't check for circular relationships between files")
# --no-duplicate-basename-check disables the check for duplicate basenames
# in a static_library/shared_library project. Visual C++ 2008 generator
# doesn't support this configuration. Libtool on Mac also generates warnings
# when duplicate basenames are passed into Make generator on Mac.
# TODO(yukawa): Remove this option when these legacy generators are
# deprecated.
parser.add_option('--no-duplicate-basename-check',
dest='duplicate_basename_check', action='store_false',
default=True, regenerate=False,
help="don't check for duplicate basenames")
parser.add_option('--no-parallel', action='store_true', default=False,
help='Disable multiprocessing')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
default=None, metavar='DIR', type='path',
help='directory to use as the root of the source tree')
parser.add_option('-R', '--root-target', dest='root_targets',
action='append', metavar='TARGET',
help='include only TARGET and its deep dependencies')
options, build_files_arg = parser.parse_args(args)
build_files = build_files_arg
# Set up the configuration directory (defaults to ~/.gyp)
if not options.config_dir:
home = None
home_dot_gyp = None
if options.use_environment:
home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None)
if home_dot_gyp:
home_dot_gyp = os.path.expanduser(home_dot_gyp)
if not home_dot_gyp:
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
else:
break
else:
home_dot_gyp = os.path.expanduser(options.config_dir)
if home_dot_gyp and not os.path.exists(home_dot_gyp):
home_dot_gyp = None
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split(r'[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
if sys.platform == 'darwin':
options.formats = ['xcode']
elif sys.platform in ('win32', 'cygwin'):
options.formats = ['msvs']
else:
options.formats = ['make']
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
options.parallel = not options.no_parallel
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for option, value in sorted(options.__dict__.items()):
if option[0] == '_':
continue
if isinstance(value, basestring):
DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value)
else:
DebugOutput(DEBUG_GENERAL, " %s: %s", option, value)
if not build_files:
build_files = FindBuildFiles()
if not build_files:
raise GypError((usage + '\n\n%s: error: no build_file') %
(my_name, my_name))
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
# absolute path to each build file's directory. The first "src" component
# found will be treated as though it were the path used for --depth.
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in xrange(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
raise GypError('Could not automatically locate src directory. This is'
'a temporary Chromium feature that will be removed. Use'
'--depth as a workaround.')
# If toplevel-dir is not set, we assume that depth is the root of our source
# tree.
if not options.toplevel_dir:
options.toplevel_dir = options.depth
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines:
defines += options.defines
cmdline_default_variables = NameValueListToDict(defines)
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL,
"cmdline_default_variables: %s", cmdline_default_variables)
# Set up includes.
includes = []
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
# .gyp file that's loaded, before anything else is included.
if home_dot_gyp != None:
default_include = os.path.join(home_dot_gyp, 'include.gypi')
if os.path.exists(default_include):
print 'Using overrides found in ' + default_include
includes.append(default_include)
# Command-line --include files come after the default include.
if options.includes:
includes.extend(options.includes)
# Generator flags should be prefixed with the target generator since they
# are global across all generator runs.
gen_flags = []
if options.use_environment:
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
if options.generator_flags:
gen_flags += options.generator_flags
generator_flags = NameValueListToDict(gen_flags)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags)
# Generate all requested formats (use a set in case we got one format request
# twice)
for format in set(options.formats):
params = {'options': options,
'build_files': build_files,
'generator_flags': generator_flags,
'cwd': os.getcwd(),
'build_files_arg': build_files_arg,
'gyp_binary': sys.argv[0],
'home_dot_gyp': home_dot_gyp,
'parallel': options.parallel,
'root_targets': options.root_targets,
'target_arch': cmdline_default_variables.get('target_arch', '')}
# Start with the default variables from the command line.
[generator, flat_list, targets, data] = Load(
build_files, format, cmdline_default_variables, includes, options.depth,
params, options.check, options.circular_check,
options.duplicate_basename_check)
# TODO(mark): Pass |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
# NOTE: flat_list is the flattened dependency graph specifying the order
# that targets may be built. Build systems that operate serially or that
# need to have dependencies defined before dependents reference them should
# generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params)
if options.configs:
valid_configs = targets[flat_list[0]]['configurations'].keys()
for conf in options.configs:
if conf not in valid_configs:
raise GypError('Invalid config specified via --build: %s' % conf)
generator.PerformBuild(data, options.configs, params)
# Done
return 0
def main(args):
  """Run gyp_main, converting a GypError into a one-line message on stderr
  and a non-zero exit status instead of a traceback.

  Args:
    args: command-line arguments, excluding the program name.
  Returns:
    0 on success, 1 when gyp_main raises GypError.
  """
  try:
    return gyp_main(args)
  except GypError as e:  # 'as' form is valid on Python 2.6+ and Python 3.
    sys.stderr.write("gyp: %s\n" % e)
    return 1
# NOTE: setuptools generated console_scripts calls function with no arguments
def script_main():
  """Zero-argument entry point: forwards sys.argv (minus the program name)
  to main(); used by the setuptools console_scripts wrapper."""
  return main(sys.argv[1:])
if __name__ == '__main__':
  sys.exit(script_main())
| mit |
ramiroluz/saap | saap/cerimonial/tests/test_model_processo.py | 2 | 4832 | from django.test import TestCase
from django.utils.datetime_safe import date
from saap.cerimonial.models import Processo, StatusProcesso, Contato, SEXO_CHOICE
from django.shortcuts import resolve_url as r
from saap.core.models import AreaTrabalho, User, Bairro
class ProcessoModelTest(TestCase):
    """Model tests for Processo.

    setUp builds the full fixture graph (status, user, workspace, contact,
    district) plus one Processo; the test methods assert field options
    (blank/null flags) and the values stored in setUp.
    """

    def setUp(self):
        """Create the related records and the Processo instance under test."""
        status_processo = StatusProcesso.objects.create(
            descricao='Status do processo'
        )
        user = User.objects.create(
            email='test@test.org',
            first_name='',
            last_name='',
        )
        workspace = AreaTrabalho.objects.create(
            nome='Área de trabalho',
            owner=user,
            modifier=user,
        )
        contato = Contato.objects.create(
            nome='Contato 1',
            sexo='M',
            naturalidade='Brasileiro',
            nome_pai='',
            nome_mae='',
            # numero_sus='',
            # cpf='',
            # titulo_eleitor="",
            # rg='',
            # rg_orgao_expedidor=''
            workspace=workspace,
            modifier=user,
            owner=user,
            # profissao='',
        )
        bairro = Bairro.objects.create(nome='Bairro')
        self.processo = Processo.objects.create(
            titulo='Título do processo',
            data=date(2016, 10, 10),
            protocolo='10004D',
            proto_cam='99.999999.9999',
            proto_pref='XX999FQG9',
            instituicao='Instituição',
            rua='Rua',
            orgao='Órgão',
            bairro=bairro,
            status=status_processo,
            urgente=True,
            data_solucao=date(2016, 10, 12),
            importancia='M',
            workspace=workspace,
            owner=user,
            modifier=user,
        )
        self.processo.contatos.add(contato)

    def test_create(self):
        self.assertTrue(Processo.objects.exists())

    def test_descricao_blank(self):
        field = Processo._meta.get_field('descricao')
        self.assertTrue(field.blank)

    def test_descricao_default_to_empty_string(self):
        self.assertEqual('', self.processo.descricao)

    def test_observacoes_blank(self):
        field = Processo._meta.get_field('observacoes')
        self.assertTrue(field.blank)

    def test_observacoes_default_to_empty_string(self):
        self.assertEqual('', self.processo.observacoes)

    def test_solucao_blank(self):
        field = Processo._meta.get_field('solucao')
        self.assertTrue(field.blank)

    def test_solucao_default_to_empty_string(self):
        self.assertEqual('', self.processo.solucao)

    def test_contatos_blank(self):
        field = Processo._meta.get_field('contatos')
        self.assertTrue(field.blank)

    def test_beneficiario_blank(self):
        field = Processo._meta.get_field('beneficiario')
        self.assertTrue(field.blank)

    def test_status_blank_and_null(self):
        field = Processo._meta.get_field('status')
        self.assertTrue(field.blank)
        self.assertTrue(field.null)

    def test_protocolo_null(self):
        # Checks both blank and null despite the name.
        field = Processo._meta.get_field('protocolo')
        self.assertTrue(field.blank)
        self.assertTrue(field.null)

    def test_date_value(self):
        self.assertEqual(date(2016, 10, 10), self.processo.data)

    def test_protocolo_value(self):
        self.assertEqual('10004D', self.processo.protocolo)

    def test_proto_cam_value(self):
        self.assertEqual('99.999999.9999', self.processo.proto_cam)

    def test_proto_pref_value(self):
        self.assertEqual('XX999FQG9', self.processo.proto_pref)

    # NOTE(review): method name has a typo ("insticuicao" for "instituicao");
    # renaming would change the test id, so it is left as-is.
    def test_insticuicao_value(self):
        self.assertEqual('Instituição', self.processo.instituicao)

    def test_rua_value(self):
        self.assertEqual('Rua', self.processo.rua)

    def test_orgao_value(self):
        self.assertEqual('Órgão', self.processo.orgao)

    def test_bairro_value(self):
        self.assertEqual('Bairro', self.processo.bairro.nome)

    def test_status_value(self):
        self.assertEqual('Status do processo', self.processo.status.descricao)

    def test_urgente_value(self):
        self.assertTrue(self.processo.urgente)

    def test_data_solucao_value(self):
        self.assertEqual(date(2016, 10, 12), self.processo.data_solucao)

    def test_importancia_blank(self):
        field = Processo._meta.get_field('importancia')
        self.assertTrue(field.blank)

    def test_topicos_blank(self):
        field = Processo._meta.get_field('topicos')
        self.assertTrue(field.blank)

    def test_classificacoes_blank(self):
        field = Processo._meta.get_field('classificacoes')
        self.assertTrue(field.blank)

    def test_assuntos_blank(self):
        field = Processo._meta.get_field('assuntos')
        self.assertTrue(field.blank)
tiagochiavericosta/edx-platform | openedx/core/djangoapps/user_api/serializers.py | 49 | 2111 | from django.contrib.auth.models import User
from rest_framework import serializers
from student.models import UserProfile
from .models import UserPreference
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes a User with the profile's name and a preference mapping."""

    name = serializers.SerializerMethodField("get_name")
    preferences = serializers.SerializerMethodField("get_preferences")

    def get_name(self, user):
        """Return the full name stored on the user's UserProfile row."""
        profile = UserProfile.objects.get(user=user)
        return profile.name

    def get_preferences(self, user):
        """Return the user's preferences as a {key: value} dict."""
        # Dict comprehension instead of dict([...]) - same result, clearer.
        return {pref.key: pref.value for pref in user.preferences.all()}

    class Meta(object):  # pylint: disable=missing-docstring
        model = User
        # This list is the minimal set required by the notification service
        fields = ("id", "url", "email", "name", "username", "preferences")
        read_only_fields = ("id", "email", "username")
class UserPreferenceSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for UserPreference that embeds the full user."""
    # Nested representation of the owning user (see UserSerializer).
    user = UserSerializer()
    class Meta(object):  # pylint: disable=missing-docstring
        model = UserPreference
        depth = 1
class RawUserPreferenceSerializer(serializers.ModelSerializer):
    """Serializer that generates a raw representation of a user preference.
    """
    # The user is rendered as its primary key only, unlike the nested
    # representation in UserPreferenceSerializer.
    user = serializers.PrimaryKeyRelatedField()
    class Meta(object):  # pylint: disable=missing-docstring
        model = UserPreference
        depth = 1
class ReadOnlyFieldsSerializerMixin(object):
    """
    Mixin for use with Serializers that provides a method
    `get_read_only_fields`, which returns a tuple of all read-only
    fields on the Serializer.
    """
    @classmethod
    def get_read_only_fields(cls):
        """
        Return all fields on this Serializer class which are read-only.

        Expects sub-classes implement Meta.explicit_read_only_fields,
        which is a tuple declaring read-only fields which were declared
        explicitly and thus could not be added to the usual
        cls.Meta.read_only_fields tuple.
        """
        # Default to an empty tuple (previously ''): concatenating a tuple
        # with a string raised TypeError whenever only one of the two Meta
        # attributes was defined.
        return (getattr(cls.Meta, 'read_only_fields', ()) +
                getattr(cls.Meta, 'explicit_read_only_fields', ()))
| agpl-3.0 |
ar4s/django | tests/datatypes/tests.py | 5 | 4208 | from __future__ import unicode_literals
import datetime
from django.test import TestCase, skipIfDBFeature
from django.utils import six
from django.utils.timezone import utc
from .models import Donut, RumBaba
class DataTypesTestCase(TestCase):
    """Round-trip tests for boolean, date/time and text model fields."""

    def test_boolean_type(self):
        d = Donut(name='Apple Fritter')
        self.assertFalse(d.is_frosted)
        self.assertTrue(d.has_sprinkles is None)
        d.has_sprinkles = True
        self.assertTrue(d.has_sprinkles)
        d.save()
        d2 = Donut.objects.get(name='Apple Fritter')
        self.assertFalse(d2.is_frosted)
        self.assertTrue(d2.has_sprinkles)

    def test_date_type(self):
        d = Donut(name='Apple Fritter')
        d.baked_date = datetime.date(year=1938, month=6, day=4)
        d.baked_time = datetime.time(hour=5, minute=30)
        d.consumed_at = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
        d.save()
        d2 = Donut.objects.get(name='Apple Fritter')
        self.assertEqual(d2.baked_date, datetime.date(1938, 6, 4))
        self.assertEqual(d2.baked_time, datetime.time(5, 30))
        self.assertEqual(d2.consumed_at, datetime.datetime(2007, 4, 20, 16, 19, 59))

    def test_time_field(self):
        # Test for ticket #12059: TimeField wrongly handling datetime.datetime object.
        d = Donut(name='Apple Fritter')
        d.baked_time = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
        d.save()
        d2 = Donut.objects.get(name='Apple Fritter')
        self.assertEqual(d2.baked_time, datetime.time(16, 19, 59))

    def test_year_boundaries(self):
        """Year boundary tests (ticket #3689)"""
        Donut.objects.create(name='Date Test 2007',
            baked_date=datetime.datetime(year=2007, month=12, day=31),
            consumed_at=datetime.datetime(year=2007, month=12, day=31, hour=23, minute=59, second=59))
        Donut.objects.create(name='Date Test 2006',
            baked_date=datetime.datetime(year=2006, month=1, day=1),
            consumed_at=datetime.datetime(year=2006, month=1, day=1))
        self.assertEqual("Date Test 2007",
            Donut.objects.filter(baked_date__year=2007)[0].name)
        self.assertEqual("Date Test 2006",
            Donut.objects.filter(baked_date__year=2006)[0].name)
        Donut.objects.create(name='Apple Fritter',
            consumed_at=datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59))
        self.assertEqual(['Apple Fritter', 'Date Test 2007'],
            list(Donut.objects.filter(consumed_at__year=2007).order_by('name').values_list('name', flat=True)))
        self.assertEqual(0, Donut.objects.filter(consumed_at__year=2005).count())
        self.assertEqual(0, Donut.objects.filter(consumed_at__year=2008).count())

    def test_textfields_unicode(self):
        """Regression test for #10238: TextField values returned from the
        database should be unicode."""
        d = Donut.objects.create(name='Jelly Donut', review='Outstanding')
        newd = Donut.objects.get(id=d.id)
        self.assertIsInstance(newd.review, six.text_type)

    @skipIfDBFeature('supports_timezones')
    def test_error_on_timezone(self):
        """Regression test for #8354: the MySQL and Oracle backends should raise
        an error if given a timezone-aware datetime object."""
        dt = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=utc)
        d = Donut(name='Bear claw', consumed_at=dt)
        self.assertRaises(ValueError, d.save)
        # ValueError: MySQL backend does not support timezone-aware datetimes.

    def test_datefield_auto_now_add(self):
        """Regression test for #10970, auto_now_add for DateField should store
        a Python datetime.date, not a datetime.datetime"""
        b = RumBaba.objects.create()
        # Verify we didn't break DateTimeField behavior
        self.assertIsInstance(b.baked_timestamp, datetime.datetime)
        # We need to test it this way because datetime.datetime inherits
        # from datetime.date.  The old code chained "and not isinstance(...)"
        # onto the assertion call, which evaluated to a no-op expression and
        # never checked anything; assert the negative case explicitly.
        self.assertIsInstance(b.baked_date, datetime.date)
        self.assertNotIsInstance(b.baked_date, datetime.datetime)
| bsd-3-clause |
GuillaumeGomez/servo | tests/wpt/web-platform-tests/tools/webdriver/webdriver/error.py | 82 | 3570 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import collections
class WebDriverException(Exception):
    """Base class for WebDriver protocol errors.

    Subclasses set ``http_status`` (the HTTP response status) and
    ``status_code`` (the WebDriver error string), which the module uses
    to build the status-code -> class lookup table consumed by ``get``.
    """
    http_status = None
    status_code = None
class ElementNotSelectableException(WebDriverException):
    """WebDriver "element not selectable" error (HTTP 400)."""
    http_status = 400
    status_code = "element not selectable"
class ElementNotVisibleException(WebDriverException):
    """WebDriver "element not visible" error (HTTP 400)."""
    http_status = 400
    status_code = "element not visible"
class InsecureCertificateException(WebDriverException):
    """WebDriver "insecure certificate" error (HTTP 400)."""
    http_status = 400
    status_code = "insecure certificate"
class InvalidArgumentException(WebDriverException):
    """WebDriver "invalid argument" error (HTTP 400)."""
    http_status = 400
    status_code = "invalid argument"
class InvalidCookieDomainException(WebDriverException):
    """WebDriver "invalid cookie domain" error (HTTP 400)."""
    http_status = 400
    status_code = "invalid cookie domain"
class InvalidElementCoordinatesException(WebDriverException):
    """WebDriver "invalid element coordinates" error (HTTP 400)."""
    http_status = 400
    status_code = "invalid element coordinates"
class InvalidElementStateException(WebDriverException):
    """WebDriver "invalid element state" error (HTTP 400).

    The status_code previously read "invalid cookie domain" -- a
    copy/paste slip from InvalidCookieDomainException that also made the
    two classes collide in the module's status-code lookup table.
    """
    http_status = 400
    status_code = "invalid element state"
class InvalidSelectorException(WebDriverException):
    """WebDriver "invalid selector" error (HTTP 400)."""
    http_status = 400
    status_code = "invalid selector"
class InvalidSessionIdException(WebDriverException):
    """WebDriver "invalid session id" error (HTTP 404)."""
    http_status = 404
    status_code = "invalid session id"
class JavascriptErrorException(WebDriverException):
    """WebDriver "javascript error" (HTTP 500)."""
    http_status = 500
    status_code = "javascript error"
class MoveTargetOutOfBoundsException(WebDriverException):
    """WebDriver "move target out of bounds" error (HTTP 500)."""
    http_status = 500
    status_code = "move target out of bounds"
class NoSuchAlertException(WebDriverException):
    """WebDriver "no such alert" error (HTTP 400)."""
    http_status = 400
    status_code = "no such alert"
class NoSuchElementException(WebDriverException):
    """WebDriver "no such element" error (HTTP 404)."""
    http_status = 404
    status_code = "no such element"
class NoSuchFrameException(WebDriverException):
    """WebDriver "no such frame" error (HTTP 400)."""
    http_status = 400
    status_code = "no such frame"
class NoSuchWindowException(WebDriverException):
    """WebDriver "no such window" error (HTTP 400)."""
    http_status = 400
    status_code = "no such window"
class ScriptTimeoutException(WebDriverException):
    """WebDriver "script timeout" error (HTTP 408)."""
    http_status = 408
    status_code = "script timeout"
class SessionNotCreatedException(WebDriverException):
    """WebDriver "session not created" error (HTTP 500)."""
    http_status = 500
    status_code = "session not created"
class StaleElementReferenceException(WebDriverException):
    """WebDriver "stale element reference" error (HTTP 400)."""
    http_status = 400
    status_code = "stale element reference"
class TimeoutException(WebDriverException):
    """WebDriver "timeout" error (HTTP 408)."""
    http_status = 408
    status_code = "timeout"
class UnableToSetCookieException(WebDriverException):
    """WebDriver "unable to set cookie" error (HTTP 500)."""
    http_status = 500
    status_code = "unable to set cookie"
class UnexpectedAlertOpenException(WebDriverException):
    """WebDriver "unexpected alert open" error (HTTP 500)."""
    http_status = 500
    status_code = "unexpected alert open"
class UnknownErrorException(WebDriverException):
    """WebDriver "unknown error" (HTTP 500)."""
    http_status = 500
    status_code = "unknown error"
class UnknownCommandException(WebDriverException):
    """WebDriver "unknown command" error (HTTP 404)."""
    http_status = 404
    status_code = "unknown command"
class UnknownMethodException(WebDriverException):
    """WebDriver "unknown method" error (HTTP 405)."""
    http_status = 405
    status_code = "unknown method"
class UnsupportedOperationException(WebDriverException):
    """WebDriver "unsupported operation" error (HTTP 500)."""
    http_status = 500
    status_code = "unsupported operation"
def get(status_code):
    """Look up the exception class registered for *status_code*.

    Falls back to ``WebDriverException`` when the code is unknown.
    """
    try:
        return _errors[status_code]
    except KeyError:
        return WebDriverException
# Lookup table: WebDriver status-code string -> exception class.
# A plain dict suffices: the previous `collections.defaultdict()` had no
# default factory, so it behaved exactly like a dict.  We iterate over a
# snapshot of locals() because binding `item` in this namespace mutates
# the mapping, which would break a live .values() view on Python 3.
_errors = {}
for item in list(locals().values()):
    if isinstance(item, type) and issubclass(item, WebDriverException):
        _errors[item.status_code] = item
| mpl-2.0 |
rgom/Pydev | plugins/org.python.pydev.jython/Lib/unittest/test/test_case.py | 89 | 43816 | import difflib
import pprint
import pickle
import re
import sys
from copy import deepcopy
from test import test_support
import unittest
from .support import (
TestEquality, TestHashing, LoggingResult, ResultWithNoStartTestRunStopTestRun
)
class Test(object):
    "Keep these TestCase classes out of the main namespace"

    class Foo(unittest.TestCase):
        # Minimal TestCase with both runTest and a named test method;
        # used as the equality/hashing fixture in Test_TestCase below.
        def runTest(self): pass
        def test1(self): pass

    class Bar(Foo):
        # Subclass used to check that equality distinguishes classes.
        def test2(self): pass

    class LoggingTestCase(unittest.TestCase):
        """A test case which logs its calls."""

        def __init__(self, events):
            # `events` is a caller-supplied shared list; each lifecycle
            # method appends its name so tests can assert the call order.
            super(Test.LoggingTestCase, self).__init__('test')
            self.events = events

        def setUp(self):
            self.events.append('setUp')

        def test(self):
            self.events.append('test')

        def tearDown(self):
            self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by TestEquality.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest'))
,(Test.Foo('test1'), Test.Bar('test1'))
,(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
class Test(unittest.TestCase):
def runTest(self): raise TypeError()
def test(self): pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise TypeError()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self): raise TypeError()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'addError', 'tearDown',
'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'addError',
'tearDown', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'addFailure', 'tearDown',
'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'addFailure',
'tearDown', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun.
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertTrue(Foo('test').failureException is AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), basestring)
# "If result is omitted or None, a temporary result object is created
# and used, but is not made available to the caller. As TestCase owns the
# temporary result startTestRun and stopTestRun are called.
def test_run__uses_defaultTestResult(self):
events = []
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return LoggingResult(events)
# Make run() find a result object on its own
Foo('test').run()
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
returned used in the short description, no matter how long the
whole thing is.
"""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a longer '
'docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertNotEqual(s1, s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) is type(b) is SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
# No this doesn't clean up and remove the SadSnake equality func
# from this TestCase instance but since its a local nothing else
# will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({1: "one"}, {})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 2}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with test_support.check_warnings(("", UnicodeWarning)):
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing the failure msg
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'foo': one}, {'foo': u'\uFFFD'})
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
# This mess of try excepts is to test the assertEqual behavior
# itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
self.assertDictEqual({}, {})
c = { 'x': 1 }
d = {}
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80*8)
seq1 = 'a' + 'x' * 80**2
seq2 = 'b' + 'x' * 80**2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
# the +1 is the leading \n added by assertSequenceEqual
omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff)//2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) < len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
self.maxDiff = 1
message = self._truncateMessage('foo', 'bar')
omitted = unittest.case.DIFF_OMITTED % len('bar')
self.assertEqual(message, 'foo' + omitted)
self.maxDiff = None
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
self.maxDiff = 4
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
def testAssertDictEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertDictEqual({}, {1: 0})
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
# check threshold value
self.assertEqual(self._diffThreshold, 2**16)
# disable madDiff to get diff markers
self.maxDiff = None
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 2**8
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
# under the threshold: diff marker (^) in error message
s = u'x' * (2**7)
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s + 'a', s + 'b')
self.assertIn('^', str(cm.exception))
self.assertEqual(s + 'a', s + 'a')
# over the threshold: diff not used and marker (^) not in error message
s = u'x' * (2**9)
# if the path that uses difflib is taken, _truncateMessage will be
# called -- replace it with explodingTruncation to verify that this
# doesn't happen
def explodingTruncation(message, diff):
raise SystemError('this should not be raised')
old_truncate = self._truncateMessage
self._truncateMessage = explodingTruncation
self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
self.assertNotIn('^', str(cm.exception))
self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
self.assertEqual(s + 'a', s + 'a')
def testAssertItemsEqual(self):
a = object()
self.assertItemsEqual([1, 2, 3], [3, 2, 1])
self.assertItemsEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertItemsEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
self.assertItemsEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, 2] + [3] * 100, [1] * 100 + [2, 3])
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, "2", "a", "a"], ["a", "2", True, 1])
self.assertRaises(self.failureException, self.assertItemsEqual,
[10], [10, 11])
self.assertRaises(self.failureException, self.assertItemsEqual,
[10, 11], [10])
self.assertRaises(self.failureException, self.assertItemsEqual,
[10, 11, 10], [10, 11])
# Test that sequences of unhashable objects can be tested for sameness:
self.assertItemsEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
# Test that iterator of unhashable objects can be tested for sameness:
self.assertItemsEqual(iter([1, 2, [], 3, 4]),
iter([1, 2, [], 3, 4]))
# hashable types, but not orderable
self.assertRaises(self.failureException, self.assertItemsEqual,
[], [divmod, 'x', 1, 5j, 2j, frozenset()])
# comparing dicts
self.assertItemsEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
# comparing heterogenous non-hashable sequences
self.assertItemsEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
self.assertRaises(self.failureException, self.assertItemsEqual,
[], [divmod, [], 'x', 1, 5j, 2j, set()])
self.assertRaises(self.failureException, self.assertItemsEqual,
[[1]], [[2]])
# Same elements, but not same sequence length
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, 1, 2], [2, 1])
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, 1, "2", "a", "a"], ["2", "2", True, "a"])
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, {'b': 2}, None, True], [{'b': 2}, True, None])
# Same elements which don't reliably compare, in
# different order, see issue 10242
a = [{2,4}, {1,2}]
b = a[::-1]
self.assertItemsEqual(a, b)
# test utility functions supporting assertItemsEqual()
diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
diffs = unittest.util._count_diff_all_purpose([[]], [])
self.assertEqual(diffs, [(1, 0, [])])
diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = "foo"
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try Unicode
self.assertGreater(u'bug', u'ant')
self.assertGreaterEqual(u'bug', u'ant')
self.assertGreaterEqual(u'ant', u'ant')
self.assertLess(u'ant', u'bug')
self.assertLessEqual(u'ant', u'bug')
self.assertLessEqual(u'ant', u'ant')
self.assertRaises(self.failureException, self.assertGreater, u'ant', u'bug')
self.assertRaises(self.failureException, self.assertGreater, u'ant', u'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, u'ant',
u'bug')
self.assertRaises(self.failureException, self.assertLess, u'bug', u'ant')
self.assertRaises(self.failureException, self.assertLess, u'ant', u'ant')
self.assertRaises(self.failureException, self.assertLessEqual, u'bug', u'ant')
# Try Mixed String/Unicode
self.assertGreater('bug', u'ant')
self.assertGreater(u'bug', 'ant')
self.assertGreaterEqual('bug', u'ant')
self.assertGreaterEqual(u'bug', 'ant')
self.assertGreaterEqual('ant', u'ant')
self.assertGreaterEqual(u'ant', 'ant')
self.assertLess('ant', u'bug')
self.assertLess(u'ant', 'bug')
self.assertLessEqual('ant', u'bug')
self.assertLessEqual(u'ant', 'bug')
self.assertLessEqual('ant', u'ant')
self.assertLessEqual(u'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', u'bug')
self.assertRaises(self.failureException, self.assertGreater, u'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', u'ant')
self.assertRaises(self.failureException, self.assertGreater, u'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant',
u'bug')
self.assertRaises(self.failureException, self.assertGreaterEqual, u'ant',
'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', u'ant')
self.assertRaises(self.failureException, self.assertLess, u'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', u'ant')
self.assertRaises(self.failureException, self.assertLess, u'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', u'ant')
self.assertRaises(self.failureException, self.assertLessEqual, u'bug', 'ant')
def testAssertMultiLineEqual(self):
sample_text = b"""\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = b"""\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = b"""\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
self.maxDiff = None
for type_changer in (lambda x: x, lambda x: x.decode('utf8')):
try:
self.assertMultiLineEqual(type_changer(sample_text),
type_changer(revised_sample_text))
except self.failureException, e:
# need to remove the first line of the error message
error = str(e).encode('utf8').split('\n', 1)[1]
# assertMultiLineEqual is hooked up as the default for
# unicode strings - so we can't use it for this check
self.assertTrue(sample_text_error == error)
def testAsertEqualSingleLine(self):
sample_text = u"laden swallows fly slowly"
revised_sample_text = u"unladen swallows fly quickly"
sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
error = str(e).split('\n', 1)[1]
self.assertTrue(sample_text_error == error)
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegexpMatches(self):
self.assertRegexpMatches('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegexpMatches,
'saaas', r'aaaa')
def testAssertRaisesRegexp(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegexp(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegexp(ExceptionMock, 'expect$', Stub)
self.assertRaisesRegexp(ExceptionMock, u'expect$', Stub)
def testAssertNotRaisesRegexp(self):
self.assertRaisesRegexp(
self.failureException, '^Exception not raised$',
self.assertRaisesRegexp, Exception, re.compile('x'),
lambda: None)
self.assertRaisesRegexp(
self.failureException, '^Exception not raised$',
self.assertRaisesRegexp, Exception, 'x',
lambda: None)
self.assertRaisesRegexp(
self.failureException, '^Exception not raised$',
self.assertRaisesRegexp, Exception, u'x',
lambda: None)
def testAssertRaisesRegexpMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesRegexp(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegexp, Exception, '^Expected$',
Stub)
self.assertRaisesRegexp(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegexp, Exception, u'^Expected$',
Stub)
self.assertRaisesRegexp(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegexp, Exception,
re.compile('^Expected$'), Stub)
def testAssertRaisesExcValue(self):
class ExceptionMock(Exception):
pass
def Stub(foo):
raise ExceptionMock(foo)
v = "particular value"
ctx = self.assertRaises(ExceptionMock)
with ctx:
Stub(v)
e = ctx.exception
self.assertIsInstance(e, ExceptionMock)
self.assertEqual(e.args[0], v)
def testSynonymAssertMethodNames(self):
"""Test undocumented method name synonyms.
Please do not use these methods names in your own code.
This test confirms their continued existence and functionality
in order to avoid breaking existing code.
"""
self.assertNotEquals(3, 5)
self.assertEquals(3, 3)
self.assertAlmostEquals(2.0, 2.0)
self.assertNotAlmostEquals(3.0, 5.0)
self.assert_(True)
def testPendingDeprecationMethodNames(self):
"""Test fail* methods pending deprecation, they will warn in 3.2.
Do not use these methods. They will go away in 3.3.
"""
with test_support.check_warnings():
self.failIfEqual(3, 5)
self.failUnlessEqual(3, 3)
self.failUnlessAlmostEqual(2.0, 2.0)
self.failIfAlmostEqual(3.0, 5.0)
self.failUnless(True)
self.failUnlessRaises(TypeError, lambda _: 3.14 + u'spam')
self.failIf(False)
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
with self.assertRaises(KeyboardInterrupt):
klass('test_something').run()
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
if __name__ == '__main__':
unittest.main()
| epl-1.0 |
todaychi/hue | desktop/core/ext-py/lxml-3.3.6/benchmark/bench_objectify.py | 18 | 3322 | import sys, copy
from itertools import *
import benchbase
from benchbase import (with_attributes, with_text, onlylib,
serialized, children, nochange)
############################################################
# Benchmarks
############################################################
class BenchMark(benchbase.TreeBenchMark):
repeat100 = range(100)
repeat1000 = range(1000)
repeat3000 = range(3000)
def __init__(self, lib):
from lxml import etree, objectify
self.objectify = objectify
parser = etree.XMLParser(remove_blank_text=True)
lookup = objectify.ObjectifyElementClassLookup()
parser.setElementClassLookup(lookup)
super(BenchMark, self).__init__(etree, parser)
@nochange
def bench_attribute(self, root):
"1 2 4"
for i in self.repeat3000:
root.zzzzz
def bench_attribute_assign_int(self, root):
"1 2 4"
for i in self.repeat3000:
root.XYZ = 5
def bench_attribute_assign_string(self, root):
"1 2 4"
for i in self.repeat3000:
root.XYZ = "5"
@nochange
def bench_attribute_cached(self, root):
"1 2 4"
cache = root.zzzzz
for i in self.repeat3000:
root.zzzzz
@nochange
def bench_attributes_deep(self, root):
"1 2 4"
for i in self.repeat3000:
root.zzzzz['{cdefg}a00001']
@nochange
def bench_attributes_deep_cached(self, root):
"1 2 4"
cache1 = root.zzzzz
cache2 = cache1['{cdefg}a00001']
for i in self.repeat3000:
root.zzzzz['{cdefg}a00001']
@nochange
def bench_objectpath(self, root):
"1 2 4"
path = self.objectify.ObjectPath(".zzzzz")
for i in self.repeat3000:
path(root)
@nochange
def bench_objectpath_deep(self, root):
"1 2 4"
path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001")
for i in self.repeat3000:
path(root)
@nochange
def bench_objectpath_deep_cached(self, root):
"1 2 4"
cache1 = root.zzzzz
cache2 = cache1['{cdefg}a00001']
path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001")
for i in self.repeat3000:
path(root)
@with_text(text=True, utext=True, no_text=True)
def bench_annotate(self, root):
self.objectify.annotate(root)
@nochange
def bench_descendantpaths(self, root):
root.descendantpaths()
@nochange
@with_text(text=True)
def bench_type_inference(self, root):
"1 2 4"
el = root.aaaaa
for i in self.repeat1000:
el.getchildren()
@nochange
@with_text(text=True)
def bench_type_inference_annotated(self, root):
"1 2 4"
el = root.aaaaa
self.objectify.annotate(el)
for i in self.repeat1000:
el.getchildren()
@nochange
@children
def bench_elementmaker(self, children):
E = self.objectify.E
for child in children:
root = E.this(
"test",
E.will(
E.do("nothing"),
E.special,
)
)
if __name__ == '__main__':
benchbase.main(BenchMark)
| apache-2.0 |
nemesisdesign/django | tests/forms_tests/widget_tests/test_checkboxinput.py | 64 | 3460 | from django.forms import CheckboxInput
from .base import WidgetTest
class CheckboxInputTest(WidgetTest):
widget = CheckboxInput()
def test_render_empty(self):
self.check_html(self.widget, 'is_cool', '', html='<input type="checkbox" name="is_cool" />')
def test_render_none(self):
self.check_html(self.widget, 'is_cool', None, html='<input type="checkbox" name="is_cool" />')
def test_render_false(self):
self.check_html(self.widget, 'is_cool', False, html='<input type="checkbox" name="is_cool" />')
def test_render_true(self):
self.check_html(
self.widget, 'is_cool', True,
html='<input checked type="checkbox" name="is_cool" />'
)
def test_render_value(self):
"""
Using any value that's not in ('', None, False, True) will check the
checkbox and set the 'value' attribute.
"""
self.check_html(
self.widget, 'is_cool', 'foo',
html='<input checked type="checkbox" name="is_cool" value="foo" />',
)
def test_render_int(self):
"""
Integers are handled by value, not as booleans (#17114).
"""
self.check_html(
self.widget, 'is_cool', 0,
html='<input checked type="checkbox" name="is_cool" value="0" />',
)
self.check_html(
self.widget, 'is_cool', 1,
html='<input checked type="checkbox" name="is_cool" value="1" />',
)
def test_render_check_test(self):
"""
You can pass 'check_test' to the constructor. This is a callable that
takes the value and returns True if the box should be checked.
"""
widget = CheckboxInput(check_test=lambda value: value.startswith('hello'))
self.check_html(widget, 'greeting', '', html=(
'<input type="checkbox" name="greeting" />'
))
self.check_html(widget, 'greeting', 'hello', html=(
'<input checked type="checkbox" name="greeting" value="hello" />'
))
self.check_html(widget, 'greeting', 'hello there', html=(
'<input checked type="checkbox" name="greeting" value="hello there" />'
))
self.check_html(widget, 'greeting', 'hello & goodbye', html=(
'<input checked type="checkbox" name="greeting" value="hello & goodbye" />'
))
def test_render_check_exception(self):
"""
Calling check_test() shouldn't swallow exceptions (#17888).
"""
widget = CheckboxInput(
check_test=lambda value: value.startswith('hello'),
)
with self.assertRaises(AttributeError):
widget.render('greeting', True)
def test_value_from_datadict(self):
"""
The CheckboxInput widget will return False if the key is not found in
the data dictionary (because HTML form submission doesn't send any
result for unchecked checkboxes).
"""
self.assertFalse(self.widget.value_from_datadict({}, {}, 'testing'))
def test_value_from_datadict_string_int(self):
value = self.widget.value_from_datadict({'testing': '0'}, {}, 'testing')
self.assertIs(value, True)
def test_value_omitted_from_data(self):
self.assertIs(self.widget.value_omitted_from_data({'field': 'value'}, {}, 'field'), False)
self.assertIs(self.widget.value_omitted_from_data({}, {}, 'field'), False)
| bsd-3-clause |
aspidites/django | tests/view_tests/tests/test_debug.py | 150 | 39389 | # -*- coding: utf-8 -*-
# This coding header is significant for tests, as the debug view is parsing
# files to search for such a header to decode the source file content
from __future__ import unicode_literals
import importlib
import inspect
import os
import re
import sys
import tempfile
from unittest import skipIf
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.db import DatabaseError, connection
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import SimpleLazyObject
from django.views.debug import (
CallableSettingWrapper, ExceptionReporter, technical_500_response,
)
from .. import BrokenException, except_args
from ..views import (
custom_exception_reporter_filter_view, multivalue_dict_key_error,
non_sensitive_view, paranoid_view, sensitive_args_function_caller,
sensitive_kwargs_function_caller, sensitive_method_view, sensitive_view,
)
if six.PY3:
from .py3_test_debug import Py3ExceptionReporterTests # NOQA
class CallableSettingWrapperTests(SimpleTestCase):
    """ Unittests for CallableSettingWrapper
    """
    def test_repr(self):
        # The wrapper must delegate repr() to the wrapped callable instead
        # of exposing its own representation.
        class WrappedCallable(object):
            def __repr__(self):
                return "repr from the wrapped callable"

            def __call__(self):
                pass

        wrapped = CallableSettingWrapper(WrappedCallable())
        self.assertEqual(repr(wrapped), "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls")
class DebugViewTests(LoggingCaptureMixin, SimpleTestCase):
    """Technical 400/403/404/500 debug pages served while DEBUG=True."""

    def test_files(self):
        # The debug page shows uploaded file names but not their contents.
        response = self.client.get('/raises/')
        self.assertEqual(response.status_code, 500)

        data = {
            'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
        }
        response = self.client.post('/raises/', data)
        self.assertContains(response, 'file_data.txt', status_code=500)
        self.assertNotContains(response, 'haha', status_code=500)

    def test_400(self):
        # Ensure that when DEBUG=True, technical_500_template() is called.
        response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    # Ensure no 403.html template exists to test the default case.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
    }])
    def test_403(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    # Set up a test 403.html template.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '403.html': 'This is a test template for a 403 error ({{ exception }}).',
                }),
            ],
        },
    }])
    def test_403_template(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, 'test template', status_code=403)
        self.assertContains(response, '(Insufficient Permissions).', status_code=403)

    def test_404(self):
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_raised_404(self):
        response = self.client.get('/views/raises404/')
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    def test_404_not_in_urls(self):
        # A plain URL miss shows the URLconf debug page without "Raised by:".
        response = self.client.get('/not-in-urls')
        self.assertNotContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    def test_technical_404(self):
        response = self.client.get('/views/technical404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.technical404", status_code=404)

    def test_classbased_technical_404(self):
        response = self.client.get('/views/classbased404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.Http404View", status_code=404)

    def test_view_exceptions(self):
        for n in range(len(except_args)):
            self.assertRaises(BrokenException, self.client.get,
                reverse('view_exception', args=(n,)))

    def test_non_l10ned_numeric_ids(self):
        """
        Numeric IDs and fancy traceback context blocks line numbers shouldn't be localized.
        """
        with self.settings(DEBUG=True, USE_L10N=True):
            response = self.client.get('/raises500/')
            # We look for a HTML fragment of the form
            # '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
            self.assertContains(response, '<div class="context" id="', status_code=500)
            match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
            self.assertIsNotNone(match)
            id_repr = match.group('id')
            self.assertFalse(re.search(b'[^c0-9]', id_repr),
                "Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr)

    def test_template_exceptions(self):
        for n in range(len(except_args)):
            try:
                self.client.get(reverse('template_exception', args=(n,)))
            except Exception:
                # The innermost traceback frame must be the line that raised
                # BrokenException, not template machinery.
                raising_loc = inspect.trace()[-1][-2][0].strip()
                self.assertNotEqual(raising_loc.find('raise BrokenException'), -1,
                    "Failed to find 'raise BrokenException' in last frame of traceback, instead found: %s" %
                    raising_loc)

    def test_template_loader_postmortem(self):
        """Tests for not existing file"""
        template_name = "notfound.html"
        with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
            tempdir = os.path.dirname(tmpfile.name)
            template_path = os.path.join(tempdir, template_name)
            with override_settings(TEMPLATES=[{
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [tempdir],
            }]):
                response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
            self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)

    def test_no_template_source_loaders(self):
        """
        Make sure if you don't specify a template, the debug view doesn't blow up.
        """
        self.assertRaises(TemplateDoesNotExist, self.client.get, '/render_no_template/')

    @override_settings(ROOT_URLCONF='view_tests.default_urls')
    def test_default_urlconf_template(self):
        """
        Make sure that the default urlconf template is shown instead
        of the technical 404 page, if the user has not altered their
        url conf yet.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "<h2>Congratulations on your first Django-powered page.</h2>"
        )

    @override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
    def test_regression_21530(self):
        """
        Regression test for bug #21530.

        If the admin app include is replaced with exactly one url
        pattern, then the technical 404 template should be displayed.

        The bug here was that an AttributeError caused a 500 response.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "Page not found <span>(404)</span>",
            status_code=404
        )
class DebugViewQueriesAllowedTests(SimpleTestCase):
    """Debug page rendering when a real database exception is involved."""

    # May need a query to initialize MySQL connection
    allow_database_queries = True

    def test_handle_db_exception(self):
        """
        Ensure the debug view works when a database exception is raised by
        performing an invalid query and passing the exception to the debug view.
        """
        exc_info = None
        with connection.cursor() as cursor:
            try:
                cursor.execute('INVALID SQL')
            except DatabaseError:
                exc_info = sys.exc_info()

        request = RequestFactory().get('/')
        response = technical_500_response(request, *exc_info)
        self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
    DEBUG=True,
    ROOT_URLCONF="view_tests.urls",
    # No template directories are configured, so no templates will be found.
    TEMPLATES=[{
        'BACKEND': 'django.template.backends.dummy.TemplateStrings',
    }],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
    """Debug pages still render when the template backend is not Django's."""

    def test_400(self):
        # Ensure that when DEBUG=True, technical_500_template() is called.
        response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    def test_403(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    def test_404(self):
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_template_not_found_error(self):
        # Raises a TemplateDoesNotExist exception and shows the debug view.
        url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
        response = self.client.get(url)
        self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
    """HTML traceback reports built by ExceptionReporter.get_traceback_html()."""

    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ValueError at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ValueError</h1>', html)
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_eol_support(self):
        """Test that the ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
        LINES = list('print %d' % i for i in range(1, 6))
        reporter = ExceptionReporter(None, None, None, None)

        for newline in ['\n', '\r\n', '\r']:
            fd, filename = tempfile.mkstemp(text=False)
            os.write(fd, force_bytes(newline.join(LINES) + newline))
            os.close(fd)

            try:
                # (pre-context line number, pre-context, context line,
                #  post-context) for line 3 with 2 lines of context.
                self.assertEqual(
                    reporter._get_lines_from_file(filename, 3, 2),
                    (1, LINES[1:3], LINES[3], LINES[4:])
                )
            finally:
                os.unlink(filename)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_message_only(self):
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report</h1>', html)
        self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_non_utf8_values_handling(self):
        "Non-UTF-8 exceptions/values should not make the output generation choke."
        try:
            class NonUtf8Output(Exception):
                def __repr__(self):
                    return b'EXC\xe9EXC'
            somevar = b'VAL\xe9VAL'  # NOQA
            raise NonUtf8Output()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('VAL\\xe9VAL', html)
        self.assertIn('EXC\\xe9EXC', html)

    def test_unprintable_values_handling(self):
        "Unprintable values should not make the output generation choke."
        try:
            class OomOutput(object):
                def __repr__(self):
                    raise MemoryError('OOM')
            oomvalue = OomOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<td class="code"><pre>Error in formatting', html)

    def test_too_large_values_handling(self):
        "Large values should not create a large HTML."
        large = 256 * 1024
        repr_of_str_adds = len(repr(''))
        try:
            class LargeOutput(object):
                def __repr__(self):
                    return repr('A' * large)
            largevalue = LargeOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertEqual(len(html) // 1024 // 128, 0)  # still fit in 128Kb
        self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)

    @skipIf(six.PY2, 'Bug manifests on PY3 only')
    def test_unfrozen_importlib(self):
        """
        importlib is not a frozen app, but its loader thinks it's frozen which
        results in an ImportError on Python 3. Refs #21443.
        """
        try:
            request = self.rf.get('/test_view/')
            importlib.import_module('abc.def.invalid.name')
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ImportError at /test_view/</h1>', html)

    def test_ignore_traceback_evaluation_exceptions(self):
        """
        Don't trip over exceptions generated by crafted objects when
        evaluating them while cleansing (#24455).
        """
        class BrokenEvaluation(Exception):
            pass

        def broken_setup():
            raise BrokenEvaluation

        request = self.rf.get('/test_view/')
        broken_lazy = SimpleLazyObject(broken_setup)
        try:
            bool(broken_lazy)
        except BrokenEvaluation:
            exc_type, exc_value, tb = sys.exc_info()

        self.assertIn(
            "BrokenEvaluation",
            ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
            "Evaluation exception reason not mentioned in traceback"
        )
class PlainTextReportTests(SimpleTestCase):
    """Plain-text reports built by ExceptionReporter.get_traceback_text()."""

    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError at /test_view/', text)
        self.assertIn("Can't find my keys", text)
        self.assertIn('Request Method:', text)
        self.assertIn('Request URL:', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request information:', text)
        self.assertNotIn('Request data not supplied', text)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError', text)
        self.assertIn("Can't find my keys", text)
        self.assertNotIn('Request Method:', text)
        self.assertNotIn('Request URL:', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request data not supplied', text)

    # The three cases below only verify that report generation doesn't raise.
    def test_no_exception(self):
        "An exception report can be generated for just a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        reporter.get_traceback_text()

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        reporter.get_traceback_text()

    def test_message_only(self):
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        reporter.get_traceback_text()
class ExceptionReportTestMixin(object):
    """Shared assertions for the sensitive-data filtering tests below.

    Each verify_* helper POSTs ``breakfast_data`` to the given view (which is
    expected to raise) and checks which frame variables and POST parameters
    leak into the 500 response or the admin error email.
    """
    # Mixin used in the ExceptionReporterFilterTests and
    # AjaxResponseExceptionReporterFilter tests below

    # 'sausage-key' and 'bacon-key' are the parameters marked sensitive by the
    # views under test; 'baked-beans-key' and 'hash-brown-key' are not.
    breakfast_data = {'sausage-key': 'sausage-value',
                      'baked-beans-key': 'baked-beans-value',
                      'hash-brown-key': 'hash-brown-value',
                      'bacon-key': 'bacon-value'}

    def verify_unsafe_response(self, view, check_for_vars=True,
                               check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # All variables are shown.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertContains(response, 'scrambled', status_code=500)
            self.assertContains(response, 'sauce', status_code=500)
            self.assertContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters are shown.
                self.assertContains(response, k, status_code=500)
                self.assertContains(response, v, status_code=500)

    def verify_safe_response(self, view, check_for_vars=True,
                             check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Non-sensitive variable's name and value are shown.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertContains(response, 'scrambled', status_code=500)
            # Sensitive variable's name is shown but not its value.
            self.assertContains(response, 'sauce', status_code=500)
            self.assertNotContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
            # Non-sensitive POST parameters' values are shown.
            self.assertContains(response, 'baked-beans-value', status_code=500)
            self.assertContains(response, 'hash-brown-value', status_code=500)
            # Sensitive POST parameters' values are not shown.
            self.assertNotContains(response, 'sausage-value', status_code=500)
            self.assertNotContains(response, 'bacon-value', status_code=500)

    def verify_paranoid_response(self, view, check_for_vars=True,
                                 check_for_POST_params=True):
        """
        Asserts that no variables or POST parameters are displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Show variable names but not their values.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertNotContains(response, 'scrambled', status_code=500)
            self.assertContains(response, 'sauce', status_code=500)
            self.assertNotContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
                # No POST parameters' values are shown.
                self.assertNotContains(response, v, status_code=500)

    def verify_unsafe_email(self, view, check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]

            # Frames vars are never shown in plain text email reports.
            body_plain = force_text(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)

            # Frames vars are shown in html email reports.
            body_html = force_text(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertIn('worcestershire', body_html)

            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters are shown.
                    self.assertIn(k, body_plain)
                    self.assertIn(v, body_plain)
                    self.assertIn(k, body_html)
                    self.assertIn(v, body_html)

    def verify_safe_email(self, view, check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]

            # Frames vars are never shown in plain text email reports.
            body_plain = force_text(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)

            # Frames vars are shown in html email reports.
            body_html = force_text(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertNotIn('worcestershire', body_html)

            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters' names are shown.
                    self.assertIn(k, body_plain)
                # Non-sensitive POST parameters' values are shown.
                self.assertIn('baked-beans-value', body_plain)
                self.assertIn('hash-brown-value', body_plain)
                self.assertIn('baked-beans-value', body_html)
                self.assertIn('hash-brown-value', body_html)
                # Sensitive POST parameters' values are not shown.
                self.assertNotIn('sausage-value', body_plain)
                self.assertNotIn('bacon-value', body_plain)
                self.assertNotIn('sausage-value', body_html)
                self.assertNotIn('bacon-value', body_html)

    def verify_paranoid_email(self, view):
        """
        Asserts that no variables or POST parameters are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body = force_text(email.body)
            self.assertNotIn('cooked_eggs', body)
            self.assertNotIn('scrambled', body)
            self.assertNotIn('sauce', body)
            self.assertNotIn('worcestershire', body)
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertIn(k, body)
                # No POST parameters' values are shown.
                self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
    """
    Ensure that sensitive information can be filtered out of error reports.

    Refs #14614.
    """
    rf = RequestFactory()

    def test_non_sensitive_request(self):
        """
        Ensure that everything (request info and frame variables) can be seen
        in the default error reports for non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)

    def test_sensitive_request(self):
        """
        Ensure that sensitive POST parameters and frame variables cannot be
        seen in the default error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view)
            self.verify_unsafe_email(sensitive_view)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view)
            self.verify_safe_email(sensitive_view)

    def test_paranoid_request(self):
        """
        Ensure that no POST parameters and frame variables can be seen in the
        default error reports for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view)
            self.verify_unsafe_email(paranoid_view)

        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view)
            self.verify_paranoid_email(paranoid_view)

    def test_multivalue_dict_key_error(self):
        """
        #21098 -- Ensure that sensitive POST parameters cannot be seen in the
        error reports for if request.POST['nonexistent_key'] throws an error.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(multivalue_dict_key_error)
            self.verify_unsafe_email(multivalue_dict_key_error)

        with self.settings(DEBUG=False):
            self.verify_safe_response(multivalue_dict_key_error)
            self.verify_safe_email(multivalue_dict_key_error)

    def test_custom_exception_reporter_filter(self):
        """
        Ensure that it's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)

    def test_sensitive_method(self):
        """
        Ensure that the sensitive_variables decorator works with object
        methods.

        Refs #18379.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_method_view,
                                        check_for_POST_params=False)
            self.verify_unsafe_email(sensitive_method_view,
                                     check_for_POST_params=False)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_method_view,
                                      check_for_POST_params=False)
            self.verify_safe_email(sensitive_method_view,
                                   check_for_POST_params=False)

    def test_sensitive_function_arguments(self):
        """
        Ensure that sensitive variables don't leak in the sensitive_variables
        decorator's frame, when those variables are passed as arguments to the
        decorated function.

        Refs #19453.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_args_function_caller)
            self.verify_unsafe_email(sensitive_args_function_caller)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)

    def test_sensitive_function_keyword_arguments(self):
        """
        Ensure that sensitive variables don't leak in the sensitive_variables
        decorator's frame, when those variables are passed as keyword arguments
        to the decorated function.

        Refs #19453.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_kwargs_function_caller)
            self.verify_unsafe_email(sensitive_kwargs_function_caller)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)

    def test_callable_settings(self):
        """
        Callable settings should not be evaluated in the debug page (#21345).
        """
        def callable_setting():
            return "This should not be displayed"
        with self.settings(DEBUG=True, FOOBAR=callable_setting):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)

    def test_callable_settings_forbidding_to_set_attributes(self):
        """
        Callable settings which forbid to set attributes should not break
        the debug page (#23070).
        """
        class CallableSettingWithSlots(object):
            __slots__ = []

            def __call__(self):
                return "This should not be displayed"

        with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)

    def test_dict_setting_with_non_str_key(self):
        """
        A dict setting containing a non-string key should not break the
        debug page (#12744).
        """
        with self.settings(DEBUG=True, FOOBAR={42: None}):
            response = self.client.get('/raises500/')
            self.assertContains(response, 'FOOBAR', status_code=500)

    def test_sensitive_settings(self):
        """
        The debug page should not show some sensitive settings
        (password, secret key, ...).
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)

    def test_settings_with_sensitive_keys(self):
        """
        The debug page should filter out some sensitive information found in
        dict settings.
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            FOOBAR = {
                setting: "should not be displayed",
                'recursive': {setting: "should not be displayed"},
            }
            with self.settings(DEBUG=True, FOOBAR=FOOBAR):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
    """
    Ensure that sensitive information can be filtered out of error reports.

    Here we specifically test the plain text 500 debug-only error page served
    when it has been detected the request was sent by JS code. We don't check
    for (non)existence of frames vars in the traceback information section of
    the response content because we don't include them in these error pages.
    Refs #14614.
    """
    # All requests are flagged as AJAX so the plain-text page is served.
    rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')

    def test_non_sensitive_request(self):
        """
        Ensure that request info can be seen in the default error reports for
        non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)

    def test_sensitive_request(self):
        """
        Ensure that sensitive POST parameters cannot be seen in the default
        error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view, check_for_vars=False)

    def test_paranoid_request(self):
        """
        Ensure that no POST parameters can be seen in the default error reports
        for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view, check_for_vars=False)

    def test_custom_exception_reporter_filter(self):
        """
        Ensure that it's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view,
                                        check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view,
                                        check_for_vars=False)
| bsd-3-clause |
PokemonGoF/PokemonGo-Bot-Desktop | build/pywin/Lib/lib2to3/fixes/fix_apply.py | 315 | 1904 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for apply().
This converts apply(func, v, k) into (func)(*v, **k)."""
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Call, Comma, parenthesize
class FixApply(fixer_base.BaseFix):
    """2to3 fixer that rewrites ``apply(func, v, k)`` as ``func(*v, **k)``."""

    BM_compatible = True

    # Match apply() calls whose func/args (and optional kwds) are plain
    # positional arguments; keyword arguments are excluded by the
    # ``not argument<NAME '=' any>`` guards because apply() never took them.
    PATTERN = """
    power< 'apply'
        trailer<
            '('
            arglist<
                (not argument<NAME '=' any>) func=any ','
                (not argument<NAME '=' any>) args=any [','
                (not argument<NAME '=' any>) kwds=any] [',']
            >
            ')'
        >
    >
    """

    def transform(self, node, results):
        """Build the replacement ``func(*args, **kwds)`` call node."""
        syms = self.syms
        assert results
        func = results["func"]
        args = results["args"]
        kwds = results.get("kwds")
        # Preserve the whitespace/comments that preceded the apply() call.
        prefix = node.prefix
        func = func.clone()
        if (func.type not in (token.NAME, syms.atom) and
            (func.type != syms.power or
             func.children[-2].type == token.DOUBLESTAR)):
            # Need to parenthesize
            func = parenthesize(func)
        func.prefix = ""
        args = args.clone()
        args.prefix = ""
        if kwds is not None:
            kwds = kwds.clone()
            kwds.prefix = ""
        # Assemble the new argument list: *args [, **kwds]
        l_newargs = [pytree.Leaf(token.STAR, u"*"), args]
        if kwds is not None:
            l_newargs.extend([Comma(),
                              pytree.Leaf(token.DOUBLESTAR, u"**"),
                              kwds])
            l_newargs[-2].prefix = u" "  # that's the ** token
        # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
        # can be translated into f(x, y, *t) instead of f(*(x, y) + t)
        #new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
        return Call(func, l_newargs, prefix=prefix)
| mit |
beni55/networkx | networkx/algorithms/tests/test_threshold.py | 19 | 6669 | #!/usr/bin/env python
"""Threshold Graphs
================
"""
from nose.tools import *
from nose import SkipTest
from nose.plugins.attrib import attr
import networkx as nx
import networkx.algorithms.threshold as nxt
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic
cnlti = nx.convert_node_labels_to_integers
class TestGeneratorThreshold():
    """Tests for networkx.algorithms.threshold: recognition of threshold
    graphs/sequences, creation sequences in their three representations,
    shortest paths, weight/threshold conversions and the fast ("sequence
    based") property computations."""

    def test_threshold_sequence_graph_test(self):
        # Stars and complete graphs are threshold graphs by construction.
        G=nx.star_graph(10)
        assert_true(nxt.is_threshold_graph(G))
        assert_true(nxt.is_threshold_sequence(list(d for n, d in G.degree())))

        G=nx.complete_graph(10)
        assert_true(nxt.is_threshold_graph(G))
        assert_true(nxt.is_threshold_sequence(list(d for n, d in G.degree())))

        deg=[3,2,2,1,1,1]
        assert_false(nxt.is_threshold_sequence(deg))

        deg=[3,2,2,1]
        assert_true(nxt.is_threshold_sequence(deg))

        G=nx.generators.havel_hakimi_graph(deg)
        assert_true(nxt.is_threshold_graph(G))

    def test_creation_sequences(self):
        # The same degree sequence in the three creation-sequence formats:
        # plain ('d'/'i' strings), labeled (node, code) pairs, and compact
        # run-length counts.
        deg=[3,2,2,1]
        G=nx.generators.havel_hakimi_graph(deg)

        cs0=nxt.creation_sequence(deg)
        H0=nxt.threshold_graph(cs0)
        assert_equal(''.join(cs0), 'ddid')

        cs1=nxt.creation_sequence(deg, with_labels=True)
        H1=nxt.threshold_graph(cs1)
        assert_equal(cs1, [(1, 'd'), (2, 'd'), (3, 'i'), (0, 'd')])

        cs2=nxt.creation_sequence(deg, compact=True)
        H2=nxt.threshold_graph(cs2)
        assert_equal(cs2, [2, 1, 1])
        assert_equal(''.join(nxt.uncompact(cs2)), 'ddid')

        assert_true(graph_could_be_isomorphic(H0,G))
        assert_true(graph_could_be_isomorphic(H0,H1))
        assert_true(graph_could_be_isomorphic(H0,H2))

    def test_shortest_path(self):
        # Creation-sequence based shortest paths must agree with the
        # generic graph algorithms on the realized graph.
        deg=[3,2,2,1]
        G=nx.generators.havel_hakimi_graph(deg)
        cs1=nxt.creation_sequence(deg, with_labels=True)
        for n, m in [(3, 0), (0, 3), (0, 2), (0, 1), (1, 3),
                     (3, 1), (1, 2), (2, 3)]:
            assert_equal(nxt.shortest_path(cs1,n,m),
                         nx.shortest_path(G, n, m))

        spl=nxt.shortest_path_length(cs1,3)
        spl2=nxt.shortest_path_length([ t for v,t in cs1],2)
        assert_equal(spl, spl2)

        spld={}
        for j,pl in enumerate(spl):
            n=cs1[j][0]
            spld[n]=pl
        assert_equal(spld, dict(nx.single_source_shortest_path_length(G, 3)))

    def test_weights_thresholds(self):
        # Round-trip: weights -> creation sequence -> weights is stable.
        wseq=[3,4,3,3,5,6,5,4,5,6]
        cs=nxt.weights_to_creation_sequence(wseq,threshold=10)
        wseq=nxt.creation_sequence_to_weights(cs)
        cs2=nxt.weights_to_creation_sequence(wseq)
        assert_equal(cs, cs2)

        # All creation-sequence representations must give the same weights.
        wseq=nxt.creation_sequence_to_weights(nxt.uncompact([3,1,2,3,3,2,3]))
        assert_equal(wseq,
                     [s*0.125 for s in [4,4,4,3,5,5,2,2,2,6,6,6,1,1,7,7,7]])

        wseq=nxt.creation_sequence_to_weights([3,1,2,3,3,2,3])
        assert_equal(wseq,
                     [s*0.125 for s in [4,4,4,3,5,5,2,2,2,6,6,6,1,1,7,7,7]])

        wseq=nxt.creation_sequence_to_weights(list(enumerate('ddidiiidididi')))
        assert_equal(wseq,
                     [s*0.1 for s in [5,5,4,6,3,3,3,7,2,8,1,9,0]])

        wseq=nxt.creation_sequence_to_weights('ddidiiidididi')
        assert_equal(wseq,
                     [s*0.1 for s in [5,5,4,6,3,3,3,7,2,8,1,9,0]])

        wseq=nxt.creation_sequence_to_weights('ddidiiidididid')
        ws=[s/float(12) for s in [6,6,5,7,4,4,4,8,3,9,2,10,1,11]]
        # Float comparison with tolerance rather than exact equality.
        assert_true(sum([abs(c-d) for c,d in zip(wseq,ws)]) < 1e-14)

    def test_finding_routines(self):
        G=nx.Graph({1:[2],2:[3],3:[4],4:[5],5:[6]})
        G.add_edge(2,4)
        G.add_edge(2,5)
        G.add_edge(2,7)
        G.add_edge(3,6)
        G.add_edge(4,6)

        # Alternating 4 cycle
        assert_equal(nxt.find_alternating_4_cycle(G), [1, 2, 3, 6])

        # Threshold graph
        TG=nxt.find_threshold_graph(G)
        assert_true(nxt.is_threshold_graph(TG))
        assert_equal(sorted(TG.nodes()), [1, 2, 3, 4, 5, 7])

        cs=nxt.creation_sequence(dict(TG.degree()), with_labels=True)
        assert_equal(nxt.find_creation_sequence(G), cs)

    def test_fast_versions_properties_threshold_graphs(self):
        # Sequence-based computations must match the generic graph
        # algorithms run on the realized threshold graph.
        cs='ddiiddid'
        G=nxt.threshold_graph(cs)
        assert_equal(nxt.density('ddiiddid'), nx.density(G))
        assert_equal(sorted(nxt.degree_sequence(cs)),
                     sorted(d for n, d in G.degree()))

        ts=nxt.triangle_sequence(cs)
        assert_equal(ts, list(nx.triangles(G).values()))
        assert_equal(sum(ts) // 3, nxt.triangles(cs))

        c1=nxt.cluster_sequence(cs)
        c2=list(nx.clustering(G).values())
        assert_almost_equal(sum([abs(c-d) for c,d in zip(c1,c2)]), 0)

        b1=nx.betweenness_centrality(G).values()
        b2=nxt.betweenness_sequence(cs)
        assert_true(sum([abs(c-d) for c,d in zip(b1,b2)]) < 1e-14)

        assert_equal(nxt.eigenvalues(cs), [0, 1, 3, 3, 5, 7, 7, 8])

        # Degree Correlation
        assert_true(abs(nxt.degree_correlation(cs)+0.593038821954) < 1e-12)
        assert_equal(nxt.degree_correlation('diiiddi'), -0.8)
        assert_equal(nxt.degree_correlation('did'), -1.0)
        assert_equal(nxt.degree_correlation('ddd'), 1.0)
        assert_equal(nxt.eigenvalues('dddiii'), [0, 0, 0, 0, 3, 3])
        assert_equal(nxt.eigenvalues('dddiiid'), [0, 1, 1, 1, 4, 4, 7])

    def test_tg_creation_routines(self):
        # Smoke tests: these should simply run without raising.
        s=nxt.left_d_threshold_sequence(5,7)
        s=nxt.right_d_threshold_sequence(5,7)
        s1=nxt.swap_d(s,1.0,1.0)

    @attr('numpy')
    def test_eigenvectors(self):
        try:
            import numpy as N
            eigenval=N.linalg.eigvals
            import scipy
        except ImportError:
            raise SkipTest('SciPy not available.')

        cs='ddiiddid'
        G=nxt.threshold_graph(cs)
        (tgeval,tgevec)=nxt.eigenvectors(cs)
        dot=N.dot
        # Every eigenvector should be normalized to unit length.
        assert_equal([ abs(dot(lv,lv)-1.0)<1e-9 for lv in tgevec ], [True]*8)
        lapl=nx.laplacian_matrix(G)
#        tgev=[ dot(lv,dot(lapl,lv)) for lv in tgevec ]
#        assert_true(sum([abs(c-d) for c,d in zip(tgev,tgeval)]) < 1e-9)
#        tgev.sort()
#        lev=list(eigenval(lapl))
#        lev.sort()
#        assert_true(sum([abs(c-d) for c,d in zip(tgev,lev)]) < 1e-9)

    def test_create_using(self):
        cs='ddiiddid'
        G=nxt.threshold_graph(cs)
        # Directed graphs are rejected; multigraphs produce the same edges.
        assert_raises(nx.exception.NetworkXError,
                      nxt.threshold_graph, cs, create_using=nx.DiGraph())
        MG=nxt.threshold_graph(cs,create_using=nx.MultiGraph())
        assert_equal(sorted(MG.edges()), sorted(G.edges()))
| bsd-3-clause |
steedos/odoo | addons/project/res_config.py | 232 | 4551 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class project_configuration(osv.osv_memory):
    """Transient settings model for the Project application.

    Each boolean ``module_*`` field installs/uninstalls the named module;
    each ``group_*`` field toggles membership of the implied security group
    (standard res.config.settings conventions).
    """
    _name = 'project.config.settings'
    _inherit = 'res.config.settings'

    _columns = {
        'module_sale_service': fields.boolean('Generate tasks from sale orders',
            help='This feature automatically creates project tasks from service products in sale orders. '
                 'More precisely, tasks are created for procurement lines with product of type \'Service\', '
                 'procurement method \'Make to Order\', and supply method \'Manufacture\'.\n'
                 '-This installs the module sale_service.'),
        'module_pad': fields.boolean("Use integrated collaborative note pads on task",
            help='Lets the company customize which Pad installation should be used to link to new pads '
                 '(for example: http://ietherpad.com/).\n'
                 '-This installs the module pad.'),
        'module_project_timesheet': fields.boolean("Record timesheet lines per tasks",
            help='This allows you to transfer the entries under tasks defined for Project Management to '
                 'the timesheet line entries for particular date and user, with the effect of creating, '
                 'editing and deleting either ways.\n'
                 '-This installs the module project_timesheet.'),
        'module_project_issue': fields.boolean("Track issues and bugs",
            help='Provides management of issues/bugs in projects.\n'
                 '-This installs the module project_issue.'),
        'time_unit': fields.many2one('product.uom', 'Working time unit', required=True,
            help='This will set the unit of measure used in projects and tasks.\n'
                 'Changing the unit will only impact new entries.'),
        'module_project_issue_sheet': fields.boolean("Invoice working time on issues",
            help='Provides timesheet support for the issues/bugs management in project.\n'
                 '-This installs the module project_issue_sheet.'),
        'group_tasks_work_on_tasks': fields.boolean("Log work activities on tasks",
            implied_group='project.group_tasks_work_on_tasks',
            help="Allows you to compute work on tasks."),
        'group_time_work_estimation_tasks': fields.boolean("Manage time estimation on tasks",
            implied_group='project.group_time_work_estimation_tasks',
            help="Allows you to compute Time Estimation on tasks."),
        'group_manage_delegation_task': fields.boolean("Allow task delegation",
            implied_group='project.group_delegate_task',
            help="Allows you to delegate tasks to other users."),
    }

    def get_default_time_unit(self, cr, uid, fields, context=None):
        # Default the working-time unit to the current user's company setting.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return {'time_unit': user.company_id.project_time_mode_id.id}

    def set_time_unit(self, cr, uid, ids, context=None):
        # Persist the chosen unit back onto the user's company record.
        config = self.browse(cr, uid, ids[0], context)
        user = self.pool.get('res.users').browse(cr, uid, uid, context)
        user.company_id.write({'project_time_mode_id': config.time_unit.id})

    def onchange_time_estimation_project_timesheet(self, cr, uid, ids, group_time_work_estimation_tasks, module_project_timesheet):
        # Both features require work logging on tasks, so enable it.
        if group_time_work_estimation_tasks or module_project_timesheet:
            return {'value': {'group_tasks_work_on_tasks': True}}
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-60/files/myenv/lib/python2.7/site-packages/pip/_vendor/requests/utils.py | 222 | 19653 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import MissingSchema, InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
    """Returns an internal sequence dictionary update.

    Mapping-like objects are reduced to their (key, value) pairs;
    anything else passes through unchanged.
    """
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort length of *o*.

    Tries, in order: ``__len__``, a ``len`` attribute, the size of the
    underlying file descriptor, and the length of an in-memory buffer
    (``getvalue``).  Returns None when no strategy applies.
    """
    if hasattr(o, '__len__'):
        return len(o)

    if hasattr(o, 'len'):
        return o.len

    if hasattr(o, 'fileno'):
        try:
            descriptor = o.fileno()
        except io.UnsupportedOperation:
            # In-memory streams expose fileno() but cannot honour it.
            pass
        else:
            return os.fstat(descriptor).st_size

    if hasattr(o, 'getvalue'):
        # e.g. BytesIO, cStringIO.StringIO
        return len(o.getvalue())
def get_netrc_auth(url):
    """Returns the Requests tuple auth for a given url from netrc.

    Looks for ~/.netrc or ~/_netrc, matches the URL's host against its
    entries, and returns a (login, password) tuple, or None on any
    failure (missing file, no match, parse error, unusable environment).
    """
    try:
        from netrc import netrc, NetrcParseError

        netrc_path = None

        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{0}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/kennethreitz/requests/issues/1846
                return

            if os.path.exists(loc):
                netrc_path = loc
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc
        host = ri.netloc.split(':')[0]

        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password
                # (the netrc entry may carry the login in either the
                # "login" or the "account" slot; pick whichever is set)
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth
            pass

    # AppEngine hackiness.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Tries to guess the filename of the given object.

    File objects usually carry their path in ``name``; synthetic streams
    use bracketed pseudo-names such as ``<stdin>``, which are rejected.
    """
    candidate = getattr(obj, 'name', None)
    if not candidate:
        return None
    if candidate[0] == '<' or candidate[-1] == '>':
        return None
    return os.path.basename(candidate)
def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. Unless it can not be represented as such, return an
    OrderedDict, e.g.,

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: need more than 1 value to unpack
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])
    """
    if value is None:
        return None

    # Scalars cannot be interpreted as key/value pairs.
    scalar_types = (str, bytes, bool, int)
    if isinstance(value, scalar_types):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples.
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    # ``Mapping`` lives in ``collections`` on Python 2 but moved to
    # ``collections.abc`` on Python 3 (the top-level alias was removed in
    # 3.10), so resolve it in a way that works on both.
    mapping_base = getattr(collections, 'abc', collections).Mapping
    if isinstance(value, mapping_base):
        value = value.items()

    return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    """
    result = []
    # _parse_list_header (urllib's parse_http_list) handles the
    # quote-aware comma splitting; we only strip surrounding quotes here.
    for item in _parse_list_header(value):
        if item[:1] == item[-1:] == '"':
            item = unquote_header_value(item[1:-1])
        result.append(item)
    return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    :param value: a string with a dict header.
    :return: :class:`dict`
    """
    result = {}
    for item in _parse_list_header(value):
        # Bare tokens (no '=') map to None.
        if '=' not in item:
            result[item] = None
            continue
        # Split only on the first '=' so values may contain '='.
        name, value = item.split('=', 1)
        if value[:1] == value[-1:] == '"':
            value = unquote_header_value(value[1:-1])
        result[name] = value
    return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.
    """
    # Values that are not surrounded by double quotes pass through as-is.
    if not (value and value[0] == value[-1] == '"'):
        return value

    # This is not the real unquoting, but fixing it so that the RFC is met
    # would break Internet Explorer (and possibly other browsers), which
    # uploads files with "C:\foo\bar.txt" as the filename.
    inner = value[1:-1]

    # A leading '\\' on a filename marks a UNC path; collapsing its
    # backslashes would corrupt it, so return it untouched.  See #458.
    if is_filename and inner[:2] == '\\\\':
        return inner

    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    :param cj: CookieJar object to extract cookies from.
    """
    # Later cookies win on name collisions, mirroring iteration order.
    return dict((cookie.name, cookie.value) for cookie in cj)
def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    Note: despite the summary, *cj* is updated in place and then returned.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    """
    # Build a throwaway jar from the dict, then merge it into cj.
    cj2 = cookiejar_from_dict(cookie_dict)
    cj.update(cj2)
    return cj
def get_encodings_from_content(content):
    """Returns encodings from given content string.

    :param content: bytestring to extract encodings from.
    """
    # Collect declarations from <meta charset=...>, the legacy http-equiv
    # pragma form, and the XML declaration, in that order.
    charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
    pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
    xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')

    found = []
    for pattern in (charset_re, pragma_re, xml_re):
        found.extend(pattern.findall(content))
    return found
def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    """
    content_type = headers.get('content-type')

    if not content_type:
        return None

    # cgi.parse_header separates the media type from its parameters.
    content_type, params = cgi.parse_header(content_type)

    if 'charset' in params:
        return params['charset'].strip("'\"")

    # RFC 2616 declares ISO-8859-1 the default charset for text/* types.
    if 'text' in content_type:
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Stream decodes a iterator.

    Yields the chunks from *iterator* decoded with *r*'s declared encoding;
    when no encoding is set, yields the raw chunks unchanged.
    """
    if r.encoding is None:
        for item in iterator:
            yield item
        return

    # An incremental decoder copes with multi-byte sequences that are
    # split across chunk boundaries.
    decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
    for chunk in iterator:
        rv = decoder.decode(chunk)
        if rv:
            yield rv
    # Flush any buffered trailing bytes.
    rv = decoder.decode(b'', final=True)
    if rv:
        yield rv
def iter_slices(string, slice_length):
    """Iterate over slices of a string.

    Yields consecutive substrings of at most *slice_length* characters;
    the final slice may be shorter.
    """
    offset = 0
    while offset < len(string):
        yield string[offset:offset + slice_length]
        offset += slice_length
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type
    2. every encodings from ``<meta ... charset=XXX>``
    3. fall back and replace all unicode characters

    """
    # NOTE(review): tried_encodings is appended to but never read back —
    # the meta-tag fallback described above does not appear to be
    # implemented here; confirm against upstream before relying on step 2.
    tried_encodings = []

    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)

    # Fall back:
    try:
        # If encoding is None, str() raises TypeError and the raw bytes
        # are returned unchanged.
        return str(r.content, encoding, errors='replace')
    except TypeError:
        return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
    """
    pieces = uri.split('%')
    # pieces[0] precedes any escape; every later piece begins with what
    # should be a two-digit hex pair.
    for idx in range(1, len(pieces)):
        piece = pieces[idx]
        hex_pair = piece[0:2]
        if len(hex_pair) == 2 and hex_pair.isalnum():
            try:
                char = chr(int(hex_pair, 16))
            except ValueError:
                raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_pair)

            if char in UNRESERVED_SET:
                # Safe to decode: unreserved characters never change meaning.
                pieces[idx] = char + piece[2:]
            else:
                pieces[idx] = '%' + piece
        else:
            pieces[idx] = '%' + piece
    return ''.join(pieces)
def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.  The result is stable:
    re-quoting an already-quoted URI leaves it unchanged.
    """
    # Unquote only the unreserved characters
    # Then quote only illegal characters (do not quote reserved, unreserved,
    # or '%')
    return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
def address_in_network(ip, net):
    """
    This function allows you to check if on IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24

    :param ip: dotted-quad IPv4 address string.
    :param net: network in CIDR "addr/bits" notation.
    """
    # Convert everything to host-order 32-bit integers and compare the
    # masked network portions.
    ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
    netaddr, bits = net.split('/')
    netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
    network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
    return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
    """
    Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0
    """
    # Clear the low (32 - mask) host bits of an all-ones word, then render
    # the result as a big-endian dotted quad.
    host_bits = (1 << 32 - mask) - 1
    return socket.inet_ntoa(struct.pack('>I', 0xffffffff ^ host_bits))
def is_ipv4_address(string_ip):
    """Return True when *string_ip* parses as an IPv4 address."""
    # inet_aton raises socket.error for anything it cannot parse.
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """Very simple check of the cidr format in no_proxy variable.

    Accepts exactly one '/', a numeric prefix length in 1..32, and a
    parseable IPv4 network address.
    """
    if string_network.count('/') != 1:
        return False

    address, _, prefix = string_network.partition('/')

    try:
        prefix_len = int(prefix)
    except ValueError:
        return False

    if not 1 <= prefix_len <= 32:
        return False

    try:
        socket.inet_aton(address)
    except socket.error:
        return False

    return True
def get_environ_proxies(url):
    """Return a dict of environment proxies.

    Honours the (upper- or lower-case) ``no_proxy`` environment variable —
    which may contain host suffixes and IPv4 CIDR blocks — and the
    platform's proxy-bypass settings before falling back to
    ``urllib``'s ``getproxies()``.
    """
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy = get_proxy('no_proxy')
    netloc = urlparse(url).netloc

    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the netloc, both with and without the port.
        no_proxy = no_proxy.replace(' ', '').split(',')

        ip = netloc.split(':')[0]
        if is_ipv4_address(ip):
            # Literal IP targets are matched against CIDR entries.
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(ip, proxy_ip):
                        return {}
        else:
            # Host names are matched by suffix, with and without the port.
            for host in no_proxy:
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return {}

    # If the system proxy settings indicate that this URL should be bypassed,
    # don't proxy.
    # The proxy_bypass function is incredibly buggy on OS X in early versions
    # of Python 2.6, so allow this call to fail. Only catch the specific
    # exceptions we've seen, though: this call failing in other ways can reveal
    # legitimate problems.
    try:
        bypass = proxy_bypass(netloc)
    except (TypeError, socket.gaierror):
        bypass = False

    if bypass:
        return {}

    # If we get here, we either didn't have no_proxy set or we're not going
    # anywhere that no_proxy applies to, and the system settings don't require
    # bypassing the proxy for the current URL.
    return getproxies()
def default_user_agent(name="python-requests"):
    """Return a string representing the default user agent.

    Format: ``<name>/<version> <implementation>/<impl-version> <os>/<release>``.

    :param name: leading product token, defaults to "python-requests".
    """
    _implementation = platform.python_implementation()

    if _implementation == 'CPython':
        _implementation_version = platform.python_version()
    elif _implementation == 'PyPy':
        # PyPy's version lives in sys.pypy_version_info, not platform.
        _implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
                                                sys.pypy_version_info.minor,
                                                sys.pypy_version_info.micro)
        if sys.pypy_version_info.releaselevel != 'final':
            _implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
    elif _implementation == 'Jython':
        _implementation_version = platform.python_version()  # Complete Guess
    elif _implementation == 'IronPython':
        _implementation_version = platform.python_version()  # Complete Guess
    else:
        _implementation_version = 'Unknown'

    # platform calls can fail on restricted environments (e.g. sandboxes).
    try:
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        p_system = 'Unknown'
        p_release = 'Unknown'

    return " ".join(['%s/%s' % (name, __version__),
                     '%s/%s' % (_implementation, _implementation_version),
                     '%s/%s' % (p_system, p_release)])
def default_headers():
    """Return a :class:`CaseInsensitiveDict` with the default request
    headers sent by this library (User-Agent, Accept-Encoding, Accept)."""
    return CaseInsensitiveDict({
        'User-Agent': default_user_agent(),
        'Accept-Encoding': ', '.join(('gzip', 'deflate', 'compress')),
        'Accept': '*/*'
    })
def parse_header_links(value):
    """Return a dict of parsed link headers proxies.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
    """
    parsed = []
    strip_chars = " '\""

    for segment in value.split(","):
        # Separate the <url> part from its ;-delimited parameters.
        if ";" in segment:
            url_part, params = segment.split(";", 1)
        else:
            url_part, params = segment, ''

        entry = {"url": url_part.strip("<> '\"")}

        for param in params.split(";"):
            # A parameter must be exactly key=value; stop at the first
            # malformed one (matches the original unpacking behaviour).
            pieces = param.split("=")
            if len(pieces) != 2:
                break
            key, val = pieces
            entry[key.strip(strip_chars)] = val.strip(strip_chars)

        parsed.append(entry)

    return parsed
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess the Unicode encoding of a JSON byte string.

    JSON always starts with two ASCII characters, so detection is as easy
    as counting the null bytes in the first four bytes and, from their
    count and position, determining the encoding.  A BOM, if present, is
    detected first.

    :param data: raw JSON bytes.
    :return: a codec name, or None if the encoding could not be determined.
    """
    sample = data[:4]
    # Bug fix: the original compared against codecs.BOM32_BE, a legacy
    # alias for the UTF-16 big-endian BOM, so BOM-prefixed UTF-32-BE data
    # was never recognized.  BOM_UTF32_BE is the correct constant.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:   # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def except_on_missing_scheme(url):
    """Given a URL, raise a MissingSchema exception if the scheme is missing.

    Only the scheme component is validated; the rest of the URL is ignored.
    """
    scheme, netloc, path, params, query, fragment = urlparse(url)

    if not scheme:
        raise MissingSchema('Proxy URLs must have explicit schemes.')
def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password.

    Returns ('', '') when the URL carries no credentials.
    """
    parsed = urlparse(url)

    try:
        # unquote(None) raises TypeError when a component is absent, which
        # drops us into the empty-credentials fallback.
        auth = (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        auth = ('', '')

    return auth
def to_native_string(string, encoding='ascii'):
    """
    Given a string object, regardless of type, returns a representation of that
    string in the native string type, encoding and decoding where necessary.
    This assumes ASCII unless told otherwise.

    :param string: text or bytes to convert.
    :param encoding: codec used when a conversion is required.
    """
    out = None

    if isinstance(string, builtin_str):
        # Already the native str type for this interpreter.
        out = string
    else:
        # Python 2's native str is bytes (encode); Python 3's is unicode
        # (decode) — is_py2 comes from the compat shim.
        if is_py2:
            out = string.encode(encoding)
        else:
            out = string.decode(encoding)

    return out
| gpl-3.0 |
chirilo/kuma | vendor/packages/pygments/lexers/testing.py | 72 | 8704 | # -*- coding: utf-8 -*-
"""
pygments.lexers.testing
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for testing languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Comment, Keyword, Name, String
__all__ = ['GherkinLexer']
class GherkinLexer(RegexLexer):
    """
    For `Gherkin <http://github.com/aslakhellesoy/gherkin/>` syntax.

    .. versionadded:: 1.2
    """
    name = 'Gherkin'
    aliases = ['cucumber', 'gherkin']
    filenames = ['*.feature']
    mimetypes = ['text/x-gherkin']

    # The three regexes below are generated from Gherkin's i18n keyword
    # tables and match the localized Feature / Scenario / step keywords
    # in every supported language.
    feature_keywords = u'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функціонал|Функционалност|Функционал|Фича|Особина|Могућност|Özellik|Właściwość|Tính năng|Trajto|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalitat|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$'
    feature_element_keywords = u'^(\\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарій|Сценарио|Сценарий структураси|Сценарий|Структура сценарію|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Передумова|Основа|Концепт|Контекст|Założenia|Wharrimean is|Tình huống|The thing of it is|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenaro|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Konturo de la scenaro|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Fono|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dis is what went down|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$'
    examples_keywords = u'^(\\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Приклади|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Ekzemploj|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$'
    step_keywords = u'^(\\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo |Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )'

    # RegexLexer state machine.  States suffixed "_on_stack" / "_stack"
    # pop two frames ("#pop:2") because they are entered from within
    # another pushed state.
    tokens = {
        'comments': [
            (r'^\s*#.*$', Comment),
        ],
        'feature_elements': [
            (step_keywords, Keyword, "step_content_stack"),
            include('comments'),
            (r"(\s|.)", Name.Function),
        ],
        'feature_elements_on_stack': [
            (step_keywords, Keyword, "#pop:2"),
            include('comments'),
            (r"(\s|.)", Name.Function),
        ],
        'examples_table': [
            (r"\s+\|", Keyword, 'examples_table_header'),
            include('comments'),
            (r"(\s|.)", Name.Function),
        ],
        'examples_table_header': [
            (r"\s+\|\s*$", Keyword, "#pop:2"),
            include('comments'),
            # escaped pipe inside a cell
            (r"\\\|", Name.Variable),
            (r"\s*\|", Keyword),
            (r"[^|]", Name.Variable),
        ],
        'scenario_sections_on_stack': [
            (feature_element_keywords,
             bygroups(Name.Function, Keyword, Keyword, Name.Function),
             "feature_elements_on_stack"),
        ],
        'narrative': [
            include('scenario_sections_on_stack'),
            include('comments'),
            (r"(\s|.)", Name.Function),
        ],
        'table_vars': [
            # <placeholder> variables substituted from the Examples table
            (r'(<[^>]+>)', Name.Variable),
        ],
        'numbers': [
            (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', String),
        ],
        'string': [
            include('table_vars'),
            (r'(\s|.)', String),
        ],
        'py_string': [
            # triple-quoted docstring attached to a step
            (r'"""', Keyword, "#pop"),
            include('string'),
        ],
        'step_content_root': [
            (r"$", Keyword, "#pop"),
            include('step_content'),
        ],
        'step_content_stack': [
            (r"$", Keyword, "#pop:2"),
            include('step_content'),
        ],
        'step_content': [
            (r'"', Name.Function, "double_string"),
            include('table_vars'),
            include('numbers'),
            include('comments'),
            (r'(\s|.)', Name.Function),
        ],
        'table_content': [
            (r"\s+\|\s*$", Keyword, "#pop"),
            include('comments'),
            (r"\\\|", String),
            (r"\s*\|", Keyword),
            include('string'),
        ],
        'double_string': [
            (r'"', Name.Function, "#pop"),
            include('string'),
        ],
        'root': [
            (r'\n', Name.Function),
            include('comments'),
            (r'"""', Keyword, "py_string"),
            (r'\s+\|', Keyword, 'table_content'),
            (r'"', Name.Function, "double_string"),
            include('table_vars'),
            include('numbers'),
            # @tags preceding a feature/scenario
            (r'(\s*)(@[^@\r\n\t ]+)', bygroups(Name.Function, Name.Tag)),
            (step_keywords, bygroups(Name.Function, Keyword),
             'step_content_root'),
            (feature_keywords, bygroups(Keyword, Keyword, Name.Function),
             'narrative'),
            (feature_element_keywords,
             bygroups(Name.Function, Keyword, Keyword, Name.Function),
             'feature_elements'),
            (examples_keywords,
             bygroups(Name.Function, Keyword, Keyword, Name.Function),
             'examples_table'),
            (r'(\s|.)', Name.Function),
        ]
    }
| mpl-2.0 |
Jgarcia-IAS/localizacion | openerp/addons/sale_journal/__openerp__.py | 262 | 2637 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Invoicing Journals',
'version': '1.0',
'category': 'Sales Management',
'description': """
The sales journal modules allows you to categorise your sales and deliveries (picking lists) between different journals.
========================================================================================================================
This module is very helpful for bigger companies that works by departments.
You can use journal for different purposes, some examples:
----------------------------------------------------------
* isolate sales of different departments
* journals for deliveries by truck or by UPS
Journals have a responsible and evolves between different status:
-----------------------------------------------------------------
* draft, open, cancel, done.
Batch operations can be processed on the different journals to confirm all sales
at once, to validate or invoice packing.
It also supports batch invoicing methods that can be configured by partners and sales orders, examples:
-------------------------------------------------------------------------------------------------------
* daily invoicing
* monthly invoicing
Some statistics by journals are provided.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/billing',
'depends': ['sale_stock'],
'data': [
'security/ir.model.access.csv',
'sale_journal_view.xml',
'sale_journal_data.xml'
],
'demo': ['sale_journal_demo.xml'],
'test': [ ],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
snehasi/servo | tests/wpt/css-tests/tools/pytest/_pytest/resultlog.py | 208 | 3536 | """ log machine-parseable test session result information in a plain
text file.
"""
import py
import os
def pytest_addoption(parser):
    """Register the ``--resultlog``/``--result-log`` command-line option."""
    group = parser.getgroup("terminal reporting", "resultlog plugin options")
    group.addoption('--resultlog', '--result-log', action="store",
                    metavar="path", default=None,
                    help="path for machine-readable result log.")
def pytest_configure(config):
    """Open the result-log file (creating its directory if needed) and
    register a ResultLog plugin instance on the config."""
    resultlog = config.option.resultlog
    # prevent opening resultlog on slave nodes (xdist)
    if resultlog and not hasattr(config, 'slaveinput'):
        dirname = os.path.dirname(os.path.abspath(resultlog))
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        logfile = open(resultlog, 'w', 1) # line buffered
        config._resultlog = ResultLog(config, logfile)
        config.pluginmanager.register(config._resultlog)
def pytest_unconfigure(config):
    """Close the log file and unregister the plugin at session teardown."""
    resultlog = getattr(config, '_resultlog', None)
    if resultlog:
        resultlog.logfile.close()
        del config._resultlog
        config.pluginmanager.unregister(resultlog)
def generic_path(item):
    """Render *item*'s collection chain as a compact path-like id.

    Nodes within the same file are joined with '.' (a ':' right after the
    file boundary); entering a new file inserts '/'.  Parametrization
    suffixes (names starting with '(' or '[') are fused onto the
    preceding name with no separator.
    """
    chain = item.listchain()
    parts = [chain[0].name]
    current_fspath = chain[0].fspath
    just_entered_file = False
    for node in chain[1:]:
        node_fspath = node.fspath
        if node_fspath == current_fspath:
            parts.append(':' if just_entered_file else '.')
            just_entered_file = False
        else:
            parts.append('/')
            just_entered_file = True
        name = node.name
        if name[0] in '([':
            # parametrized id: drop the separator and glue onto previous part
            parts.pop()
        parts.append(name)
        current_fspath = node_fspath
    return ''.join(parts)
class ResultLog(object):
    """Collects test reports and writes them to the result-log file as
    ``<lettercode> <testpath>`` lines followed by the detail text indented
    by one space (so parsers can locate the next entry)."""

    def __init__(self, config, logfile):
        self.config = config
        self.logfile = logfile # preferably line buffered

    def write_log_entry(self, testpath, lettercode, longrepr):
        # Status line first, then each detail line prefixed with a space.
        py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile)
        for line in longrepr.splitlines():
            py.builtin.print_(" %s" % line, file=self.logfile)

    def log_outcome(self, report, lettercode, longrepr):
        # nodeid is preferred; collection-time reports may only have fspath.
        testpath = getattr(report, 'nodeid', None)
        if testpath is None:
            testpath = report.fspath
        self.write_log_entry(testpath, lettercode, longrepr)

    def pytest_runtest_logreport(self, report):
        """Log a test outcome; passing setup/teardown phases are skipped."""
        if report.when != "call" and report.passed:
            return
        res = self.config.hook.pytest_report_teststatus(report=report)
        code = res[1]
        if code == 'x':
            longrepr = str(report.longrepr)
        elif code == 'X':
            longrepr = ''
        elif report.passed:
            longrepr = ""
        elif report.failed:
            longrepr = str(report.longrepr)
        elif report.skipped:
            longrepr = str(report.longrepr[2])
        # NOTE(review): if none of the branches above matched, `longrepr`
        # would be unbound here -- presumably the status codes are
        # exhaustive; confirm before relying on it.
        self.log_outcome(report, code, longrepr)

    def pytest_collectreport(self, report):
        """Log collection failures ('F') and skips ('S')."""
        if not report.passed:
            if report.failed:
                code = "F"
                longrepr = str(report.longrepr)
            else:
                assert report.skipped
                code = "S"
                longrepr = "%s:%d: %s" % report.longrepr
            self.log_outcome(report, code, longrepr)

    def pytest_internalerror(self, excrepr):
        """Log an internal error ('!'); falls back to the cwd when the
        crash location is unknown."""
        reprcrash = getattr(excrepr, 'reprcrash', None)
        path = getattr(reprcrash, "path", None)
        if path is None:
            path = "cwd:%s" % py.path.local()
        self.write_log_entry(path, '!', str(excrepr))
| mpl-2.0 |
qpython-android/QPypi-numpy | numpy/lib/type_check.py | 2 | 13492 | ## Automatically adapted for numpy Sep 19, 2005 by convertcode.py
# Public API of this module.
__all__ = ['iscomplexobj','isrealobj','imag','iscomplex',
           'isreal','nan_to_num','real','real_if_close',
           'typename','asfarray','mintypecode','asscalar',
           'common_type']
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, asanyarray, array, isnan, \
obj2sctype, zeros
from ufunclike import isneginf, isposinf
# Type-character codes ordered from largest to smallest element size;
# mintypecode() uses the index into this string to rank candidates.
_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
def mintypecode(typechars, typeset='GDFgdf', default='d'):
    """Return the smallest type character from *typeset* able to hold
    every type in *typechars*.

    Non-string entries of *typechars* are mapped through
    ``asarray(t).dtype.char``.  When no candidate from *typeset*
    matches, *default* is returned.
    """
    codes = [t if (type(t) is str and t) else asarray(t).dtype.char
             for t in typechars]
    matched = [t for t in codes if t in typeset]
    if not matched:
        return default
    if 'F' in matched and 'd' in matched:
        # single-precision complex + double real -> double complex
        return 'D'
    # smallest element size wins
    return min(matched, key=_typecodes_by_elsize.index)
def asfarray(a, dtype=_nx.float_):
    """Return *a* converted to an ndarray of a floating (inexact) type.

    A non-inexact *dtype* request (e.g. any int dtype) is coerced to
    the default float type.
    """
    scalar_type = _nx.obj2sctype(dtype)
    if issubclass(scalar_type, _nx.inexact):
        return asarray(a, dtype=scalar_type)
    return asarray(a, dtype=_nx.float_)
def real(val):
    """Return the real component of each element of *val*.

    Real-typed input keeps its type; complex input yields the float
    real parts.  The result shares memory with the input array.
    """
    return asanyarray(val).real
def imag(val):
    """Return the imaginary component of each element of *val*.

    Real-typed input yields zeros of the same shape.
    """
    return asanyarray(val).imag
def iscomplex(x):
    """Element-wise test for a nonzero imaginary part.

    This tests values, not dtypes: complex entries whose imaginary part
    is exactly zero are reported False.  Real-typed input yields an
    all-False result.
    """
    arr = asanyarray(x)
    if issubclass(arr.dtype.type, _nx.complexfloating):
        return arr.imag != 0
    # real dtypes can never carry an imaginary part; the unary '+'
    # converts a 0-d array back into an array scalar
    return +zeros(arr.shape, bool)
def isreal(x):
    """Element-wise test for a zero imaginary part.

    Complex values with a zero imaginary part count as real, so the
    result is the exact logical negation of ``iscomplex``.
    """
    return imag(x) == 0
def iscomplexobj(x):
    """Return True when *x* has a complex dtype.

    Only the type is inspected, never the values: ``1+0j`` is complex
    even though its imaginary part is zero.
    """
    return issubclass(asarray(x).dtype.type, _nx.complexfloating)
def isrealobj(x):
    """Return True when *x* does NOT have a complex dtype.

    Only the type is inspected: ``1+0j`` yields False even though its
    imaginary part is zero.
    """
    return not issubclass(asarray(x).dtype.type, _nx.complexfloating)
#-----------------------------------------------------------------------------
def _getmaxmin(t):
    # Return (max, min) finite values representable by float type `t`.
    # NOTE(review): the bare `import getlimits` is a Python-2-era implicit
    # relative import of numpy.lib.getlimits; it would fail under Python 3
    # absolute-import rules -- confirm the targeted runtime.
    import getlimits
    f = getlimits.finfo(t)
    return f.max, f.min
def nan_to_num(x):
    """Replace NaN with zero and +/-infinity with the largest/smallest
    finite value of the result dtype.

    Complex input is handled by recursing independently over the real
    and imaginary parts.  Integer input is returned as an unchanged
    copy, since it cannot hold NaN or infinity.
    """
    try:
        t = x.dtype.type
    except AttributeError:
        # plain Python scalar / sequence: derive the scalar type
        t = obj2sctype(type(x))
    if issubclass(t, _nx.complexfloating):
        return nan_to_num(x.real) + 1j * nan_to_num(x.imag)
    try:
        y = x.copy()
    except AttributeError:
        y = array(x)
    if issubclass(t, _nx.integer):
        return y
    # promote a 0-d/scalar value to a 1-element array so boolean
    # mask assignment works, and unwrap it again at the end
    scalar = not y.shape
    if scalar:
        y = array([x])
    pos_inf = isposinf(y)
    neg_inf = isneginf(y)
    nan_mask = isnan(y)
    maxf, minf = _getmaxmin(y.dtype.type)
    y[nan_mask] = 0
    y[pos_inf] = maxf
    y[neg_inf] = minf
    return y[0] if scalar else y
#-----------------------------------------------------------------------------
def real_if_close(a, tol=100):
    """Drop the imaginary part of *a* when it is everywhere close to zero.

    For ``tol > 1`` the tolerance is interpreted as *tol* machine
    epsilons of a's dtype; otherwise *tol* is used directly as the
    absolute tolerance.  Real-typed input is returned untouched.
    """
    a = asanyarray(a)
    if not issubclass(a.dtype.type, _nx.complexfloating):
        return a
    if tol > 1:
        import getlimits
        tol = getlimits.finfo(a.dtype.type).eps * tol
    if _nx.allclose(a.imag, 0, atol=tol):
        a = a.real
    return a
def asscalar(a):
    """Return the single element of size-1 array *a* as an equivalent
    Python scalar, preserving the value's type information."""
    return a.item()
#-----------------------------------------------------------------------------
# English descriptions for single-character dtype codes.
_namefromtype = {
    'S1': 'character',
    '?': 'bool',
    'b': 'signed char',
    'B': 'unsigned char',
    'h': 'short',
    'H': 'unsigned short',
    'i': 'integer',
    'I': 'unsigned integer',
    'l': 'long integer',
    'L': 'unsigned long integer',
    'q': 'long long integer',
    'Q': 'unsigned long long integer',
    'f': 'single precision',
    'd': 'double precision',
    'g': 'long precision',
    'F': 'complex single precision',
    'D': 'complex double precision',
    'G': 'complex long double precision',
    'S': 'string',
    'U': 'unicode',
    'V': 'void',
    'O': 'object',
}


def typename(char):
    """Return the English description for dtype character code *char*.

    Raises KeyError for an unknown code.
    """
    return _namefromtype[char]
#-----------------------------------------------------------------------------
#determine the "minimum common type" for a group of arrays.
# Inexact result types indexed as [is_complex][precision].
array_type = [[_nx.single, _nx.double, _nx.longdouble],
              [_nx.csingle, _nx.cdouble, _nx.clongdouble]]
# Precision rank of each inexact scalar type.
array_precision = {_nx.single: 0,
                   _nx.double: 1,
                   _nx.longdouble: 2,
                   _nx.csingle: 0,
                   _nx.cdouble: 1,
                   _nx.clongdouble: 2}


def common_type(*arrays):
    """Return the inexact scalar type able to represent every input array.

    The result is always inexact: integer arrays are promoted to double
    precision.  If any input is complex-typed the result is complex.

    Raises TypeError for arrays that are neither integer nor inexact.
    """
    any_complex = False
    highest = 0
    for arr in arrays:
        scalar_type = arr.dtype.type
        if iscomplexobj(arr):
            any_complex = True
        if issubclass(scalar_type, _nx.integer):
            rank = 1
        else:
            rank = array_precision.get(scalar_type, None)
            if rank is None:
                raise TypeError("can't get common type for non-numeric array")
        highest = max(highest, rank)
    return array_type[1 if any_complex else 0][highest]
| bsd-3-clause |
imarin/Odoo-Mexico-localization | city/__openerp__.py | 1 | 1941 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2013 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
############################################################################
# Coded by: moylop260 (moylop260@vauxoo.com)
# Julio Serna (julio@vauxoo.com)
# Isaac Lopez (isaac@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "City",
"version" : "1.0",
"author" : "Vauxoo",
"category" : "Localization/Mexico",
"description" : """This module creates the city model similar to states model and adds the field city_id on res partner.
""",
"website" : "http://www.vauxoo.com/",
"license" : "AGPL-3",
"depends" : [
"base",
],
"demo" : [],
"data" : [
'res_city_view.xml',
'partner_address_view.xml',
'security/city_security.xml',
'security/ir.model.access.csv',
],
"installable" : True,
"active" : False,
}
| agpl-3.0 |
yuewko/neutron | neutron/db/l3_attrs_db.py | 63 | 3677 | # Copyright (c) 2014 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import db_base_plugin_v2
from neutron.db import l3_db
from neutron.db import model_base
from neutron.extensions import l3
class RouterExtraAttributes(model_base.BASEV2):
    """Additional attributes for a Virtual Router."""

    # NOTE(armando-migliaccio): this model can be a good place to
    # add extension attributes to a Router model. Each case needs
    # to be individually examined, however 'distributed' and other
    # simple ones fit the pattern well.
    __tablename__ = "router_extra_attributes"
    # One row per router; removing the router cascades to this row.
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey('routers.id', ondelete="CASCADE"),
                          primary_key=True)
    # Whether the router is a legacy (centralized) or a distributed one
    distributed = sa.Column(sa.Boolean, default=False,
                            server_default=sa.sql.false(),
                            nullable=False)
    # Whether the router is to be considered a 'service' router
    service_router = sa.Column(sa.Boolean, default=False,
                               server_default=sa.sql.false(),
                               nullable=False)
    # Whether high-availability is enabled for this router.
    ha = sa.Column(sa.Boolean, default=False,
                   server_default=sa.sql.false(),
                   nullable=False)
    # Virtual-router id assigned when HA is enabled (nullable otherwise).
    ha_vr_id = sa.Column(sa.Integer())
    # Eager-loaded backref: router_db['extra_attributes'] resolves to this
    # row (or None before one is created).
    router = orm.relationship(
        l3_db.Router,
        backref=orm.backref("extra_attributes", lazy='joined',
                            uselist=False, cascade='delete'))
class ExtraAttributesMixin(object):
    """Mixin that maps RouterExtraAttributes rows onto router API dicts."""

    # Subclasses populate this with dicts of the form
    # {'name': <attribute name>, 'default': <value when absent>}.
    extra_attributes = []

    def _extend_extra_router_dict(self, router_res, router_db):
        """Copy each declared extra attribute from the DB row into the
        API response dict, substituting its default when absent."""
        stored = router_db['extra_attributes'] or {}
        for spec in self.extra_attributes:
            key = spec['name']
            if key in stored:
                router_res[key] = stored[key]
            else:
                router_res[key] = spec['default']

    def _get_extra_attributes(self, router, extra_attributes):
        """Build a {name: requested-or-default value} dict from the
        incoming router request body."""
        return dict((spec['name'],
                     router.get(spec['name'], spec['default']))
                    for spec in extra_attributes)

    def _process_extra_attr_router_create(
        self, context, router_db, router_req):
        """Create (or update) the RouterExtraAttributes row for a router."""
        kwargs = self._get_extra_attributes(router_req, self.extra_attributes)
        # extra_attributes reference is populated via backref
        if not router_db['extra_attributes']:
            attributes_db = RouterExtraAttributes(
                router_id=router_db['id'], **kwargs)
            context.session.add(attributes_db)
            router_db['extra_attributes'] = attributes_db
        else:
            # The record will exist if RouterExtraAttributes model's
            # attributes are added with db migrations over time
            router_db['extra_attributes'].update(kwargs)
# Hook _extend_extra_router_dict into the core plugin so the extra
# attributes are appended to every router API response dict.
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
    l3.ROUTERS, ['_extend_extra_router_dict'])
| apache-2.0 |
SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/gluon/contrib/login_methods/openid_auth.py | 42 | 25869 | #!/usr/bin/env python
# coding: utf8
"""
OpenID authentication for web2py
Allowed using OpenID login together with web2py built-in login.
By default, to support OpenID login, put this in your db.py
>>> from gluon.contrib.login_methods.openid_auth import OpenIDAuth
>>> auth.settings.login_form = OpenIDAuth(auth)
To show OpenID list in user profile, you can add the following code
before the end of function user() of your_app/controllers/default.py
+ if (request.args and request.args(0) == "profile"):
+ form = DIV(form, openid_login_form.list_user_openids())
return dict(form=form, login_form=login_form, register_form=register_form, self_registration=self_registration)
More detail in the description of the class OpenIDAuth.
Requirements:
python-openid version 2.2.5 or later
Reference:
* w2p openID
http://w2popenid.appspot.com/init/default/wiki/w2popenid
* RPX and web2py auth module
http://www.web2pyslices.com/main/slices/take_slice/28
* built-in file: gluon/contrib/login_methods/rpx_account.py
* built-in file: gluon/tools.py (Auth class)
"""
import time
from datetime import datetime, timedelta
from gluon import *
from gluon.storage import Storage, Messages
try:
import openid.consumer.consumer
from openid.association import Association
from openid.store.interface import OpenIDStore
from openid.extensions.sreg import SRegRequest, SRegResponse
from openid.store import nonce
from openid.consumer.discover import DiscoveryFailure
except ImportError, err:
raise ImportError("OpenIDAuth requires python-openid package")
# Module-level no-op sentinel; NOTE(review): appears unused in the code
# visible here -- confirm callers before removing.
DEFAULT = lambda: None
class OpenIDAuth(object):
"""
OpenIDAuth
It supports the logout_url, implementing the get_user and login_form
for cas usage of gluon.tools.Auth.
It also uses the ExtendedLoginForm to allow the OpenIDAuth login_methods
combined with the standard logon/register procedure.
It uses OpenID Consumer when render the form and begins the OpenID
authentication.
Example: (put these code after auth.define_tables() in your models.)
auth = Auth(globals(), db) # authentication/authorization
...
auth.define_tables() # creates all needed tables
...
#include in your model after auth has been defined
from gluon.contrib.login_methods.openid_auth import OpenIDAuth
openid_login_form = OpenIDAuth(request, auth, db)
from gluon.contrib.login_methods.extended_login_form import ExtendedLoginForm
extended_login_form = ExtendedLoginForm(request, auth, openid_login_form,
signals=['oid','janrain_nonce'])
auth.settings.login_form = extended_login_form
"""
def __init__(self, auth):
self.auth = auth
self.db = auth.db
request = current.request
self.nextvar = '_next'
self.realm = 'http://%s' % request.env.http_host
self.login_url = URL(r=request, f='user', args=['login'])
self.return_to_url = self.realm + self.login_url
self.table_alt_logins_name = "alt_logins"
if not auth.settings.table_user:
raise
self.table_user = self.auth.settings.table_user
self.openid_expiration = 15 # minutes
self.messages = self._define_messages()
if not self.table_alt_logins_name in self.db.tables:
self._define_alt_login_table()
    def _define_messages(self):
        """Build the translatable Messages bundle (labels, flashes and
        help text) used by the OpenID login/profile forms."""
        messages = Messages(current.T)
        messages.label_alt_login_username = 'Sign-in with OpenID: '
        messages.label_add_alt_login_username = 'Add a new OpenID: '
        messages.submit_button = 'Sign in'
        messages.submit_button_add = 'Add'
        messages.a_delete = 'Delete'
        messages.comment_openid_signin = 'What is OpenID?'
        messages.comment_openid_help_title = 'Start using your OpenID'
        messages.comment_openid_help_url = 'http://openid.net/get-an-openid/start-using-your-openid/'
        messages.openid_fail_discover = 'Failed to discover OpenID service. Check your OpenID or "More about OpenID"?'
        messages.flash_openid_expired = 'OpenID expired. Please login or authenticate OpenID again. Sorry for the inconvenient.'
        messages.flash_openid_associated = 'OpenID associated'
        messages.flash_associate_openid = 'Please login or register an account for this OpenID.'
        messages.p_openid_not_registered = "This Open ID haven't be registered. " \
            + "Please login to associate with it or register an account for it."
        messages.flash_openid_authenticated = 'OpenID authenticated successfully.'
        messages.flash_openid_fail_authentication = 'OpenID authentication failed. (Error message: %s)'
        messages.flash_openid_canceled = 'OpenID authentication canceled by user.'
        messages.flash_openid_need_setup = 'OpenID authentication needs to be setup by the user with the provider first.'
        messages.h_openid_login = 'OpenID Login'
        messages.h_openid_list = 'OpenID List'
        return messages
    def _define_alt_login_table(self):
        """
        Define the OpenID login table.

        Note: oidtype is what I used for our project.
        We're going to support 'facebook' and
        'plurk' alternate login methods.
        Otherwise it's always 'openid' and you
        may not need it. This should be easy to change.
        (Just remove the field of "type" and remove the
        "and db.alt_logins.oidtype == type_"
        in _find_matched_openid function)
        """
        db = self.db
        # One row per external identifier; oiduser references the Auth user.
        table = db.define_table(
            self.table_alt_logins_name,
            Field('username', length=512, default=''),
            Field('oidtype', length=128, default='openid', readable=False),
            Field('oiduser', self.table_user, readable=False),
        )
        # An OpenID may be associated with at most one local account.
        table.username.requires = IS_NOT_IN_DB(db, table.username)
        self.table_alt_logins = table
def logout_url(self, next):
"""
Delete the w2popenid record in session as logout
"""
if current.session.w2popenid:
del(current.session.w2popenid)
return next
def login_form(self):
"""
Start to process the OpenID response if 'janrain_nonce' in request parameters
and not processed yet. Else return the OpenID form for login.
"""
request = current.request
if 'janrain_nonce' in request.vars and not self._processed():
self._process_response()
return self.auth()
return self._form()
    def get_user(self):
        """
        It supports the logout_url, implementing the get_user and login_form
        for cas usage of gluon.tools.Auth.

        Returns True for a logout request, a {field: value} dict identifying
        the local user once the OpenID is authenticated and associated, or
        None while authentication/association is still in progress.
        """
        request = current.request
        args = request.args
        if args[0] == 'logout':
            return True  # Let logout_url got called
        if current.session.w2popenid:
            w2popenid = current.session.w2popenid
            db = self.db
            if (w2popenid.ok is True and w2popenid.oid):  # OpenID authenticated
                # Stale session state: force a fresh login round-trip.
                if self._w2popenid_expired(w2popenid):
                    del(current.session.w2popenid)
                    flash = self.messages.flash_openid_expired
                    current.session.warning = flash
                    redirect(self.auth.settings.login_url)
                oid = self._remove_protocol(w2popenid.oid)
                alt_login = self._find_matched_openid(db, oid)
                nextvar = self.nextvar
                # This OpenID not in the database. If user logged in then add it
                # into database, else ask user to login or register.
                if not alt_login:
                    if self.auth.is_logged_in():
                        # TODO: ask first maybe
                        self._associate_user_openid(self.auth.user, oid)
                        if current.session.w2popenid:
                            del(current.session.w2popenid)
                        current.session.flash = self.messages.flash_openid_associated
                        if nextvar in request.vars:
                            redirect(request.vars[nextvar])
                        redirect(self.auth.settings.login_next)
                    if nextvar not in request.vars:
                        # no next var, add it and do login again
                        # so if user login or register can go back here to associate the OpenID
                        redirect(URL(r=request,
                                     args=['login'],
                                     vars={nextvar: self.login_url}))
                    self.login_form = self._form_with_notification()
                    current.session.flash = self.messages.flash_associate_openid
                    return None  # need to login or register to associate this openid
                # Get existed OpenID user
                user = db(
                    self.table_user.id == alt_login.oiduser).select().first()
                if user:
                    if current.session.w2popenid:
                        del(current.session.w2popenid)
                # Auth identifies users by 'username' if present, else 'email'.
                if 'username' in self.table_user.fields():
                    username = 'username'
                elif 'email' in self.table_user.fields():
                    username = 'email'
                return {username: user[username]} if user else None  # login success (almost)
        return None  # just start to login
def _find_matched_openid(self, db, oid, type_='openid'):
"""
Get the matched OpenID for given
"""
query = (
(db.alt_logins.username == oid) & (db.alt_logins.oidtype == type_))
alt_login = db(query).select().first() # Get the OpenID record
return alt_login
def _associate_user_openid(self, user, oid):
"""
Associate the user logged in with given OpenID
"""
# print "[DB] %s authenticated" % oid
self.db.alt_logins.insert(username=oid, oiduser=user.id)
def _form_with_notification(self):
"""
Render the form for normal login with a notice of OpenID authenticated
"""
form = DIV()
# TODO: check when will happen
if self.auth.settings.login_form in (self.auth, self):
self.auth.settings.login_form = self.auth
form = DIV(self.auth())
register_note = DIV(P(self.messages.p_openid_not_registered))
form.components.append(register_note)
return lambda: form
def _remove_protocol(self, oid):
"""
Remove https:// or http:// from oid url
"""
protocol = 'https://'
if oid.startswith(protocol):
oid = oid[len(protocol):]
return oid
protocol = 'http://'
if oid.startswith(protocol):
oid = oid[len(protocol):]
return oid
return oid
def _init_consumerhelper(self):
"""
Initialize the ConsumerHelper
"""
if not hasattr(self, "consumerhelper"):
self.consumerhelper = ConsumerHelper(current.session,
self.db)
return self.consumerhelper
def _form(self, style=None):
form = DIV(H3(self.messages.h_openid_login), self._login_form(style))
return form
    def _login_form(self,
                    openid_field_label=None,
                    submit_button=None,
                    _next=None,
                    style=None):
        """
        Render the form for OpenID login.

        :param openid_field_label: label shown before the OpenID input
        :param submit_button: caption for the submit button
        :param _next: when 'profile', a hidden _next input posts back to the
            profile page so a newly added OpenID joins the logged-in user
        :param style: inline CSS for the OpenID input (defaults to the
            standard OpenID-logo background)
        """
        def warning_openid_fail(session):
            session.warning = messages.openid_fail_discover
        style = style or """
        background-attachment: scroll;
        background-repeat: no-repeat;
        background-image: url("http://wiki.openid.net/f/openid-16x16.gif");
        background-position: 0% 50%;
        background-color: transparent;
        padding-left: 18px;
        width: 400px;
        """
        style = style.replace("\n", "")
        request = current.request
        session = current.session
        messages = self.messages
        hidden_next_input = ""
        if _next == 'profile':
            profile_url = URL(r=request, f='user', args=['profile'])
            hidden_next_input = INPUT(
                _type="hidden", _name="_next", _value=profile_url)
        form = FORM(
            openid_field_label or self.messages.label_alt_login_username,
            INPUT(_type="input", _name="oid",
                  requires=IS_NOT_EMPTY(
                      error_message=messages.openid_fail_discover),
                  _style=style),
            hidden_next_input,
            INPUT(_type="submit",
                  _value=submit_button or messages.submit_button),
            " ",
            A(messages.comment_openid_signin,
              _href=messages.comment_openid_help_url,
              _title=messages.comment_openid_help_title,
              _class='openid-identifier',
              _target="_blank"),
            _action=self.login_url
        )
        if form.accepts(request.vars, session):
            oid = request.vars.oid
            consumerhelper = self._init_consumerhelper()
            url = self.login_url
            return_to_url = self.return_to_url
            if not oid:
                warning_openid_fail(session)
                redirect(url)
            try:
                if '_next' in request.vars:
                    return_to_url = self.return_to_url + \
                        '?_next=' + request.vars._next
                # Send the user to the OpenID provider for authentication.
                url = consumerhelper.begin(oid, self.realm, return_to_url)
            except DiscoveryFailure:
                warning_openid_fail(session)
            # On DiscoveryFailure, url is still the login page (plus warning).
            redirect(url)
        return form
def _processed(self):
"""
Check if w2popenid authentication is processed.
Return True if processed else False.
"""
processed = (hasattr(current.session, 'w2popenid') and
current.session.w2popenid.ok is True)
return processed
def _set_w2popenid_expiration(self, w2popenid):
"""
Set expiration for OpenID authentication.
"""
w2popenid.expiration = datetime.now(
) + timedelta(minutes=self.openid_expiration)
def _w2popenid_expired(self, w2popenid):
"""
Check if w2popenid authentication is expired.
Return True if expired else False.
"""
return (not w2popenid.expiration) or (datetime.now() > w2popenid.expiration)
    def _process_response(self):
        """
        Process the OpenID response by ConsumerHelper.

        On success, marks the session state as authenticated, stamps its
        expiry and stores any Simple Registration data; otherwise records
        the appropriate warning flash message.
        """
        request = current.request
        request_vars = request.vars
        consumerhelper = self._init_consumerhelper()
        process_status = consumerhelper.process_response(
            request_vars, self.return_to_url)
        if process_status == "success":
            w2popenid = current.session.w2popenid
            user_data = self.consumerhelper.sreg()
            current.session.w2popenid.ok = True
            self._set_w2popenid_expiration(w2popenid)
            w2popenid.user_data = user_data
            current.session.flash = self.messages.flash_openid_authenticated
        elif process_status == "failure":
            flash = self.messages.flash_openid_fail_authentication % consumerhelper.error_message
            current.session.warning = flash
        elif process_status == "cancel":
            current.session.warning = self.messages.flash_openid_canceled
        elif process_status == "setup_needed":
            current.session.warning = self.messages.flash_openid_need_setup
    def list_user_openids(self):
        """Render the logged-in user's OpenIDs with delete links, plus a
        form to associate a new OpenID with the account.

        Handles a pending ?delete_openid=... request before listing.
        """
        messages = self.messages
        request = current.request
        if 'delete_openid' in request.vars:
            self.remove_openid(request.vars.delete_openid)
        query = self.db.alt_logins.oiduser == self.auth.user.id
        alt_logins = self.db(query).select()
        l = []
        for alt_login in alt_logins:
            username = alt_login.username
            delete_href = URL(r=request, f='user',
                              args=['profile'],
                              vars={'delete_openid': username})
            delete_link = A(messages.a_delete, _href=delete_href)
            l.append(LI(username, " ", delete_link))
        # NOTE(review): profile_url is currently unused (see the commented
        # return_to_url line below).
        profile_url = URL(r=request, f='user', args=['profile'])
        #return_to_url = self.return_to_url + '?' + self.nextvar + '=' + profile_url
        openid_list = DIV(H3(messages.h_openid_list), UL(l),
                          self._login_form(
                          _next='profile',
                          submit_button=messages.submit_button_add,
                          openid_field_label=messages.label_add_alt_login_username)
                          )
        return openid_list
def remove_openid(self, openid):
query = self.db.alt_logins.username == openid
self.db(query).delete()
class ConsumerHelper(object):
    """
    ConsumerHelper wraps the python-openid Consumer API, tying it to the
    web2py session and a Web2pyStore for persistence.
    """

    def __init__(self, session, db):
        self.session = session
        store = self._init_store(db)
        self.consumer = openid.consumer.consumer.Consumer(session, store)

    def _init_store(self, db):
        """
        Initialize Web2pyStore and the session's w2popenid slot.
        """
        if not hasattr(self, "store"):
            store = Web2pyStore(db)
            session = self.session
            if 'w2popenid' not in session:
                session.w2popenid = Storage()
            self.store = store
        return self.store

    def begin(self, oid, realm, return_to_url):
        """
        Begin the OpenID authentication; returns the provider redirect URL.
        """
        w2popenid = self.session.w2popenid
        w2popenid.oid = oid
        auth_req = self.consumer.begin(oid)
        # Ask the provider for email/nickname via Simple Registration.
        auth_req.addExtension(SRegRequest(required=['email', 'nickname']))
        url = auth_req.redirectURL(return_to=return_to_url, realm=realm)
        return url

    def process_response(self, request_vars, return_to_url):
        """
        Complete the authentication and map the library status to one of
        "success", "failure", "cancel", "setup_needed" or "no resp".
        """
        resp = self.consumer.complete(request_vars, return_to_url)
        if resp:
            if resp.status == openid.consumer.consumer.SUCCESS:
                self.resp = resp
                if hasattr(resp, "identity_url"):
                    # Canonical identity as confirmed by the provider.
                    self.session.w2popenid.oid = resp.identity_url
                return "success"
            if resp.status == openid.consumer.consumer.FAILURE:
                self.error_message = resp.message
                return "failure"
            if resp.status == openid.consumer.consumer.CANCEL:
                return "cancel"
            if resp.status == openid.consumer.consumer.SETUP_NEEDED:
                return "setup_needed"
        return "no resp"

    def sreg(self):
        """
        Try to get OpenID Simple Registration data from the last response.
        http://openid.net/specs/openid-simple-registration-extension-1_0.html
        """
        if self.resp:
            resp = self.resp
            sreg_resp = SRegResponse.fromSuccessResponse(resp)
            return sreg_resp.data if sreg_resp else None
        else:
            return None
class Web2pyStore(OpenIDStore):
    """
    Web2pyStore

    This class implements the OpenIDStore interface. OpenID stores take care
    of persisting nonces and associations. The Janrain Python OpenID library
    comes with implementations for file and memory storage. Web2pyStore uses
    the web2py db abstraction layer. See the source code docs of OpenIDStore
    for a comprehensive description of this interface.
    """

    def __init__(self, database):
        self.database = database
        self.table_oid_associations_name = 'oid_associations'
        self.table_oid_nonces_name = 'oid_nonces'
        self._initDB()

    def _initDB(self):
        """Create the association and nonce tables if they don't exist."""
        if self.table_oid_associations_name not in self.database:
            self.database.define_table(
                self.table_oid_associations_name,
                Field('server_url', 'string', length=2047, required=True),
                Field('handle', 'string', length=255, required=True),
                Field('secret', 'blob', required=True),
                Field('issued', 'integer', required=True),
                Field('lifetime', 'integer', required=True),
                Field('assoc_type', 'string', length=64, required=True))
        if self.table_oid_nonces_name not in self.database:
            self.database.define_table(
                self.table_oid_nonces_name,
                Field('server_url', 'string', length=2047, required=True),
                Field('itimestamp', 'integer', required=True),
                Field('salt', 'string', length=40, required=True))

    def storeAssociation(self, server_url, association):
        """
        Store associations. If there already is one with the same
        server_url and handle in the table replace it.
        """
        db = self.database
        query = (db.oid_associations.server_url == server_url) & (
            db.oid_associations.handle == association.handle)
        db(query).delete()
        # Fix: the original statement ended with ", 'insert ' * 10" --
        # leftover debug code building and discarding a throwaway tuple.
        db.oid_associations.insert(server_url=server_url,
                                   handle=association.handle,
                                   secret=association.secret,
                                   issued=association.issued,
                                   lifetime=association.lifetime,
                                   assoc_type=association.assoc_type)

    def getAssociation(self, server_url, handle=None):
        """
        Return the association for server_url and handle. If handle is
        None, return the latest association for that server_url.
        Return None if no association can be found.
        """
        db = self.database
        query = (db.oid_associations.server_url == server_url)
        if handle:
            query &= (db.oid_associations.handle == handle)
        rows = db(query).select(orderby=db.oid_associations.issued)
        keep_assoc, _ = self._removeExpiredAssocations(rows)
        if len(keep_assoc) == 0:
            return None
        else:
            # Rows are ordered by issue time, so the last one is the newest.
            assoc = keep_assoc.pop()
            return Association(assoc['handle'],
                               assoc['secret'],
                               assoc['issued'],
                               assoc['lifetime'],
                               assoc['assoc_type'])

    def removeAssociation(self, server_url, handle):
        """Delete the matching association row(s)."""
        db = self.database
        query = (db.oid_associations.server_url == server_url) & (
            db.oid_associations.handle == handle)
        return db(query).delete() is not None

    def useNonce(self, server_url, timestamp, salt):
        """
        This method returns False if a nonce has been used before or its
        timestamp is not current.
        """
        db = self.database
        # Reject nonces too far from the current time.
        if abs(timestamp - time.time()) > nonce.SKEW:
            return False
        query = ((db.oid_nonces.server_url == server_url) &
                 (db.oid_nonces.itimestamp == timestamp) &
                 (db.oid_nonces.salt == salt))
        if db(query).count() > 0:
            return False
        db.oid_nonces.insert(server_url=server_url,
                             itimestamp=timestamp,
                             salt=salt)
        return True

    def _removeExpiredAssocations(self, rows):
        """
        This helper function is not part of the interface. Given a list of
        association rows it checks which associations have expired and
        deletes them from the db. It returns a tuple of the form
        ([valid_assoc], no_of_expired_assoc_deleted).
        """
        db = self.database
        keep_assoc = []
        remove_assoc = []
        t1970 = time.time()
        for r in rows:
            if r['issued'] + r['lifetime'] < t1970:
                remove_assoc.append(r)
            else:
                keep_assoc.append(r)
        for r in remove_assoc:
            del db.oid_associations[r['id']]
        # (valid associations, number of deleted associations)
        return (keep_assoc, len(remove_assoc))

    def cleanupNonces(self):
        """
        Remove expired nonce entries from DB and return the number
        of entries deleted.
        """
        db = self.database
        query = (db.oid_nonces.itimestamp < time.time() - nonce.SKEW)
        return db(query).delete()

    def cleanupAssociations(self):
        """
        Remove expired associations from db and return the number
        of entries deleted.
        """
        db = self.database
        query = (db.oid_associations.id > 0)
        return self._removeExpiredAssocations(db(query).select())[1]

    def cleanup(self):
        """
        This method should be run periodically to free the db from
        expired nonce and association entries.
        """
        return self.cleanupNonces(), self.cleanupAssociations()
| gpl-3.0 |
fhaoquan/shadowsocks | shadowsocks/lru_cache.py | 983 | 4290 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import collections
import logging
import time
# this LRUCache is optimized for concurrency, not QPS
# n: concurrency, keys stored in the cache
# m: visits not timed out, proportional to QPS * timeout
# get & set is O(1), not O(n). thus we can support very large n
# TODO: if timeout or QPS is too large, then this cache is not very efficient,
# as sweep() causes long pause
class LRUCache(collections.MutableMapping):
    """This class is not thread safe"""
    # NOTE(review): collections.MutableMapping moved to collections.abc and
    # the alias was removed in Python 3.10; this module targets Python 2.

    def __init__(self, timeout=60, close_callback=None, *args, **kwargs):
        # timeout: seconds a key may stay untouched before sweep() evicts it.
        # close_callback: invoked once per evicted value (e.g. to close it).
        self.timeout = timeout
        self.close_callback = close_callback
        self._store = {}
        # visit time -> keys visited at that time (may hold stale entries)
        self._time_to_keys = collections.defaultdict(list)
        # key -> time of its most recent visit
        self._keys_to_last_time = {}
        # FIFO of all visit times, consumed oldest-first by sweep()
        self._last_visits = collections.deque()
        # values already handed to close_callback during the current sweep
        self._closed_values = set()
        self.update(dict(*args, **kwargs))  # use the free update to set keys

    def __getitem__(self, key):
        # O(1)
        # A read counts as a visit and refreshes the key's expiry.
        t = time.time()
        self._keys_to_last_time[key] = t
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)
        return self._store[key]

    def __setitem__(self, key, value):
        # O(1)
        t = time.time()
        self._keys_to_last_time[key] = t
        self._store[key] = value
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)

    def __delitem__(self, key):
        # O(1)
        # Visit-time bookkeeping is left behind; sweep() skips stale entries.
        del self._store[key]
        del self._keys_to_last_time[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    def sweep(self):
        # O(m)
        # Walk visit times oldest-first; stop at the first still-fresh one.
        now = time.time()
        c = 0
        while len(self._last_visits) > 0:
            least = self._last_visits[0]
            if now - least <= self.timeout:
                break
            if self.close_callback is not None:
                for key in self._time_to_keys[least]:
                    if key in self._store:
                        # Only close if the key wasn't visited again since.
                        if now - self._keys_to_last_time[key] > self.timeout:
                            value = self._store[key]
                            # _closed_values prevents calling the callback
                            # twice for one value within a single sweep.
                            if value not in self._closed_values:
                                self.close_callback(value)
                                self._closed_values.add(value)
            for key in self._time_to_keys[least]:
                self._last_visits.popleft()
                if key in self._store:
                    if now - self._keys_to_last_time[key] > self.timeout:
                        del self._store[key]
                        del self._keys_to_last_time[key]
                        c += 1
            del self._time_to_keys[least]
        if c:
            self._closed_values.clear()
            logging.debug('%d keys swept' % c)
def test():
    # Exercise LRUCache expiry, refresh-on-access and the close-callback
    # path using a 0.3s timeout and short sleeps.
    c = LRUCache(timeout=0.3)
    c['a'] = 1
    assert c['a'] == 1
    time.sleep(0.5)
    c.sweep()
    # 'a' was idle longer than the timeout, so sweep() evicted it.
    assert 'a' not in c

    c['a'] = 2
    c['b'] = 3
    time.sleep(0.2)
    c.sweep()
    assert c['a'] == 2
    assert c['b'] == 3
    time.sleep(0.2)
    c.sweep()
    # Touch 'b' so only 'a' stays idle long enough to be evicted next.
    c['b']
    time.sleep(0.2)
    c.sweep()
    assert 'a' not in c
    assert c['b'] == 3
    time.sleep(0.5)
    c.sweep()
    assert 'a' not in c
    assert 'b' not in c

    # The close callback must run exactly once for the evicted value.
    global close_cb_called
    close_cb_called = False

    def close_cb(t):
        global close_cb_called
        assert not close_cb_called
        close_cb_called = True

    c = LRUCache(timeout=0.1, close_callback=close_cb)
    c['s'] = 1
    c['s']
    time.sleep(0.1)
    c['s']
    time.sleep(0.3)
    c.sweep()
if __name__ == '__main__':
test()
| apache-2.0 |
mikalstill/nova | nova/cmd/common.py | 3 | 6933 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common functions used by different CLI interfaces.
"""
from __future__ import print_function
import argparse
import traceback
from oslo_log import log as logging
import six
import nova.conf
import nova.db.api
from nova import exception
from nova.i18n import _
from nova import utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
def block_db_access(service_name):
    """Blocks Nova DB access.

    Installs a stand-in DB driver whose every attribute access returns
    itself and whose every call logs the offending stack trace and raises
    DBNotAllowed, so direct DB use from *service_name* fails loudly.
    """

    class NoDB(object):
        def __getattr__(self, attr):
            return self

        def __call__(self, *args, **kwargs):
            stacktrace = "".join(traceback.format_stack())
            LOG.error('No db access allowed in %(service_name)s: '
                      '%(stacktrace)s',
                      dict(service_name=service_name, stacktrace=stacktrace))
            raise exception.DBNotAllowed(service_name)

    nova.db.api.IMPL = NoDB()
def validate_args(fn, *args, **kwargs):
    """Check that the supplied args are sufficient for calling a function.

    Note: unlike similar helpers elsewhere, this returns the missing
    argument names rather than raising (the previous doctests wrongly
    showed a MissingArgs traceback).

    >>> validate_args(lambda a: None)
    ['a']
    >>> validate_args(lambda a, b, c, d: None, 0, c=1)
    ['b', 'd']

    :param fn: the function to check
    :param arg: the positional arguments supplied
    :param kwargs: the keyword arguments supplied
    :returns: list of required argument names that are still missing
    """
    argspec = utils.getargspec(fn)

    num_defaults = len(argspec.defaults or [])
    # Parameters without defaults are the required ones.
    required_args = argspec.args[:len(argspec.args) - num_defaults]

    if six.get_method_self(fn) is not None:
        # Bound method: self/cls is supplied implicitly.
        required_args.pop(0)

    missing = [arg for arg in required_args if arg not in kwargs]
    # Positional args fill required parameters from the front.
    missing = missing[len(args):]
    return missing
# Decorators for actions
def args(*args, **kwargs):
    """Decorator recording (args, kwargs) on the decorated function.

    Each application prepends one (args, kwargs) tuple to
    func.__dict__['args'], so the decorator nearest the function ends up
    last in the list.
    """
    def _decorator(func):
        arg_list = func.__dict__.setdefault('args', [])
        arg_list.insert(0, (args, kwargs))
        return func
    return _decorator
def methods_of(obj):
    """Get all callable methods of an object that don't start with underscore

    returns a list of tuples of the form (method_name, method)
    """
    return [(name, getattr(obj, name))
            for name in dir(obj)
            if not name.startswith('_') and callable(getattr(obj, name))]
def add_command_parsers(subparsers, categories):
    """Adds command parsers to the given subparsers.

    Adds version and bash-completion parsers.
    Adds a parser with subparsers for each category in the categories dict
    given.  Each action method's recorded @args decorations become argparse
    arguments, with 'action_kwarg_' prefixes keeping them out of the way of
    oslo.config's own option names.
    """
    parser = subparsers.add_parser('version')

    parser = subparsers.add_parser('bash-completion')
    parser.add_argument('query_category', nargs='?')

    for category in categories:
        command_object = categories[category]()

        desc = getattr(command_object, 'description', None)
        parser = subparsers.add_parser(category, description=desc)
        parser.set_defaults(command_object=command_object)

        category_subparsers = parser.add_subparsers(dest='action')

        for (action, action_fn) in methods_of(command_object):
            parser = category_subparsers.add_parser(
                action, description=getattr(action_fn, 'description', desc))

            action_kwargs = []
            for args, kwargs in getattr(action_fn, 'args', []):
                # we must handle positional parameters (ARG) separately from
                # optional parameters (--opt). Detect this by checking for
                # the presence of leading '--'
                if args[0] != args[0].lstrip('-'):
                    kwargs.setdefault('dest', args[0].lstrip('-'))
                    if kwargs['dest'].startswith('action_kwarg_'):
                        action_kwargs.append(
                            kwargs['dest'][len('action_kwarg_'):])
                    else:
                        action_kwargs.append(kwargs['dest'])
                        kwargs['dest'] = 'action_kwarg_' + kwargs['dest']
                else:
                    action_kwargs.append(args[0])
                    args = ['action_kwarg_' + arg for arg in args]

                parser.add_argument(*args, **kwargs)

            parser.set_defaults(action_fn=action_fn)
            parser.set_defaults(action_kwargs=action_kwargs)

            # Remaining positionals are collected for the action itself.
            parser.add_argument('action_args', nargs='*',
                                help=argparse.SUPPRESS)
def print_bash_completion(categories):
    """Print space-separated completion candidates for bash.

    With no query category, prints the category names; with a known
    category, prints that category's action names.
    """
    if not CONF.category.query_category:
        print(" ".join(categories.keys()))
    elif CONF.category.query_category in categories:
        fn = categories[CONF.category.query_category]
        command_object = fn()
        actions = methods_of(command_object)
        print(" ".join([k for (k, v) in actions]))
def get_action_fn():
    """Resolve the CLI action selected via CONF.category.

    Decodes byte-string positional and keyword arguments to text, checks
    that all required parameters were supplied, and returns the tuple
    (action_fn, positional_args, keyword_args).

    :raises exception.Invalid: if required arguments are missing (after
        printing the action's docstring and option help)
    """
    fn = CONF.category.action_fn
    fn_args = []
    for arg in CONF.category.action_args:
        if isinstance(arg, six.binary_type):
            arg = arg.decode('utf-8')
        fn_args.append(arg)
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, 'action_kwarg_' + k)
        if v is None:
            continue
        if isinstance(v, six.binary_type):
            v = v.decode('utf-8')
        fn_kwargs[k] = v

    # call the action with the remaining arguments
    # check arguments
    missing = validate_args(fn, *fn_args, **fn_kwargs)
    if missing:
        # NOTE(mikal): this isn't the most helpful error message ever. It is
        # long, and tells you a lot of things you probably don't want to know
        # if you just got a single arg wrong.
        print(fn.__doc__)
        CONF.print_help()
        raise exception.Invalid(
            _("Missing arguments: %s") % ", ".join(missing))

    return fn, fn_args, fn_kwargs
def action_description(text):
    """Attach help *text* to a command action.

    The decorated action then shows *text* instead of the common category
    help for ``command <category> <action> -h``.
    """
    def _set_description(func):
        func.description = text
        return func
    return _set_description
| apache-2.0 |
ojengwa/oh-mainline | vendor/packages/Django/django/db/backends/oracle/base.py | 103 | 36127 | """
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
from __future__ import unicode_literals
import datetime
import decimal
import sys
import warnings
def _setup_environment(environ):
import platform
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
import os
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
from django.conf import settings
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.oracle.client import DatabaseClient
from django.db.backends.oracle.creation import DatabaseCreation
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils.encoding import force_bytes, force_text
from django.utils import six
from django.utils import timezone
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# Check whether cx_Oracle was compiled with the WITH_UNICODE option if cx_Oracle is pre-5.1. This will
# also be True for cx_Oracle 5.1 and in Python 3.0. See #19606
if int(Database.version.split('.', 1)[0]) >= 5 and \
(int(Database.version.split('.', 2)[1]) >= 1 or
not hasattr(Database, 'UNICODE')):
convert_unicode = force_text
else:
convert_unicode = force_bytes
class DatabaseFeatures(BaseDatabaseFeatures):
    # Capability flags for the Oracle backend; the ORM consults these to
    # adapt generated SQL and result handling.
    empty_fetchmany_value = ()
    needs_datetime_string_cast = False
    # Oracle stores '' as NULL, so the backend round-trips empty strings.
    interprets_empty_strings_as_nulls = True
    uses_savepoints = True
    has_select_for_update = True
    has_select_for_update_nowait = True
    can_return_id_from_insert = True
    allow_sliced_subqueries = False
    supports_subqueries_in_group_by = False
    supports_transactions = True
    supports_timezones = False
    supports_bitwise_or = False
    can_defer_constraint_checks = True
    ignores_nulls_in_unique_constraints = False
    has_bulk_insert = True
    supports_tablespaces = True
    supports_sequence_reset = False
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.oracle.compiler"
    def autoinc_sql(self, table, column):
        """Return (sequence_sql, trigger_sql) emulating AUTO_INCREMENT.

        Oracle has no auto-increment columns, so a sequence plus a
        BEFORE INSERT trigger fill in the pk when the caller omits it.
        The %(...)s placeholders are filled from the local names below.
        """
        # To simulate auto-incrementing primary keys in Oracle, we have to
        # create a sequence and a trigger.
        sq_name = self._get_sequence_name(table)
        tr_name = self._get_trigger_name(table)
        tbl_name = self.quote_name(table)
        col_name = self.quote_name(column)
        sequence_sql = """
DECLARE
    i INTEGER;
BEGIN
    SELECT COUNT(*) INTO i FROM USER_CATALOG
        WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
    IF i = 0 THEN
        EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
    END IF;
END;
/""" % locals()
        trigger_sql = """
CREATE OR REPLACE TRIGGER "%(tr_name)s"
BEFORE INSERT ON %(tbl_name)s
FOR EACH ROW
WHEN (new.%(col_name)s IS NULL)
    BEGIN
        SELECT "%(sq_name)s".nextval
        INTO :new.%(col_name)s FROM dual;
    END;
/""" % locals()
        return sequence_sql, trigger_sql
    def cache_key_culling_sql(self):
        # Rank-based replacement for LIMIT/OFFSET: selects the cache key at
        # position (%s + 1) when culling the database cache table.
        return """
        SELECT cache_key
          FROM (SELECT cache_key, rank() OVER (ORDER BY cache_key) AS rank FROM %s)
         WHERE rank = %%s + 1
        """
def date_extract_sql(self, lookup_type, field_name):
# http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions42a.htm#1017163
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
else:
return "EXTRACT(%s FROM %s)" % (lookup_type, field_name)
    def date_interval_sql(self, sql, connector, timedelta):
        """
        Implements the interval functionality for expressions
        format for Oracle:
        (datefield + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6))
        """
        # Split the timedelta's seconds into h:m:s for the INTERVAL literal.
        minutes, seconds = divmod(timedelta.seconds, 60)
        hours, minutes = divmod(minutes, 60)
        days = str(timedelta.days)
        # DAY(n) precision must cover the number of digits in the day count.
        day_precision = len(days)
        fmt = "(%s %s INTERVAL '%s %02d:%02d:%02d.%06d' DAY(%d) TO SECOND(6))"
        return fmt % (sql, connector, days, hours, minutes, seconds,
                      timedelta.microseconds, day_precision)
def date_trunc_sql(self, lookup_type, field_name):
# Oracle uses TRUNC() for both dates and numbers.
# http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions155a.htm#SQLRF06151
if lookup_type == 'day':
sql = 'TRUNC(%s)' % field_name
else:
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type)
return sql
    def convert_values(self, value, field):
        """Coerce a raw cx_Oracle value to the Python type *field* expects.

        Handles LOB reads, Oracle's ''-as-NULL convention, bool/float/
        decimal coercion and date/time narrowing of TIMESTAMP values.
        """
        if isinstance(value, Database.LOB):
            value = value.read()
            if field and field.get_internal_type() == 'TextField':
                value = force_text(value)

        # Oracle stores empty strings as null. We need to undo this in
        # order to adhere to the Django convention of using the empty
        # string instead of null, but only if the field accepts the
        # empty string.
        if value is None and field and field.empty_strings_allowed:
            value = ''
        # Convert 1 or 0 to True or False
        elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'):
            value = bool(value)
        # Force floats to the correct type
        elif value is not None and field and field.get_internal_type() == 'FloatField':
            value = float(value)
        # Convert floats to decimals
        elif value is not None and field and field.get_internal_type() == 'DecimalField':
            value = util.typecast_decimal(field.format_number(value))
        # cx_Oracle always returns datetime.datetime objects for
        # DATE and TIMESTAMP columns, but Django wants to see a
        # python datetime.date, .time, or .datetime.  We use the type
        # of the Field to determine which to cast to, but it's not
        # always available.
        # As a workaround, we cast to date if all the time-related
        # values are 0, or to time if the date is 1/1/1900.
        # This could be cleaned a bit by adding a method to the Field
        # classes to normalize values from the database (the to_python
        # method is used for validation and isn't what we want here).
        elif isinstance(value, Database.Timestamp):
            if field and field.get_internal_type() == 'DateTimeField':
                pass
            elif field and field.get_internal_type() == 'DateField':
                value = value.date()
            elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1):
                value = value.time()
            elif value.hour == value.minute == value.second == value.microsecond == 0:
                value = value.date()
        return value
    def datetime_cast_sql(self):
        # Wrap a placeholder so string parameters are parsed by Oracle as
        # TIMESTAMP values with fractional seconds.
        return "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')"
    def deferrable_sql(self):
        # Constraint suffix: check FKs at transaction commit, not per statement.
        return " DEFERRABLE INITIALLY DEFERRED"
    def drop_sequence_sql(self, table):
        # Drop the sequence backing the table's auto-increment column.
        return "DROP SEQUENCE %s;" % self.quote_name(self._get_sequence_name(table))
    def fetch_returned_insert_id(self, cursor):
        # Read the pk from the RETURNING ... INTO bind variable that
        # return_insert_id() attached to the cursor.
        return int(cursor._insert_id_var.getvalue())
def field_cast_sql(self, db_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
    def last_executed_query(self, cursor, sql, params):
        """Return the statement text cx_Oracle recorded on the cursor."""
        # http://cx-oracle.sourceforge.net/html/cursor.html#Cursor.statement
        # The DB API definition does not define this attribute.
        if six.PY3:
            return cursor.statement
        else:
            # Python 2 only: `unicode` is the py2 builtin text type.
            query = cursor.statement
            return query if isinstance(query, unicode) else query.decode("utf-8")
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = self._get_sequence_name(table_name)
cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_in_list_size(self):
return 1000
def max_name_length(self):
return 30
def prep_for_iexact_query(self, x):
return x
def process_clob(self, value):
if value is None:
return ''
return force_text(value.read())
    def quote_name(self, name):
        """Quote ``name`` as an Oracle identifier, uppercased and truncated
        to the 30-character identifier limit, with '%' escaped for the
        later (query % args) substitution."""
        # SQL92 requires delimited (quoted) names to be case-sensitive. When
        # not quoted, Oracle has case-insensitive behavior for identifiers, but
        # always defaults to uppercase.
        # We simplify things by making Oracle identifiers always uppercase.
        if not name.startswith('"') and not name.endswith('"'):
            name = '"%s"' % util.truncate_name(name.upper(),
                                               self.max_name_length())
        # Oracle puts the query text into a (query % args) construct, so % signs
        # in names need to be escaped. The '%%' will be collapsed back to '%' at
        # that stage so we aren't really making the name longer here.
        name = name.replace('%','%%')
        return name.upper()
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup_9(self, lookup_type):
raise NotImplementedError("Regexes are not supported in Oracle before version 10g.")
def regex_lookup_10(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
    def regex_lookup(self, lookup_type):
        """Dispatch to the version-appropriate regex lookup.

        Connecting replaces ``self.ops.regex_lookup`` with regex_lookup_9 or
        regex_lookup_10, so the recursive-looking call below actually hits
        the concrete implementation.
        """
        # If regex_lookup is called before it's been initialized, then create
        # a cursor to initialize it and recur.
        self.connection.cursor()
        return self.connection.ops.regex_lookup(lookup_type)
    def return_insert_id(self):
        """Return the RETURNING clause template plus the late-binding
        InsertIdVar that will receive the inserted row's id."""
        return "RETURNING %s INTO %%s", (InsertIdVar(),)
    def savepoint_create_sql(self, sid):
        """Return the SQL creating savepoint ``sid`` (quoted, as unicode)."""
        return convert_unicode("SAVEPOINT " + self.quote_name(sid))
    def savepoint_rollback_sql(self, sid):
        """Return the SQL rolling back to savepoint ``sid`` (quoted, as unicode)."""
        return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))
def sql_flush(self, style, tables, sequences):
# Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
# 'TRUNCATE z;'... style SQL statements
if tables:
# Oracle does support TRUNCATE, but it seems to get us into
# FK referential trouble, whereas DELETE FROM table works.
sql = ['%s %s %s;' % \
(style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)))
for table in tables]
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
    def sequence_reset_by_name_sql(self, style, sequences):
        """Return one PL/SQL reset block per entry in ``sequences``.

        Each entry is a dict with 'table' and (optionally) 'column' keys;
        the column falls back to 'id'.
        """
        sql = []
        for sequence_info in sequences:
            sequence_name = self._get_sequence_name(sequence_info['table'])
            table_name = self.quote_name(sequence_info['table'])
            column_name = self.quote_name(sequence_info['column'] or 'id')
            query = _get_sequence_reset_sql() % {'sequence': sequence_name,
                                                 'table': table_name,
                                                 'column': column_name}
            sql.append(query)
        return sql
    def sequence_reset_sql(self, style, model_list):
        """Return PL/SQL blocks that re-sync each model's sequence with the
        current MAX of its auto/primary-key column, plus one block for each
        auto-created many-to-many table."""
        from django.db import models
        output = []
        query = _get_sequence_reset_sql()
        for model in model_list:
            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    table_name = self.quote_name(model._meta.db_table)
                    sequence_name = self._get_sequence_name(model._meta.db_table)
                    column_name = self.quote_name(f.column)
                    output.append(query % {'sequence': sequence_name,
                                           'table': table_name,
                                           'column': column_name})
                    # Only one AutoField is allowed per model, so don't
                    # continue to loop
                    break
            for f in model._meta.many_to_many:
                # Explicit through models manage their own pk sequence.
                if not f.rel.through:
                    table_name = self.quote_name(f.m2m_db_table())
                    sequence_name = self._get_sequence_name(f.m2m_db_table())
                    column_name = self.quote_name('id')
                    output.append(query % {'sequence': sequence_name,
                                           'table': table_name,
                                           'column': column_name})
        return output
def start_transaction_sql(self):
return ''
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
    def value_to_db_datetime(self, value):
        """Convert a datetime to the text form sent to Oracle.

        Aware datetimes are normalized to naive UTC when USE_TZ is on;
        otherwise an aware value is an error.
        """
        if value is None:
            return None

        # Oracle doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = value.astimezone(timezone.utc).replace(tzinfo=None)
            else:
                raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")

        return six.text_type(value)
    def value_to_db_time(self, value):
        """Convert a time to a datetime anchored on 1900-01-01, since Oracle
        has no standalone TIME type; 'HH:MM:SS' strings are parsed first."""
        if value is None:
            return None

        if isinstance(value, six.string_types):
            return datetime.datetime.strptime(value, '%H:%M:%S')

        # Oracle doesn't support tz-aware times
        if timezone.is_aware(value):
            raise ValueError("Oracle backend does not support timezone-aware times.")

        return datetime.datetime(1900, 1, 1, value.hour, value.minute,
                                 value.second, value.microsecond)
def year_lookup_bounds_for_date_field(self, value):
first = '%s-01-01'
second = '%s-12-31'
return [first % value, second % value]
    def combine_expression(self, connector, sub_expressions):
        "Oracle requires special cases for %% and & operators in query expressions"
        if connector == '%%':
            # Modulo must be spelled MOD(a,b) on Oracle.
            return 'MOD(%s)' % ','.join(sub_expressions)
        elif connector == '&':
            # Bit-wise AND is the BITAND function, not an operator.
            return 'BITAND(%s)' % ','.join(sub_expressions)
        elif connector == '|':
            raise NotImplementedError("Bit-wise or is not supported in Oracle.")
        # Any other connector is handled by the generic implementation.
        return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def _get_sequence_name(self, table):
name_length = self.max_name_length() - 3
return '%s_SQ' % util.truncate_name(table, name_length).upper()
def _get_trigger_name(self, table):
name_length = self.max_name_length() - 3
return '%s_TR' % util.truncate_name(table, name_length).upper()
def bulk_insert_sql(self, fields, num_values):
items_sql = "SELECT %s FROM DUAL" % ", ".join(["%s"] * len(fields))
return " UNION ALL ".join([items_sql] * num_values)
class _UninitializedOperatorsDescriptor(object):
    """Descriptor that lazily populates ``connection.operators`` by forcing
    a connection the first time the attribute is read."""
    def __get__(self, instance, owner):
        # If connection.operators is looked up before a connection has been
        # created, transparently initialize connection.operators to avert an
        # AttributeError.
        if instance is None:
            raise AttributeError("operators not available as class attribute")
        # Creating a cursor will initialize the operators.
        instance.cursor().close()
        return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
    """Oracle database connection wrapper built on cx_Oracle."""
    vendor = 'oracle'
    # `operators` is lazily initialized: the working set is picked in
    # _cursor() depending on whether this server supports TRANSLATE-based
    # LIKE comparisons (see ticket #14149).
    operators = _UninitializedOperatorsDescriptor()

    _standard_operators = {
        'exact': '= %s',
        'iexact': '= UPPER(%s)',
        'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
    }

    # Fallback operator set using LIKEC, for servers where the
    # TRANSLATE-based variant above does not work.
    _likec_operators = _standard_operators.copy()
    _likec_operators.update({
        'contains': "LIKEC %s ESCAPE '\\'",
        'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
        'startswith': "LIKEC %s ESCAPE '\\'",
        'endswith': "LIKEC %s ESCAPE '\\'",
        'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
        'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
    })

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Major server version; filled in lazily by _cursor().
        self.oracle_version = None
        self.features = DatabaseFeatures(self)
        use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
        self.features.can_return_id_from_insert = use_returning_into
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)

    def check_constraints(self, table_names=None):
        """
        To check constraints, we set constraints to immediate. Then, when, we're done we must ensure they
        are returned to deferred.
        """
        self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
        self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')

    def _valid_connection(self):
        # A connection object exists once _cursor() has connected.
        return self.connection is not None

    def _connect_string(self):
        """Build the 'user/password@dsn' connect string for cx_Oracle."""
        settings_dict = self.settings_dict
        if not settings_dict['HOST'].strip():
            settings_dict['HOST'] = 'localhost'
        if settings_dict['PORT'].strip():
            dsn = Database.makedsn(settings_dict['HOST'],
                                   int(settings_dict['PORT']),
                                   settings_dict['NAME'])
        else:
            dsn = settings_dict['NAME']
        return "%s/%s@%s" % (settings_dict['USER'],
                             settings_dict['PASSWORD'], dsn)

    def _cursor(self):
        """Return a cursor, connecting and configuring the session first if
        no connection exists yet."""
        cursor = None
        if not self._valid_connection():
            conn_string = convert_unicode(self._connect_string())
            conn_params = self.settings_dict['OPTIONS'].copy()
            if 'use_returning_into' in conn_params:
                del conn_params['use_returning_into']
            self.connection = Database.connect(conn_string, **conn_params)
            cursor = FormatStylePlaceholderCursor(self.connection)
            # Set the territory first. The territory overrides NLS_DATE_FORMAT
            # and NLS_TIMESTAMP_FORMAT to the territory default. When all of
            # these are set in single statement it isn't clear what is supposed
            # to happen.
            cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
            # Set oracle date to ansi date format.  This only needs to execute
            # once when we create a new connection. We also set the Territory
            # to 'AMERICA' which forces Sunday to evaluate to a '1' in
            # TO_CHAR().
            cursor.execute(
                "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
                " NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'"
                + (" TIME_ZONE = 'UTC'" if settings.USE_TZ else ''))

            if 'operators' not in self.__dict__:
                # Ticket #14149: Check whether our LIKE implementation will
                # work for this connection or we need to fall back on LIKEC.
                # This check is performed only once per DatabaseWrapper
                # instance per thread, since subsequent connections will use
                # the same settings.
                try:
                    cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
                                   % self._standard_operators['contains'],
                                   ['X'])
                except utils.DatabaseError:
                    self.operators = self._likec_operators
                else:
                    self.operators = self._standard_operators

            try:
                self.oracle_version = int(self.connection.version.split('.')[0])
                # There's no way for the DatabaseOperations class to know the
                # currently active Oracle version, so we do some setups here.
                # TODO: Multi-db support will need a better solution (a way to
                # communicate the current version).
                if self.oracle_version <= 9:
                    self.ops.regex_lookup = self.ops.regex_lookup_9
                else:
                    self.ops.regex_lookup = self.ops.regex_lookup_10
            except ValueError:
                pass
            try:
                self.connection.stmtcachesize = 20
            except AttributeError:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. Only AttributeError is
                # expected here: Django docs specify cx_Oracle version 4.3.1
                # or higher, but stmtcachesize is available only in 4.3.2
                # and up.
                pass
            connection_created.send(sender=self.__class__, connection=self)
        if not cursor:
            cursor = FormatStylePlaceholderCursor(self.connection)
        return cursor

    # Oracle doesn't support savepoint commits.  Ignore them.
    def _savepoint_commit(self, sid):
        pass

    def _commit(self):
        """Commit, translating cx_Oracle integrity failures into Django's
        IntegrityError and everything else into DatabaseError."""
        if self.connection is not None:
            try:
                return self.connection.commit()
            except Database.IntegrityError as e:
                # In case cx_Oracle implements (now or in a future version)
                # raising this specific exception
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            except Database.DatabaseError as e:
                # cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
                # with the following attributes and values:
                #  code = 2091
                #  message = 'ORA-02091: transaction rolled back
                #            'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
                #               _C00102056) violated - parent key not found'
                # We convert that particular case to our IntegrityError exception
                x = e.args[0]
                if hasattr(x, 'code') and hasattr(x, 'message') \
                   and x.code == 2091 and 'ORA-02291' in x.message:
                    six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
                six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
class OracleParam(object):
    """
    Wrapper object for formatting parameters for Oracle. If the string
    representation of the value is large enough (greater than 4000 characters)
    the input size needs to be set as CLOB. Alternatively, if the parameter
    has an `input_size` attribute, then the value of the `input_size` attribute
    will be used instead. Otherwise, no input size will be set for the
    parameter when executing the query.
    """

    def __init__(self, param, cursor, strings_only=False):
        # With raw SQL queries, datetimes can reach this function
        # without being converted by DateTimeField.get_db_prep_value.
        if settings.USE_TZ and isinstance(param, datetime.datetime):
            if timezone.is_naive(param):
                warnings.warn("Oracle received a naive datetime (%s)"
                              " while time zone support is active." % param,
                              RuntimeWarning)
                default_timezone = timezone.get_default_timezone()
                param = timezone.make_aware(param, default_timezone)
            # Oracle gets a naive UTC datetime; tzinfo is stripped.
            param = param.astimezone(timezone.utc).replace(tzinfo=None)

        # Oracle doesn't recognize True and False correctly in Python 3.
        # The conversion done below works both in 2 and 3.
        if param is True:
            param = "1"
        elif param is False:
            param = "0"
        if hasattr(param, 'bind_parameter'):
            # Objects such as VariableWrapper/InsertIdVar supply their own
            # cursor variable instead of being stringified.
            self.force_bytes = param.bind_parameter(cursor)
        else:
            self.force_bytes = convert_unicode(param, cursor.charset,
                                               strings_only)
        if hasattr(param, 'input_size'):
            # If parameter has `input_size` attribute, use that.
            self.input_size = param.input_size
        elif isinstance(param, six.string_types) and len(param) > 4000:
            # Mark any string param greater than 4000 characters as a CLOB.
            self.input_size = Database.CLOB
        else:
            self.input_size = None
class VariableWrapper(object):
    """
    Adapter around a cursor variable that stops the wrapped object from
    being coerced to a string when an OracleParam is built from it.  More
    generally, wrap anything that must be handed to Cursor.execute as-is.
    """

    def __init__(self, var):
        self.var = var

    def bind_parameter(self, cursor):
        # OracleParam hook: hand back the raw variable untouched.
        return self.var

    def __getattr__(self, key):
        # Delegate unknown attribute reads to the wrapped variable.
        return getattr(self.var, key)

    def __setattr__(self, key, value):
        # 'var' lives on the wrapper itself; everything else is forwarded.
        if key != 'var':
            setattr(self.var, key, value)
        else:
            self.__dict__[key] = value
class InsertIdVar(object):
    """
    A late-binding cursor variable that can be passed to Cursor.execute
    as a parameter, in order to receive the id of the row created by an
    insert statement.
    """

    def bind_parameter(self, cursor):
        # Allocate a NUMBER output variable and stash it on the cursor;
        # fetch_returned_insert_id() reads it back after the INSERT runs.
        param = cursor.cursor.var(Database.NUMBER)
        cursor._insert_id_var = param
        return param
class FormatStylePlaceholderCursor(object):
    """
    Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
    style. This fixes it -- but note that if you want to use a literal "%s" in
    a query, you'll need to use "%%s".

    We also do automatic conversion between Unicode on the Python side and
    UTF-8 -- for talking to Oracle -- in here.
    """
    # Encoding used when converting parameters for the database.
    charset = 'utf-8'

    def __init__(self, connection):
        self.cursor = connection.cursor()
        # Necessary to retrieve decimal values without rounding error.
        self.cursor.numbersAsStrings = True
        # Default arraysize of 1 is highly sub-optimal.
        self.cursor.arraysize = 100

    def _format_params(self, params):
        # Wrap every parameter so sizes/encodings are handled uniformly.
        return tuple([OracleParam(p, self, True) for p in params])

    def _guess_input_sizes(self, params_list):
        # Propagate any explicit input size (e.g. CLOB) per position.
        sizes = [None] * len(params_list[0])
        for params in params_list:
            for i, value in enumerate(params):
                if value.input_size:
                    sizes[i] = value.input_size
        self.setinputsizes(*sizes)

    def _param_generator(self, params):
        # Unwrap OracleParams into the raw values cx_Oracle expects.
        return [p.force_bytes for p in params]

    def execute(self, query, params=None):
        """Run ``query`` after rewriting '%s' placeholders to ':argN'."""
        if params is None:
            params = []
        else:
            params = self._format_params(params)
        args = [(':arg%d' % i) for i in range(len(params))]
        # cx_Oracle wants no trailing ';' for SQL statements.  For PL/SQL, it
        # it does want a trailing ';' but not a trailing '/'.  However, these
        # characters must be included in the original query in case the query
        # is being passed to SQL*Plus.
        if query.endswith(';') or query.endswith('/'):
            query = query[:-1]
        query = convert_unicode(query % tuple(args), self.charset)
        self._guess_input_sizes([params])
        try:
            return self.cursor.execute(query, self._param_generator(params))
        except Database.IntegrityError as e:
            six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
        except Database.DatabaseError as e:
            # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
            if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])

    def executemany(self, query, params=None):
        """Run ``query`` once per parameter tuple in ``params``."""
        # cx_Oracle doesn't support iterators, convert them to lists
        if params is not None and not isinstance(params, (list, tuple)):
            params = list(params)
        try:
            args = [(':arg%d' % i) for i in range(len(params[0]))]
        except (IndexError, TypeError):
            # No params given, nothing to do
            return None
        # cx_Oracle wants no trailing ';' for SQL statements.  For PL/SQL, it
        # it does want a trailing ';' but not a trailing '/'.  However, these
        # characters must be included in the original query in case the query
        # is being passed to SQL*Plus.
        if query.endswith(';') or query.endswith('/'):
            query = query[:-1]
        query = convert_unicode(query % tuple(args), self.charset)
        formatted = [self._format_params(i) for i in params]
        self._guess_input_sizes(formatted)
        try:
            return self.cursor.executemany(query,
                                [self._param_generator(p) for p in formatted])
        except Database.IntegrityError as e:
            six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
        except Database.DatabaseError as e:
            # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
            if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])

    def fetchone(self):
        # Run every fetched row through _rowfactory for type conversion.
        row = self.cursor.fetchone()
        if row is None:
            return row
        return _rowfactory(row, self.cursor)

    def fetchmany(self, size=None):
        if size is None:
            size = self.arraysize
        return tuple([_rowfactory(r, self.cursor)
                      for r in self.cursor.fetchmany(size)])

    def fetchall(self):
        return tuple([_rowfactory(r, self.cursor)
                      for r in self.cursor.fetchall()])

    def var(self, *args):
        # Wrapped so OracleParam passes the variable through untouched.
        return VariableWrapper(self.cursor.var(*args))

    def arrayvar(self, *args):
        return VariableWrapper(self.cursor.arrayvar(*args))

    def __getattr__(self, attr):
        # Delegate everything else to the underlying cx_Oracle cursor.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)

    def __iter__(self):
        return CursorIterator(self.cursor)
class CursorIterator(six.Iterator):
    """Cursor iterator wrapper that invokes our custom row factory."""

    def __init__(self, cursor):
        # Keep the raw cursor too: _rowfactory needs cursor.description.
        self.cursor = cursor
        self.iter = iter(cursor)

    def __iter__(self):
        return self

    def __next__(self):
        # Convert each raw cx_Oracle row before handing it out.
        return _rowfactory(next(self.iter), self.cursor)
def _rowfactory(row, cursor):
    """Convert one raw cx_Oracle row into Python types.

    Numbers arrive as strings (numbersAsStrings=True) and are cast to
    int/float/Decimal using the column's precision/scale; datetimes get
    UTC tzinfo under USE_TZ; character data is decoded to unicode.
    """
    # Cast numeric values as the appropriate Python type based upon the
    # cursor description, and convert strings to unicode.
    casted = []
    for value, desc in zip(row, cursor.description):
        if value is not None and desc[1] is Database.NUMBER:
            precision, scale = desc[4:6]
            if scale == -127:
                if precision == 0:
                    # NUMBER column: decimal-precision floating point
                    # This will normally be an integer from a sequence,
                    # but it could be a decimal value.
                    if '.' in value:
                        value = decimal.Decimal(value)
                    else:
                        value = int(value)
                else:
                    # FLOAT column: binary-precision floating point.
                    # This comes from FloatField columns.
                    value = float(value)
            elif precision > 0:
                # NUMBER(p,s) column: decimal-precision fixed point.
                # This comes from IntField and DecimalField columns.
                if scale == 0:
                    value = int(value)
                else:
                    value = decimal.Decimal(value)
            elif '.' in value:
                # No type information. This normally comes from a
                # mathematical expression in the SELECT list. Guess int
                # or Decimal based on whether it has a decimal point.
                value = decimal.Decimal(value)
            else:
                value = int(value)
        # datetimes are returned as TIMESTAMP, except the results
        # of "dates" queries, which are returned as DATETIME.
        elif desc[1] in (Database.TIMESTAMP, Database.DATETIME):
            # Confirm that dt is naive before overwriting its tzinfo.
            if settings.USE_TZ and value is not None and timezone.is_naive(value):
                value = value.replace(tzinfo=timezone.utc)
        elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
                         Database.LONG_STRING):
            value = to_unicode(value)
        casted.append(value)
    return tuple(casted)
def to_unicode(s):
    """
    Convert strings to Unicode objects (and return all other data types
    unchanged).
    """
    # force_text handles both byte strings and text on Python 2 and 3.
    if isinstance(s, six.string_types):
        return force_text(s)
    return s
def _get_sequence_reset_sql():
    """Return a PL/SQL block template (keys: sequence/table/column) that
    advances a sequence until it catches up with MAX(column)."""
    # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
    return """
DECLARE
    table_value integer;
    seq_value integer;
BEGIN
    SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
    SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
           WHERE sequence_name = '%(sequence)s';
    WHILE table_value > seq_value LOOP
        SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
    END LOOP;
END;
/"""
| agpl-3.0 |
allenlavoie/tensorflow | tensorflow/contrib/gan/python/eval/python/sliced_wasserstein_test.py | 21 | 4991 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Sliced Wasserstein Distance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import ndimage
from tensorflow.contrib.gan.python.eval.python import sliced_wasserstein_impl as swd
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class ClassifierMetricsTest(test.TestCase):
  """Tests for the sliced Wasserstein distance (SWD) implementation."""

  def test_laplacian_pyramid(self):
    """TF Laplacian pyramid must match a NumPy/SciPy reference."""
    # The numpy/scipy code for reference estimation comes from:
    # https://github.com/tkarras/progressive_growing_of_gans
    gaussian_filter = np.float32([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [
        6, 24, 36, 24, 6
    ], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]]) / 256.0

    def np_pyr_down(minibatch):  # matches cv2.pyrDown()
      assert minibatch.ndim == 4
      return ndimage.convolve(
          minibatch,
          gaussian_filter[np.newaxis, np.newaxis, :, :],
          mode='mirror')[:, :, ::2, ::2]

    def np_pyr_up(minibatch):  # matches cv2.pyrUp()
      assert minibatch.ndim == 4
      s = minibatch.shape
      # Upsample by zero-stuffing, then smooth (filter scaled by 4 to
      # preserve overall brightness).
      res = np.zeros((s[0], s[1], s[2] * 2, s[3] * 2), minibatch.dtype)
      res[:, :, ::2, ::2] = minibatch
      return ndimage.convolve(
          res,
          gaussian_filter[np.newaxis, np.newaxis, :, :] * 4.0,
          mode='mirror')

    def np_laplacian_pyramid(minibatch, num_levels):
      # Note: there's a bug in the original SWD, fixed repeatability.
      pyramid = [minibatch.astype('f').copy()]
      for _ in range(1, num_levels):
        pyramid.append(np_pyr_down(pyramid[-1]))
        pyramid[-2] -= np_pyr_up(pyramid[-1])
      return pyramid

    # NCHW reference data; the TF implementation consumes NHWC, hence
    # the transposes below.
    data = np.random.normal(size=[256, 3, 32, 32]).astype('f')
    pyramid = np_laplacian_pyramid(data, 3)
    data_tf = array_ops.placeholder(dtypes.float32, [256, 32, 32, 3])
    pyramid_tf = swd._laplacian_pyramid(data_tf, 3)
    with self.test_session() as sess:
      pyramid_tf = sess.run(
          pyramid_tf, feed_dict={
              data_tf: data.transpose(0, 2, 3, 1)
          })
    for x in range(3):
      self.assertAllClose(
          pyramid[x].transpose(0, 2, 3, 1), pyramid_tf[x], atol=1e-6)

  def test_sliced_wasserstein_distance(self):
    """Test the distance."""
    d1 = random_ops.random_uniform([256, 32, 32, 3])
    d2 = random_ops.random_normal([256, 32, 32, 3])
    wfunc = swd.sliced_wasserstein_distance(d1, d2)
    with self.test_session() as sess:
      wscores = [sess.run(x) for x in wfunc]
    # Expected values are empirical; rtol is loose because inputs are random.
    self.assertAllClose(
        np.array([0.014, 0.014], 'f'),
        np.array([x[0] for x in wscores], 'f'),
        rtol=0.15)
    self.assertAllClose(
        np.array([0.014, 0.020], 'f'),
        np.array([x[1] for x in wscores], 'f'),
        rtol=0.15)

  def test_sliced_wasserstein_distance_svd(self):
    """Test the distance."""
    d1 = random_ops.random_uniform([256, 32, 32, 3])
    d2 = random_ops.random_normal([256, 32, 32, 3])
    wfunc = swd.sliced_wasserstein_distance(d1, d2, use_svd=True)
    with self.test_session() as sess:
      wscores = [sess.run(x) for x in wfunc]
    self.assertAllClose(
        np.array([0.013, 0.013], 'f'),
        np.array([x[0] for x in wscores], 'f'),
        rtol=0.15)
    self.assertAllClose(
        np.array([0.014, 0.019], 'f'),
        np.array([x[1] for x in wscores], 'f'),
        rtol=0.15)

  def test_swd_mismatched(self):
    """Test the inputs mismatched shapes are detected."""
    d1 = random_ops.random_uniform([256, 32, 32, 3])
    d2 = random_ops.random_normal([256, 32, 31, 3])
    d3 = random_ops.random_normal([256, 31, 32, 3])
    d4 = random_ops.random_normal([255, 32, 32, 3])
    with self.assertRaises(ValueError):
      swd.sliced_wasserstein_distance(d1, d2)
    with self.assertRaises(ValueError):
      swd.sliced_wasserstein_distance(d1, d3)
    with self.assertRaises(ValueError):
      swd.sliced_wasserstein_distance(d1, d4)

  def test_swd_not_rgb(self):
    """Test that only RGB is supported."""
    d1 = random_ops.random_uniform([256, 32, 32, 1])
    d2 = random_ops.random_normal([256, 32, 32, 1])
    with self.assertRaises(ValueError):
      swd.sliced_wasserstein_distance(d1, d2)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
technologiescollege/Blockly-rduino-communication | scripts_XP/Lib/site-packages/pygments/styles/colorful.py | 31 | 2778 | # -*- coding: utf-8 -*-
"""
pygments.styles.colorful
~~~~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by CodeRay.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class ColorfulStyle(Style):
    """
    A colorful style, inspired by CodeRay.
    """

    default_style = ""

    # Token-type -> style-string map; values use Pygments' mini style
    # language ("bold", "italic", "#rgb"/"#rrggbb", "bg:#rgb").
    styles = {
        Whitespace:                "#bbbbbb",

        Comment:                   "#888",
        Comment.Preproc:           "#579",
        Comment.Special:           "bold #cc0000",

        Keyword:                   "bold #080",
        Keyword.Pseudo:            "#038",
        Keyword.Type:              "#339",

        Operator:                  "#333",
        Operator.Word:             "bold #000",

        Name.Builtin:              "#007020",
        Name.Function:             "bold #06B",
        Name.Class:                "bold #B06",
        Name.Namespace:            "bold #0e84b5",
        Name.Exception:            "bold #F00",
        Name.Variable:             "#963",
        Name.Variable.Instance:    "#33B",
        Name.Variable.Class:       "#369",
        Name.Variable.Global:      "bold #d70",
        Name.Constant:             "bold #036",
        Name.Label:                "bold #970",
        Name.Entity:               "bold #800",
        Name.Attribute:            "#00C",
        Name.Tag:                  "#070",
        Name.Decorator:            "bold #555",

        # Strings get a pale pink background; sub-kinds override foreground.
        String:                    "bg:#fff0f0",
        String.Char:               "#04D bg:",
        String.Doc:                "#D42 bg:",
        String.Interpol:           "bg:#eee",
        String.Escape:             "bold #666",
        String.Regex:              "bg:#fff0ff #000",
        String.Symbol:             "#A60 bg:",
        String.Other:              "#D20",

        Number:                    "bold #60E",
        Number.Integer:            "bold #00D",
        Number.Float:              "bold #60E",
        Number.Hex:                "bold #058",
        Number.Oct:                "bold #40E",

        Generic.Heading:           "bold #000080",
        Generic.Subheading:        "bold #800080",
        Generic.Deleted:           "#A00000",
        Generic.Inserted:          "#00A000",
        Generic.Error:             "#FF0000",
        Generic.Emph:              "italic",
        Generic.Strong:            "bold",
        Generic.Prompt:            "bold #c65d09",
        Generic.Output:            "#888",
        Generic.Traceback:         "#04D",

        Error:                     "#F00 bg:#FAA"
    }
| gpl-3.0 |
omerwe/PCGCs | deprecated/pcgcs_direct.py | 1 | 39643 | from __future__ import division
import numpy as np
import scipy.stats as stats
import scipy.linalg as la
import sys
import random
import time
import os
import os.path
import pandas as pd
import itertools
np.set_printoptions(precision=4, linewidth=200)
from sklearn.linear_model import LinearRegression, LogisticRegression
import pcgcs_utils
from pcgcs_utils import print_memory_usage
# def print_memory():
# import psutil
# process = psutil.Process(os.getpid())
# print 'memory usage:', process.memory_info().rss
def print_sumstats(cov1, u1_0, u1_1, var_t1, cov2, u2_0=None, u2_1=None, var_t2=None, s1=None, sum_s1=None, sum_s1_sqr=None, s2=None, sum_s2=None, sum_s2_sqr=None):
	"""Print PCGC summary statistics (mean Q, covariate liability variance,
	genotype deflation factors) for one or two studies.

	Does nothing when neither covariates nor genotype sums are supplied.
	Python 2 only (uses print statements).
	"""
	if (cov1 is None and cov2 is None and sum_s1 is None and sum_s2 is None): return
	print
	print
	print 'summary statistics for subsequent estimation:'
	print '-----------------------------------'
	if (cov1 is not None):
		# mean_Q is the average squared covariate adjustment per individual.
		print 'mean Q for study 1 (mean_Q1): %0.4f'%(np.mean((u1_0 + u1_1)**2))
		print 'liability variance explained by covariates (var_t1): %0.4f'%(var_t1)
	if (cov2 is not None):
		print 'mean Q for study 2 (mean_Q2): %0.4f'%(np.mean((u2_0 + u2_1)**2))
		print 'liability variance explained by covariates (var_t2): %0.4f'%(var_t2)
	if (sum_s1 is not None):
		# Deflation factors: relative mass of the dropped singular values.
		print 'study 1 genotypes deflation factor (geno1_factor): %0.6f'%((sum_s1 - s1.sum()) / sum_s1)
		print 'study 1 squared genotypes deflation factor (sqr_geno1_factor): %0.6f'%((sum_s1_sqr - np.sum(s1**2)) / sum_s1_sqr)
	if (sum_s2 is not None):
		print 'study 2 genotypes deflation factor (geno2_factor): %0.6f'%((sum_s2 - s2.sum()) / sum_s2)
		print 'study 2 squared genotypes deflation factor (sqr_geno2_factor): %0.6f'%((sum_s2_sqr - np.sum(s2**2)) / sum_s2_sqr)
	print
	print
def pcgc_jackknife_sig2g(X, y, numer_sig2g, denom_sig2g, pcgc_coeff=1.0, u0=None, u1=None, window_size=1000):
	"""Delete-one-individual jackknife standard error of the PCGC sig2g estimate.

	X is the (individuals x SNPs) standardized genotype matrix; the full-sample
	numerator/denominator are adjusted per left-out individual instead of
	recomputing them from scratch.  u0/u1 are covariate adjustment terms
	(may be None when there are no covariates).  window_size limits how many
	rows of the kinship matrix are materialized at once.
	Returns the jackknife standard error (sqrt of the jackknife variance).
	"""
	if (u0 is not None):
		u_sqr = (u0 + u1)**2
		qy = y * (u0+u1)
	else:
		qy = y
	if (window_size is None or window_size<0): window_size = X.shape[0]
	estimators_arr = np.empty(X.shape[0])
	for i in xrange(0, X.shape[0], window_size):
		# Kinship rows for this window; zero the diagonal (self-kinship is
		# excluded from PCGC sums).
		X_i = X[i:i+window_size]
		G_i = X_i.dot(X.T) / X.shape[1]
		indices0 = np.arange(G_i.shape[0])
		G_i[indices0, i+indices0]=0
		for j in xrange(G_i.shape[0]):
			# Subtract individual (i+j)'s contribution from both sums.
			numer_sig2g_i = numer_sig2g - 2*G_i[j].dot(qy[i+j]*qy)
			if (u0 is None): denom_sig2g_i = denom_sig2g - 2*G_i[j].dot(G_i[j])
			else: denom_sig2g_i = denom_sig2g - 2*G_i[j].dot(G_i[j] * u_sqr[i+j]*u_sqr)
			estimators_arr[i+j] = numer_sig2g_i / denom_sig2g_i
	estimators_arr /= pcgc_coeff
	sig2g_var = (X.shape[0]-1)/float(X.shape[0]) * np.sum((estimators_arr - estimators_arr.mean())**2)
	return np.sqrt(sig2g_var)
def pcgc_jackknife_corr(X1, X2, y1, y2,
				numer_sig2g1, denom_sig2g1, numer_sig2g2, denom_sig2g2, numer_rho, denom_rho,
				pcgc_coeff1=1.0, pcgc_coeff2=1.0, pcgc_coeff12=1.0,
				u1_0=None, u1_1=None, u2_0=None, u2_1=None,
				is_same=None, window_size=1000):
	"""Delete-one-individual jackknife SEs for two-study PCGC estimates.

	Jackknifes sig2g of each study, the genetic covariance (rho) and the
	genetic correlation by removing each individual of study 1 and then of
	study 2 from the relevant sums.  ``is_same`` is a boolean matrix marking
	individuals present in both studies (their cross-kinship is zeroed).
	Returns (se_sig2g1, se_sig2g2, se_rho, se_corr).
	"""
	if (window_size is None or window_size<0): window_size = X1.shape[0]
	if (u1_0 is not None):
		u1_sqr = (u1_0 + u1_1)**2
		u2_sqr = (u2_0 + u2_1)**2
		qy1 = y1 * (u1_0+u1_1)
		qy2 = y2 * (u2_0+u2_1)
	else:
		qy1 = y1
		qy2 = y2
	sig2g1_estimators_arr = np.empty(X1.shape[0])
	sig2g2_estimators_arr = np.empty(X2.shape[0])
	rho_estimators_arr = np.empty(X1.shape[0] + X2.shape[0])

	#exclude individuals from study 1
	for i in xrange(0, X1.shape[0], window_size):
		X1_i = X1[i:i+window_size]
		# Within-study-1 kinship rows; diagonal excluded.
		G_i = X1_i.dot(X1.T) / X1.shape[1]
		indices0 = np.arange(G_i.shape[0])
		G_i[indices0, i+indices0]=0
		for j in xrange(G_i.shape[0]):
			numer_sig2g1_i = numer_sig2g1 - 2*G_i[j].dot(qy1[i+j]*qy1)
			if (u1_0 is None): denom_sig2g1_i = denom_sig2g1 - 2*G_i[j].dot(G_i[j])
			else: denom_sig2g1_i = denom_sig2g1 - 2*G_i[j].dot(G_i[j] * u1_sqr[i+j]*u1_sqr)
			sig2g1_estimators_arr[i+j] = numer_sig2g1_i / denom_sig2g1_i

		# Cross-study kinship rows; overlapping individuals zeroed out.
		G_i = X1_i.dot(X2.T) / X1.shape[1]
		G_i[is_same[i:i+window_size]]=0
		for j in xrange(G_i.shape[0]):
			numer_rho_i = numer_rho - G_i[j].dot(qy1[i+j]*qy2)
			if (u1_0 is None): denom_rho_i = denom_rho - G_i[j].dot(G_i[j])
			else: denom_rho_i = denom_rho - G_i[j].dot(G_i[j] * u1_sqr[i+j]*u2_sqr)
			rho_estimators_arr[i+j] = numer_rho_i / denom_rho_i

	#exclude individuals from study 2
	for i in xrange(0, X2.shape[0], window_size):
		X2_i = X2[i:i+window_size]
		G_i = X2_i.dot(X2.T) / X1.shape[1]
		indices0 = np.arange(G_i.shape[0])
		G_i[indices0, i+indices0]=0
		for j in xrange(G_i.shape[0]):
			numer_sig2g2_i = numer_sig2g2 - G_i[j].dot(qy2[i+j]*qy2)
			if (u2_0 is None): denom_sig2g2_i = denom_sig2g2 - 2*G_i[j].dot(G_i[j])
			else: denom_sig2g2_i = denom_sig2g2 - 2*G_i[j].dot(G_i[j] * u2_sqr[i+j]*u2_sqr)
			sig2g2_estimators_arr[i+j] = numer_sig2g2_i / denom_sig2g2_i

		G_i = X2_i.dot(X1.T) / X1.shape[1]
		G_i[is_same.T[i:i+window_size]]=0
		for j in xrange(G_i.shape[0]):
			numer_rho_i = numer_rho - G_i[j].dot(qy1[i+j]*qy2)
			if (u1_0 is None): denom_rho_i = denom_rho - G_i[j].dot(G_i[j])
			else: denom_rho_i = denom_rho - G_i[j].dot(G_i[j] * u2_sqr[i+j]*u1_sqr)
			rho_estimators_arr[X1.shape[0]+i+j] = numer_rho_i / denom_rho_i

	sig2g1_estimators_arr /= pcgc_coeff1
	sig2g2_estimators_arr /= pcgc_coeff2
	rho_estimators_arr /= pcgc_coeff12

	# Standard delete-one jackknife variance: (n-1)/n * sum of squared
	# deviations of the leave-one-out estimators from their mean.
	sig2g1_var = (X1.shape[0]-1)/float(X1.shape[0]) * np.sum((sig2g1_estimators_arr - sig2g1_estimators_arr.mean())**2)
	sig2g2_var = (X2.shape[0]-1)/float(X2.shape[0]) * np.sum((sig2g2_estimators_arr - sig2g2_estimators_arr.mean())**2)
	rho_var = (rho_estimators_arr.shape[0]-1)/float(rho_estimators_arr.shape[0]) * np.sum((rho_estimators_arr - rho_estimators_arr.mean())**2)

	#compute genetic correlation pseudo-values
	sig2g1 = numer_sig2g1 / denom_sig2g1 / pcgc_coeff1
	sig2g2 = numer_sig2g2 / denom_sig2g2 / pcgc_coeff2
	# Pad each study's estimator vector with the full-sample estimate so the
	# correlation pseudo-values line up with the combined rho vector.
	sig2g1_estimators_arr = np.concatenate((sig2g1_estimators_arr, np.ones(X2.shape[0])*sig2g1))
	sig2g2_estimators_arr = np.concatenate((np.ones(X1.shape[0])*sig2g2, sig2g2_estimators_arr))
	corr_estimators_arr = rho_estimators_arr / np.sqrt(sig2g1_estimators_arr * sig2g2_estimators_arr)
	corr_var = (corr_estimators_arr.shape[0]-1)/float(corr_estimators_arr.shape[0]) * np.sum((corr_estimators_arr - corr_estimators_arr.mean())**2)

	return np.sqrt(sig2g1_var), np.sqrt(sig2g2_var), np.sqrt(rho_var), np.sqrt(corr_var)
# # # def permutation_test(G, yyT, is_same, num_perms=10000):
# # # x = G.reshape(-1)
# # # y = yyT.reshape(-1)
# # # x = x[~(is_same.reshape(-1))]
# # # y = y[~(is_same.reshape(-1))]
# # # real_stat = x.dot(y)
# # # null_stats = np.empty(num_perms)
# # # for i in xrange(num_perms):
# # # if (i>0 and i % 100 == 0): print 'finished %d/%d permutations'%(i, num_perms)
# # # np.random.shuffle(y)
# # # null_stats[i] = x.dot(y)
# # # pvalue = np.mean(np.abs(null_stats) > np.abs(real_stat))
# # # if (pvalue < 1.0/num_perms): pvalue = 1.0/num_perms
# # # return pvalue
def permutation_test2(X1, y1, X2, y2, G12_issame, is_same1, is_same2, num_perms=10000):
    """Permutation test for the genetic covariance between two studies.

    The statistic is y1' G12 y2 computed as (y1'X1)(X2'y2)/#SNPs, minus the
    kinship contribution of individuals present in both studies, so the full
    n1 x n2 cross-kinship matrix is never formed. Both phenotype vectors are
    shuffled num_perms times to build the null distribution.

    X1, X2      -- standardized genotype matrices (individuals x SNPs)
    y1, y2      -- phenotype vectors (copied internally, callers' arrays kept intact)
    G12_issame  -- per-overlapping-pair kinship values (empty array if no overlap)
    is_same1/2  -- boolean masks of overlapping individuals in each study
    Returns the two-sided permutation p-value.
    """
    has_same = (G12_issame.shape[0] > 0)
    c = float(X1.shape[1])
    # shuffle local copies so the callers' phenotype arrays are not reordered
    y1 = y1.copy()
    y2 = y2.copy()
    null_stats = np.empty(num_perms)
    z1 = y1.dot(X1)
    z2 = y2.dot(X2)
    real_stat = z1.dot(z2) / c
    # subtract the self-kinship of overlapping individuals from the statistic
    if has_same: real_stat -= G12_issame.dot(y1[is_same1] * y2[is_same2])
    for i in xrange(num_perms):
        if (i>0 and i % 100 == 0): print 'finished %d/%d permutations'%(i, num_perms)
        np.random.shuffle(y1)
        np.random.shuffle(y2)
        z1 = y1.dot(X1)
        z2 = y2.dot(X2)
        null_stats[i] = z1.dot(z2) / c
        # NOTE(review): the overlap correction uses the shuffled phenotypes at
        # the fixed overlap positions — presumably intentional under the
        # permutation null; confirm against the PCGC-s derivation
        if has_same: null_stats[i] -= G12_issame.dot(y1[is_same1] * y2[is_same2])
    pvalue = np.mean(np.abs(null_stats) > np.abs(real_stat))
    # a finite permutation test cannot resolve p-values below 1/num_perms
    if (pvalue < 1.0/num_perms): pvalue = 1.0/num_perms
    return pvalue
def permutation_test_heritability(X, y, G_diag, num_perms=10000):
    """Permutation test for nonzero heritability in a single study.

    The statistic is y' G y with the diagonal removed, computed as
    (y'X)(X'y)/#SNPs - sum_i G_ii y_i^2 so the n x n kinship matrix is never
    materialized. The phenotype vector is shuffled num_perms times to build
    the null distribution.

    X       -- standardized genotype matrix (individuals x SNPs)
    y       -- phenotype vector (copied internally)
    G_diag  -- precomputed diagonal of the kinship matrix
    Returns the two-sided permutation p-value.
    """
    c = float(X.shape[1])
    # shuffle a local copy, leave the caller's array intact
    y = y.copy()
    null_stats = np.empty(num_perms)
    z = y.dot(X)
    real_stat = z.dot(z) / c
    # remove the self-kinship (diagonal) contribution
    real_stat -= G_diag.dot(y**2)
    for i in xrange(num_perms):
        if (i>0 and i % 100 == 0): print 'finished %d/%d permutations'%(i, num_perms)
        np.random.shuffle(y)
        z = y.dot(X)
        null_stats[i] = z.dot(z) / c
        null_stats[i] -= G_diag.dot(y**2)
    pvalue = np.mean(np.abs(null_stats) > np.abs(real_stat))
    # a finite permutation test cannot resolve p-values below 1/num_perms
    if (pvalue < 1.0/num_perms): pvalue = 1.0/num_perms
    return pvalue
#compute the PCGC denominator with limited memory, by only storing matrices of size (window_size x sample_size)
def pcgc_denom_lowmem(X1, X2, u1_0, u1_1, u2_0, u2_1, is_same=None, window_size=1000):
    """Compute the covariate-aware PCGC denominator with bounded memory.

    Evaluates sum_{i,j} G_ij^2 * (u1_0+u1_1)_i^2 * (u2_0+u2_1)_j^2 over all
    pairs except the diagonal (same study) or overlapping individuals (two
    studies), where G = X1 X2' / #SNPs. Only window_size rows of the kinship
    matrix exist at any time.

    X1, X2       -- standardized genotype matrices (individuals x SNPs)
    u1_*, u2_*   -- per-individual PCGC covariate weight vectors
    is_same      -- boolean (n1 x n2) overlap mask; None means X1 and X2 are
                    the same study (the diagonal is zeroed instead)
    window_size  -- number of kinship rows computed per iteration
    """
    print_memory_usage(7)
    denom=0
    if (window_size is None or window_size<0): window_size = X1.shape[0]
    for i in xrange(0, X1.shape[0], window_size):
        # rows [i, i+window) of the (un-normalized) kinship matrix
        G_i = X1[i:i+window_size].dot(X2.T)
        if (is_same is None):
            # same-study case: exclude self-kinship (diagonal) entries
            indices0 = np.arange(G_i.shape[0])
            G_i[indices0, i+indices0]=0
        else: G_i[is_same[i:i+window_size]] = 0
        u1_0_i = u1_0[i:i+window_size]
        u1_1_i = u1_1[i:i+window_size]
        # The two accumulations below expand (a+b)_i^2 (c+d)_j^2 with
        # a=u1_0, b=u1_1, c=u2_0, d=u2_1 term by term:
        # (a+b)^2(c+d)^2 = a2c2+a2d2+b2c2+b2d2 + 2*(a2cd+abc2+2abcd+abd2+b2cd),
        # so only elementwise vector products are ever formed (the 4abcd term
        # appears as the two identical 'abcd'/'abdc' einsum lines).
        denom += np.einsum('ij,ij,i,j',G_i,G_i,u1_0_i**2,u2_0**2) + np.einsum('ij,ij,i,j',G_i,G_i,u1_0_i**2,u2_1**2) + np.einsum('ij,ij,i,j',G_i,G_i,u1_1_i**2,u2_0**2) + np.einsum('ij,ij,i,j',G_i,G_i,u1_1_i**2,u2_1**2)
        denom += 2 * (
            np.einsum('ij,ij,i,j->', G_i, G_i, u1_0_i**2,u2_0*u2_1)
            + np.einsum('ij,ij,i,j->', G_i, G_i, u1_0_i*u1_1_i,u2_0**2)
            + np.einsum('ij,ij,i,j->', G_i, G_i, u1_0_i*u1_1_i,u2_0*u2_1)
            + np.einsum('ij,ij,i,j->', G_i, G_i, u1_0_i*u1_1_i,u2_1*u2_0)
            + np.einsum('ij,ij,i,j->', G_i, G_i, u1_0_i*u1_1_i,u2_1**2)
            + np.einsum('ij,ij,i,j->', G_i, G_i, u1_1_i**2, u2_1*u2_0)
        )
    # the 1/#SNPs kinship normalization was deferred; G appears squared, so
    # divide once by #SNPs^2 (assumes X1 and X2 share the same SNP panel)
    denom /= X1.shape[1]**2
    return denom
#compute the PCGC denominator with limited memory, by only storing matrices of size (window_size x sample_size), without covariates
def pcgc_denom_lowmem_nocov(X1, X2, is_same=None, window_size=1000):
    """Compute the covariate-less PCGC denominator, sum of squared kinship
    entries over all pairs except the diagonal (same study) or overlapping
    individuals (two studies), processing window_size kinship rows at a time
    to bound memory.
    """
    print_memory_usage(6)
    total = 0
    if (window_size is None or window_size<0):
        window_size = X1.shape[0]
    num_individuals = X1.shape[0]
    for start in xrange(0, num_individuals, window_size):
        # rows [start, start+window) of the un-normalized kinship matrix
        block = X1[start:start+window_size].dot(X2.T)
        if (is_same is None):
            # same-study case: zero the self-kinship (diagonal) entries
            row_idx = np.arange(block.shape[0])
            block[row_idx, start+row_idx] = 0
        else:
            # two-study case: zero entries of overlapping individuals
            block[is_same[start:start+window_size]] = 0
        # sum of squares of this block, accumulated at C speed
        total += np.einsum('ij,ij', block, block)
    # deferred kinship normalization: G appears squared, so divide by #SNPs^2
    return total / X1.shape[1]**2
def write_sumstats(z, n, snpNames, out_file, compute_p=True):
    """Write per-SNP summary statistics to a gzipped tab-delimited file with
    columns snpid/a1/a2/N/P/Z.

    z         -- array of per-SNP z-scores
    n         -- sample size (scalar, written to the N column and used for df)
    snpNames  -- iterable of SNP identifiers
    out_file  -- output path; '.gz' is appended unless the name already ends
                 with '.gz' or '.gzip'
    compute_p -- if False, the P column is written as NA
    """
    #Compute p-values: map z to a correlation-scale statistic, convert it to a
    #t statistic with n-2 degrees of freedom, then take the two-sided tail.
    t = z / np.sqrt(n)
    t[t>1.0] = 1.0
    t[t<-1.0] = -1.0
    degrees_f = n-2
    TINY = 1.0e-20  # guards against division by zero when |t| == 1
    stat = t * np.sqrt(degrees_f / ((1.0-t+TINY) * (1.0+t+TINY)))
    pvals = stats.t.sf(np.abs(stat), degrees_f)*2
    if not compute_p: pvals[:] = np.nan

    df = pd.DataFrame(snpNames, columns=['snpid'])
    df['a1'] = ['1']*len(pvals)  # placeholder alleles: allele coding is not tracked here
    df['a2'] = ['2']*len(pvals)
    df['N'] = [n]*len(pvals)
    df['P'] = pvals
    df['Z'] = z

    # BUGFIX: the original condition chained the suffix checks with `or`,
    # which is always true (no name can end with both '.gzip' and '.gz'), so
    # '.gz' was appended even to names that already had it ('x.gz' -> 'x.gz.gz').
    if not out_file.endswith(('.gz', '.gzip')): out_file += '.gz'
    df.to_csv(out_file, sep='\t', index=False, float_format='%0.6e', compression='gzip', na_rep='NA')
def print_preamble():
    """Print the program banner (name, version, copyright)."""
    print '*********************************************************************'
    print '* PCGC-direct for heritability and genetic correlation estimates'
    print '* Version 1.0.0'
    print '* (C) 2018 Omer Weissbrod'
    print '* Technion - Israel Institute of Technology'
    print '*********************************************************************'
    print
#compute liability variance due to covariates
def varLiab_covar(prev, tau_i, phe):
    """Estimate the liability-scale variance explained by covariates.

    Applies the law of total variance to the per-individual covariate effect
    tau_i, conditioning on case/control status (cases are phe > mean(phe),
    controls phe < mean(phe)), with the population prevalence `prev` as the
    case probability: Var(t) = Var(E[t|y]) + E[Var(t|y)].
    """
    case_mask = phe > phe.mean()
    control_mask = phe < phe.mean()
    # Var(E[t|y]): squared gap between mean covariate effects of cases/controls
    mean_gap = tau_i[case_mask].mean() - tau_i[control_mask].mean()
    between_component = prev * (1 - prev) * mean_gap**2
    # E[Var(t|y)]: prevalence-weighted within-group variances
    within_component = prev * np.var(tau_i[case_mask]) + (1 - prev) * np.var(tau_i[control_mask])
    return between_component + within_component
def my_linreg(X,y):
    """Ordinary least-squares coefficients for y ~ X via the normal equations.

    Solves (X'X) beta = X'y with a Cholesky factorization, which is fast but
    requires X'X to be positive definite (X must have full column rank).
    """
    gram = X.T.dot(X)
    rhs = X.T.dot(y)
    cho = la.cho_factor(gram)
    return la.cho_solve(cho, rhs)
#initial computations required for PCGC
def regress_given_PCs(X, cov, PC_indices):
    """Residualize X on the covariate columns named by PC_indices (1-based).

    Each column of X is replaced by its residual after OLS regression on the
    selected covariate columns. X is modified in place and also returned.
    """
    assert np.all(PC_indices <= cov.shape[1]), 'given PC number cannot be larger than %d'%(cov.shape[1])
    assert np.all(PC_indices > 0)
    assert np.all(~np.isnan(cov))
    assert np.all(~np.isnan(X))
    pcs = cov[:, PC_indices - 1]
    # OLS via normal equations with a Cholesky solve (same as my_linreg)
    beta = la.cho_solve(la.cho_factor(pcs.T.dot(pcs)), pcs.T.dot(X))
    X -= pcs.dot(beta)
    return X
if __name__ == '__main__':

    import argparse
    parser = argparse.ArgumentParser()

    # ----- command-line interface -----
    #parameters for exact computations
    parser.add_argument('--sumstats_only', metavar='sumstats_only', type=int, default=0, help='If set to 1, PCGC-s will only compute summary statistics and print them to files, without estimating variance components (default 0)')
    # input data (study 2 is optional; required for genetic-correlation mode)
    parser.add_argument('--bfile1', metavar='bfile1', required=True, help='plink file for study 1')
    parser.add_argument('--bfile2', metavar='bfile2', default=None, help='plink file for study 2')
    parser.add_argument('--pheno1', metavar='pheno1', required=True, help='phenotypes file for study 1')
    parser.add_argument('--pheno2', metavar='pheno2', default=None, help='phenotypes file for study 2')
    parser.add_argument('--covar1', metavar='covar1', default=None, help='covariates file for study 1')
    parser.add_argument('--covar2', metavar='covar2', default=None, help='covariates file for study 2')
    parser.add_argument('--prev1', metavar='prev1', type=float, required=True, help='population prevalence of study 1')
    parser.add_argument('--prev2', metavar='prev2', type=float, default=None, help='population prevalence of study 2')
    # filtering / preprocessing options
    parser.add_argument('--extract', metavar='extract', default=None, help='file with list of SNPs to use')
    parser.add_argument('--keep1', metavar='keep1', default=None, help='file with list of individuals to use in study 1')
    parser.add_argument('--keep2', metavar='keep2', default=None, help='file with list of individuals to use in study 2')
    parser.add_argument('--norm', metavar='norm', default=None, help='SNPs normalization method (see help file)')
    parser.add_argument('--maf', metavar='maf', default=None, help='MAFs file (to be used with "--norm maf" option)')
    parser.add_argument('--numPCs1', metavar='numPCs1', type=int, default=0, help='#PCs to regress out of dataset 1')
    parser.add_argument('--numPCs2', metavar='numPCs2', type=int, default=0, help='#PCs to regress out of dataset 2')
    parser.add_argument('--chr', metavar='chr', type=int, default=None, help='use only SNPs from a specific chromosome')
    parser.add_argument('--missingPhenotype', metavar='missingPhenotype', default='-9', help='identifier for missing values (default: -9)')
    parser.add_argument('--center', metavar='center', type=int, default=1, help='whether to center SNPs prior to computing kinship (0 or 1, default 1)')
    parser.add_argument('--mem_size', metavar='mem_size', type=int, default=1000, help='The maximum number of rows in each kinship matrix to be computed. Larger values will improve run-time take up more memory')
    # NOTE(review): help text says "default 1" but the actual default is 0 — confirm intended default
    parser.add_argument('--jackknife', metavar='jackknife', type=int, default=0, help='Whether jackknife-based standard errors will be computed (0 or 1, default 1)')
    parser.add_argument('--num_perms', metavar='num_perms', type=int, default=0, help='number of permutation testing iterations')
    # output files
    parser.add_argument('--z1_nocov_out', metavar='z1_nocov_out', default=None, help='output file for Z-score statistics for study 1 without covariates')
    parser.add_argument('--z2_nocov_out', metavar='z2_nocov_out', default=None, help='output file for Z-score statistics for study 2 without covariates')
    parser.add_argument('--z1_cov_out', metavar='z1_cov_out', default=None, help='output file for Z-score statistics for study 1 with covariates')
    parser.add_argument('--z2_cov_out', metavar='z2_cov_out', default=None, help='output file for Z-score statistics for study 2 with covariates')
    parser.add_argument('--Gty1_nocov_out', metavar='Gty1_nocov_out', default=None, help='output file for covariate-less summary information for individuals in study 1')
    parser.add_argument('--Gty2_nocov_out', metavar='Gty2_nocov_out', default=None, help='output file for covariate-less summary information for individuals in study 2')
    parser.add_argument('--Gty1_cov_out', metavar='Gty1_cov_out', default=None, help='output file for covariates-summary information for individuals in study 1')
    parser.add_argument('--Gty2_cov_out', metavar='Gty2_cov_out', default=None, help='output file for covariates-summary information for individuals in study 2')
    parser.add_argument('--PC1', metavar='PC1', default=None, help='comma-separated indices of covariates that are PCs in covar1 (starting from 1)')
    parser.add_argument('--PC2', metavar='PC2', default=None, help='comma-separated indices of covariates that are PCs in covar2 (starting from 1)')
    parser.add_argument('--snp1', metavar='snp1', default=None, type=int, help='read only a subset of SNPs starting from snp1, starting from 1 (must be specified with snp2)')
    # NOTE(review): metavar='snp1' below looks like a copy-paste slip (should be 'snp2'); cosmetic, affects help display only
    parser.add_argument('--snp2', metavar='snp1', default=None, type=int, help='read only a subset of SNPs ending with snp2, starting from 1 (must be specified with snp1)')
    parser.add_argument('--snp_weights', metavar='snp_weights', default=None, help='snp weights file (two columns: snp name, weight)')
    args = parser.parse_args()
    print_preamble()

    #validate command line arguments
    #####################################################################################
    # Cross-check mutually dependent/exclusive options up front so bad
    # invocations fail fast, before any data is loaded.
    if (args.bfile2 is not None):
        assert args.pheno2 is not None, '--pheno2 must be specified with --bfile2'
        assert args.prev2 is not None, '--prev2 must be specified with --bfile2'
    if (args.bfile2 is None):
        assert args.keep2 is None, '--keep2 cannot be specified without --bfile2'
        assert args.covar2 is None, '--covar2 cannot be specified without --bfile2'
        assert args.prev2 is None, '--prev2 cannot be specified without --bfile2'
        assert args.pheno2 is None, '--pheno2 cannot be specified without --bfile2'
        assert args.z2_nocov_out is None, '--z2_nocov_out cannot be specified without --bfile2'
        assert args.z2_cov_out is None, '--z2_cov_out cannot be specified without --bfile2'
        assert args.numPCs2==0, '--numPCs2 cannot be specified without --bfile2'
        assert args.PC2 is None, '--PC2 cannot be specified without --bfile2'
    # explicit PC column lists and automatic PC computation are exclusive
    if (args.numPCs1>0): assert args.PC1 is None, 'PC1 cannot be specified with numPCs1'
    if (args.numPCs2>0): assert args.PC2 is None, 'PC2 cannot be specified with numPCs2'

    if (args.PC1 is not None):
        assert args.covar1 is not None, '--PC1 cannot be specified without --covar1'
        # parse '1,2,3' into a 1-based integer index array
        args.PC1 = np.array(args.PC1.split(','), dtype=np.int)
        assert np.all(args.PC1 >= 1), '--PC1 numbers must be >=1'
    if (args.PC2 is not None):
        assert args.covar2 is not None, '--PC2 cannot be specified without --covar2'
        args.PC2 = np.array(args.PC2.split(','), dtype=np.int)
        assert np.all(args.PC2 >= 1), '--PC2 numbers must be >=1'

    # SNP-subset mode is single-study only and incompatible with Gty output
    if (args.snp1 is not None):
        assert args.snp1>=1, '--snp1 must be >=1'
        assert args.snp2 is not None, '--snp1 must be specified with --snp2'
        assert args.bfile2 is None, '--snp1 cannot be specified when two bfiles are provided'
        assert args.Gty1_nocov_out is None, 'Gty1_nocov_out cannot be specified when --snp1 is set'
        assert args.Gty1_cov_out is None, 'Gty1_cov_out cannot be specified when --snp1 is set'
    if (args.snp2 is not None): assert args.snp1 is not None, '--snp2 must be specified with --snp1'

    if (args.maf is not None): assert args.norm=='maf', '--maf option can only be used when "--norm maf" option is invoked'
    if (args.norm == 'maf'): assert args.maf is not None, 'maf file must be provided to use "--norm maf"'

    # covariate-dependent outputs require the matching covariates file
    # NOTE(review): 'cannor' typos in the next four runtime messages, left as-is here
    if (args.covar1 is None):
        assert args.z1_cov_out is None, 'z1_cov_out cannor be specified without covar1'
        assert args.Gty1_cov_out is None, 'Gty1_out cannor be specified without covar1'
    if (args.covar2 is None):
        assert args.z2_cov_out is None, 'z2_cov_out cannor be specified without covar1'
        assert args.Gty2_cov_out is None, 'Gty2_out cannor be specified without covar1'

    if (args.sumstats_only > 0):
        assert args.z1_nocov_out is not None or args.z1_cov_out is not None, 'z1_nocov_out or z1_cov_out must be defined when sumstats_only=1'
        assert args.num_perms==0, 'permutation testing can not be used when sumstats_only=1'
    #####################################################################################

    #read and preprocess the data
    # pcgcs_utils.read_SNPs loads, filters and normalizes both plink files;
    # study-2 slots come back as None in single-study mode
    X1, bed1, phe1, cov1, X2, bed2, phe2, cov2 = pcgcs_utils.read_SNPs(bfile1=args.bfile1, pheno1=args.pheno1, prev1=args.prev1, covar1=args.covar1, keep1=args.keep1, bfile2=args.bfile2, pheno2=args.pheno2, prev2=args.prev2, covar2=args.covar2, keep2=args.keep2, extract=args.extract, missingPhenotype=args.missingPhenotype, chr=args.chr, norm=args.norm, maf=args.maf, center=args.center>0, snp1=args.snp1, snp2=args.snp2)
    assert np.all(~np.isnan(X1))
    if (cov1 is not None): assert np.all(~np.isnan(cov1))

    #regress out PCs
    # s*, sum_s*, sum_s*_sqr hold the singular-value summaries produced when
    # PCs are computed here (needed later by print_sumstats); None otherwise
    s1, sum_s1, sum_s1_sqr = None, None, None
    if (args.PC1 is not None):
        # PCs were supplied as covariate columns: residualize X1 on them
        print 'regressing given PCs out of bfile1'
        X1 = regress_given_PCs(X1, cov1, args.PC1)
    elif (args.numPCs1>0):
        # compute top PCs from the genotypes themselves, then append them to
        # the covariates so downstream PCGC adjusts for them
        print 'Regressing top %d PCs out of bfile 1'%(args.numPCs1)
        X1, U1, s1, sum_s1, sum_s1_sqr = pcgcs_utils.regress_PCs(X1, args.numPCs1)
        print 'done'
        if (cov1 is None): cov1 = U1
        else: cov1 = np.concatenate((cov1, U1), axis=1)

    s2, sum_s2, sum_s2_sqr = None, None, None
    if (args.PC2 is not None):
        print 'regressing given PCs out of bfile2'
        X2 = regress_given_PCs(X2, cov2, args.PC2)
    elif (args.numPCs2>0):
        print 'Regressing top %d PCs out of bfile 2'%(args.numPCs2)
        X2, U2, s2, sum_s2, sum_s2_sqr = pcgcs_utils.regress_PCs(X2, args.numPCs2)
        print 'done'
        if (cov2 is None): cov2 = U2
        else: cov2 = np.concatenate((cov2, U2), axis=1)

    #apply weights
    if (args.snp_weights is not None):
        print 'weighting SNPs...'
        # NOTE(review): column name 'weigt' is a typo but only used internally here
        df_weights = pd.read_csv(args.snp_weights, names=['snp', 'weigt'], delim_whitespace=True, header=None, index_col='snp', squeeze=True)
        ###import ipdb; ipdb.set_trace()
        assert np.all(np.isin(bed1.sid, df_weights.index)), 'not all SNPs have weights'
        # align weights to the SNP order of the loaded bed file
        df_weights = df_weights.loc[bed1.sid]
        assert df_weights.shape[0] == len(bed1.sid)
        snp_weights = df_weights.values
        assert np.all(snp_weights>=0)
        # rescale columns so the mean squared weight is 1 (kinship scale kept)
        X1 *= np.sqrt(snp_weights * X1.shape[1]/snp_weights.sum())
        if (bed2 is not None):
            X2 *= np.sqrt(snp_weights * X2.shape[1]/snp_weights.sum())
    #print plink file sizes
    print_memory_usage(3.1)
    # cases are phenotypes above the mean (phenotypes are assumed binary/centered)
    print 'bfile1: %d cases, %d controls, %d SNPs'%(np.sum(phe1>phe1.mean()), np.sum(phe1<=phe1.mean()), bed1.sid.shape[0])
    print_memory_usage(3.2)
    # kinship diagonals (per-individual self-kinship), needed by the
    # estimators and the Gty outputs; skipped in pure sumstats mode
    if (args.sumstats_only==0 or args.Gty1_nocov_out is not None or args.Gty1_cov_out is not None):
        G1_diag = np.einsum('ij,ij->i', X1,X1) / float(X1.shape[1])
        ###print 'G1_diag:', G1_diag[:10]
    print_memory_usage(3.3)
    if (bed2 is not None):
        if (args.sumstats_only==0 or args.Gty2_nocov_out is not None or args.Gty2_cov_out is not None):
            G2_diag = np.einsum('ij,ij->i', X2,X2) / float(X2.shape[1])
        print 'bfile2: %d cases, %d controls, %d SNPs'%(np.sum(phe2>phe2.mean()), np.sum(phe2<=phe2.mean()), bed2.sid.shape[0])
    print_memory_usage(4)

    #PCGC initial computations
    # prepare_PCGC returns the normalized phenotype, per-individual threshold
    # values, the PCGC scaling coefficient and the covariate weight vectors
    y1_norm, tau_i_1, pcgc_coeff1, ty1, u1_0, u1_1 = pcgcs_utils.prepare_PCGC(phe1, args.prev1, cov1)
    if (cov1 is not None): var_t1 = varLiab_covar(args.prev1, tau_i_1, phe1)
    else: var_t1=0
    if (bed2 is None): u2_0, u2_1, var_t2 = None, None, None
    else:
        y2_norm, tau_i_2, pcgc_coeff2, ty2, u2_0, u2_1 = pcgcs_utils.prepare_PCGC(phe2, args.prev2, cov2)
        if (cov2 is not None): var_t2 = varLiab_covar(args.prev2, tau_i_2, phe2)
        else: var_t2=0
        # cross-study coefficient is the geometric mean of the two
        pcgc_coeff12 = np.sqrt(pcgc_coeff1 * pcgc_coeff2)

    #compute z-scores
    z1_nocov = y1_norm.dot(X1) / np.sqrt(len(phe1))
    z1_withcov = (ty1 * (u1_0+u1_1)).dot(X1)
    if (bed2 is not None):
        z2_nocov = y2_norm.dot(X2) / np.sqrt(len(phe2))
        z2_withcov = (ty2 * (u2_0+u2_1)).dot(X2)

    #write z-scores if required
    if (args.z1_nocov_out is not None): write_sumstats(z1_nocov, len(phe1), bed1.sid, args.z1_nocov_out)
    if (args.z1_cov_out is not None): write_sumstats(z1_withcov, len(phe1), bed1.sid, args.z1_cov_out, compute_p=False)
    if (args.z2_nocov_out is not None): write_sumstats(z2_nocov, len(phe2), bed2.sid, args.z2_nocov_out)
    if (args.z2_cov_out is not None): write_sumstats(z2_withcov, len(phe2), bed2.sid, args.z2_cov_out, compute_p=False)
    print_memory_usage(5)

    #write Gty files
    # per-individual summary values sqrt(G_ii)*y_i, used by summary-statistic
    # based PCGC to correct for overlapping individuals between studies
    if (args.Gty1_nocov_out is not None):
        Gty1 = np.sqrt(G1_diag) * y1_norm
        df = pd.DataFrame(bed1.iid, columns=['fid', 'iid'])
        df['Gty1'] = Gty1
        df.to_csv(args.Gty1_nocov_out, sep='\t', index=False, float_format='%0.6e', header=None)
    if (args.Gty2_nocov_out is not None):
        Gty2 = np.sqrt(G2_diag) * y2_norm
        df = pd.DataFrame(bed2.iid, columns=['fid', 'iid'])
        df['Gty2'] = Gty2
        df.to_csv(args.Gty2_nocov_out, sep='\t', index=False, float_format='%0.6e', header=None)
    if (args.Gty1_cov_out is not None):
        Gty1 = np.sqrt(G1_diag) * ty1 * (u1_0 + u1_1)
        df = pd.DataFrame(bed1.iid, columns=['fid', 'iid'])
        df['Gty1'] = Gty1
        df.to_csv(args.Gty1_cov_out, sep='\t', index=False, float_format='%0.6e', header=None)
    if (args.Gty2_cov_out is not None):
        Gty2 = np.sqrt(G2_diag) * ty2 * (u2_0 + u2_1)
        df = pd.DataFrame(bed2.iid, columns=['fid', 'iid'])
        df['Gty2'] = Gty2
        df.to_csv(args.Gty2_cov_out, sep='\t', index=False, float_format='%0.6e', header=None)

    # in sumstats-only mode, print the summary constants and stop here
    if (args.sumstats_only > 0):
        print_sumstats(cov1, u1_0, u1_1, var_t1, cov2, u2_0, u2_1, var_t2, s1, sum_s1, sum_s1_sqr, s2, sum_s2, sum_s2_sqr)
        sys.exit(0)
    #find overlapping individuals
    # individuals present in both studies must be excluded from cross-study
    # kinship sums; matching is by individual id (iid column of the fam data)
    if (bed2 is not None):
        print 'marking correlations between overlapping individuals...'
        # NOTE(review): np.bool is removed in modern NumPy; fine for the
        # python-2-era stack this script targets
        is_same = np.zeros((X1.shape[0], X2.shape[0]), dtype=np.bool)
        is_same1 = np.zeros(X1.shape[0], dtype=np.bool)
        is_same2 = np.zeros(X2.shape[0], dtype=np.bool)
        num_overlap=0
        # O(n1*n2) scan; acceptable for typical study sizes
        for i1, ind1 in enumerate(bed1.iid[:,1]):
            for i2, ind2 in enumerate(bed2.iid[:,1]):
                if (ind1 == ind2):
                    is_same[i1,i2] = True
                    is_same1[i1] = True
                    is_same2[i2] = True
                    num_overlap+=1
        print 'found %d overlapping individuals'%(num_overlap)
        # cross-study self-kinship of each overlapping individual
        #G12_issame = np.mean(X1[is_same1] * X2[is_same2], axis=1)
        G12_issame = np.einsum('ij,ij->i', X1[is_same1], X2[is_same2]) / float(X1.shape[1])
    #Compute PCGC estimates, ignore covariates
    # numerator y'Gy (diagonal removed) is computed from the z-scores so the
    # full kinship matrix is never formed
    #sig2g_1_nocov_old = np.sum(np.outer(y1_norm, y1_norm) * G1) / np.sum(G1**2) / pcgc_coeff1
    sig2g1_numer = z1_nocov.dot(z1_nocov) * len(phe1) / float(X1.shape[1]) - G1_diag.dot(y1_norm**2)
    print 'computing PCGC denominator without covariates...'
    t0 = time.time()
    sig2g1_denom = pcgc_denom_lowmem_nocov(X1,X1, window_size=args.mem_size)
    print 'done in %0.2f seconds'%(time.time() - t0)
    sig2g_1_nocov = sig2g1_numer / sig2g1_denom / pcgc_coeff1
    if (bed2 is not None):
        #sig2g_2_nocov_old = np.sum(np.outer(y2_norm, y2_norm) * G2) / np.sum(G2**2) / pcgc_coeff2
        sig2g2_numer = z2_nocov.dot(z2_nocov) * len(phe2) / float(X2.shape[1]) - G2_diag.dot(y2_norm**2)
        sig2g2_denom = pcgc_denom_lowmem_nocov(X2,X2, window_size=args.mem_size)
        sig2g_2_nocov = sig2g2_numer / sig2g2_denom / pcgc_coeff2
        # genetic covariance: cross z-score product minus the contribution of
        # overlapping individuals
        #rho_nocov_old = np.sum(np.outer(y1_norm, y2_norm) * G12) / np.sum(G12**2) / pcgc_coeff12
        rho_numer = z1_nocov.dot(z2_nocov) * np.sqrt(len(phe1) * len(phe2)) / float(X2.shape[1]) - np.sum(G12_issame * y1_norm[is_same1] * y2_norm[is_same2])
        rho_denom = pcgc_denom_lowmem_nocov(X1, X2, is_same=is_same, window_size=args.mem_size)
        rho_nocov = rho_numer / rho_denom / pcgc_coeff12

    #perform jackknife computations
    if (args.jackknife > 0):
        print 'Computing jackknife standard errors with omitted covariates...'
        t0 = time.time()
        if (bed2 is None):
            sig2g1_se_nocov = pcgc_jackknife_sig2g(X1, y1_norm, sig2g1_numer, sig2g1_denom, pcgc_coeff1, window_size=args.mem_size)
        else:
            sig2g1_se_nocov, sig2g2_se_nocov, rho_se_nocov, corr_se_nocov = pcgc_jackknife_corr(X1, X2, y1_norm, y2_norm,
                sig2g1_numer, sig2g1_denom, sig2g2_numer, sig2g2_denom, rho_numer, rho_denom,
                pcgc_coeff1, pcgc_coeff2, pcgc_coeff12,
                is_same=is_same, window_size=args.mem_size)
        print 'done in %0.2f seconds'%(time.time() - t0)

    # report (trailing commas keep estimate and its SE on one output line)
    print
    print 'Results when excluding covariates'
    print '---------------------------------'
    print 'study 1 h2: %0.4f'%(sig2g_1_nocov),
    if (args.jackknife>0): print '(%0.4f)'%(sig2g1_se_nocov),
    print
    if (bed2 is not None):
        print 'study 2 h2: %0.4f'%(sig2g_2_nocov),
        if (args.jackknife>0): print '(%0.4f)'%(sig2g2_se_nocov),
        print
        print 'genetic covariance: %0.4f'%(rho_nocov),
        if (args.jackknife>0): print '(%0.4f)'%(rho_se_nocov),
        print
        print 'genetic correlation: %0.4f'%(rho_nocov / np.sqrt(sig2g_1_nocov * sig2g_2_nocov)),
        if (args.jackknife>0): print '(%0.4f)'%(corr_se_nocov),
        print

    #permutation testing code
    if (args.num_perms > 0):
        print
        print 'Performing covariate-less permutation testing for heritability of study 1 with %d permutations...'%(args.num_perms)
        t0 = time.time()
        rho_pvalue_nocov = permutation_test_heritability(X1, y1_norm, G1_diag, num_perms=args.num_perms)
        print 'done in %0.2f seconds'%(time.time()-t0)
        print 'study 1 h2 p-value (excluding covariates): %0.5e'%(rho_pvalue_nocov)
        if (rho_pvalue_nocov < 100.0/args.num_perms):
            print 'WARNING: p-value is close to the possible limit due to the number of permutations. Please increase the number of permutations to obtain a more accurate result'
    if (bed2 is not None and args.num_perms > 0):
        print
        print 'Performing covariate-less permutation testing for heritability of study 2 with %d permutations...'%(args.num_perms)
        t0 = time.time()
        rho_pvalue_nocov = permutation_test_heritability(X2, y2_norm, G2_diag, num_perms=args.num_perms)
        print 'done in %0.2f seconds'%(time.time()-t0)
        print 'study 2 h2 p-value (excluding covariates): %0.5e'%(rho_pvalue_nocov)
        if (rho_pvalue_nocov < 100.0/args.num_perms):
            print 'WARNING: p-value is close to the possible limit due to the number of permutations. Please increase the number of permutations to obtain a more accurate result'

        print
        print 'Performing covariate-less permutation testing for genetic correlation with %d permutations...'%(args.num_perms)
        t0 = time.time()
        rho_pvalue_nocov = permutation_test2(X1, y1_norm, X2, y2_norm, G12_issame, is_same1, is_same2, num_perms=args.num_perms)
        print 'done in %0.2f seconds'%(time.time()-t0)
        print 'genetic correlation p-value (excluding covariates): %0.5e'%(rho_pvalue_nocov)
        if (rho_pvalue_nocov < 100.0/args.num_perms):
            print 'WARNING: p-value is close to the possible limit due to the number of permutations. Please increase the number of permutations to obtain a more accurate result'
        print

    print
    # covariate-aware estimates (only when at least one study has covariates)
    if (cov1 is not None or cov2 is not None):
        # covariate-weighted phenotypes, same weighting as z*_withcov above
        qty1 = ty1 * (u1_0 + u1_1)
        if (bed2 is not None): qty2 = ty2 * (u2_0 + u2_1)

        #Compute PCGC estimates, include covariates
        #sig2g_1_withcov_old = np.sum(np.outer(ty1, ty1)*G1*Q1) / np.sum((G1*Q1)**2)
        numer_sig2g1 = z1_withcov.dot(z1_withcov) / X1.shape[1] - G1_diag.dot(qty1**2)
        denom_sig2g1 = pcgc_denom_lowmem(X1, X1, u1_0, u1_1, u1_0, u1_1, window_size=args.mem_size)
        sig2g_1_withcov = numer_sig2g1 / denom_sig2g1
        # convert genetic variance to h2 on the liability scale, accounting
        # for the liability variance explained by covariates
        h2_1_withcov = sig2g_1_withcov / (1 + var_t1)
        if (bed2 is not None):
            #sig2g_2_withcov_old = np.sum(np.outer(ty2, ty2)*G2*Q2) / np.sum((G2*Q2)**2)
            numer_sig2g2 = z2_withcov.dot(z2_withcov) / X2.shape[1] - G2_diag.dot(qty2**2)
            denom_sig2g2 = pcgc_denom_lowmem(X2, X2, u2_0, u2_1, u2_0, u2_1, window_size=args.mem_size)
            sig2g_2_withcov = numer_sig2g2 / denom_sig2g2
            h2_2_withcov = sig2g_2_withcov / (1 + var_t2)
            #rho_withcov_old = np.sum(np.outer(ty1, ty2)*G12*Q12) / np.sum((G12*Q12)**2)
            numer_rho = z1_withcov.dot(z2_withcov) / X2.shape[1] - np.sum(G12_issame * qty1[is_same1] * qty2[is_same2])
            denom_rho = pcgc_denom_lowmem(X1, X2, u1_0, u1_1, u2_0, u2_1, is_same, window_size=args.mem_size)
            rho_withcov = numer_rho / denom_rho

        if (args.jackknife > 0):
            print 'Computing jackknife standard errors with covariates...'
            t0 = time.time()
            if (bed2 is None):
                sig2g1_se_withcov = pcgc_jackknife_sig2g(X1, ty1, numer_sig2g1, denom_sig2g1, u0=u1_0, u1=u1_1, window_size=args.mem_size)
            else:
                sig2g1_se_withcov, sig2g2_se_withcov, rho_se_withcov, corr_se_withcov = pcgc_jackknife_corr(X1, X2, ty1, ty2,
                    numer_sig2g1, denom_sig2g1, numer_sig2g2, denom_sig2g2, numer_rho, denom_rho,
                    u1_0=u1_0, u1_1=u1_1, u2_0=u2_0, u2_1=u2_1,
                    is_same=is_same, window_size=args.mem_size)
            print 'done in %0.2f seconds'%(time.time()-t0)

        # report (SEs in parentheses when the jackknife was run)
        print
        print 'Results when including covariates'
        print '---------------------------------'
        if (args.jackknife==0):
            print 'study 1 h2: %0.4f (genetic variance: %0.4f)'%(h2_1_withcov, sig2g_1_withcov)
        else:
            print 'study 1 h2: %0.4f (%0.4f) (genetic variance: %0.4f (%0.4f))'%(h2_1_withcov, sig2g1_se_withcov/(1+var_t1), sig2g_1_withcov, sig2g1_se_withcov)
        if (bed2 is not None):
            if (args.jackknife==0):
                print 'study 2 h2: %0.4f (genetic variance: %0.4f)'%(h2_2_withcov, sig2g_2_withcov)
                print 'genetic covariance: %0.4f'%(rho_withcov)
                print 'genetic correlation: %0.4f'%(rho_withcov / np.sqrt(sig2g_1_withcov * sig2g_2_withcov))
            else:
                print 'study 2 h2: %0.4f (%0.4f) (genetic variance: %0.4f (%0.4f))'%(h2_2_withcov, sig2g2_se_withcov/(1+var_t2), sig2g_2_withcov, sig2g2_se_withcov)
                print 'genetic covariance: %0.4f (%0.4f)'%(rho_withcov, rho_se_withcov)
                print 'genetic correlation: %0.4f (%0.4f)'%(rho_withcov / np.sqrt(sig2g_1_withcov * sig2g_2_withcov), corr_se_withcov)

        #permutation testing code
        if (args.num_perms > 0):
            print
            print 'Performing covariate-aware permutation testing for heritability of study 1 with %d permutations...'%(args.num_perms)
            t0 = time.time()
            rho_pvalue_cov = permutation_test_heritability(X1, qty1, G1_diag, num_perms=args.num_perms)
            print 'done in %0.2f seconds'%(time.time()-t0)
            print 'study 1 h2 p-value (including covariates): %0.5e'%(rho_pvalue_cov)
            if (rho_pvalue_cov < 100.0/args.num_perms):
                print 'WARNING: p-value is close to the possible limit due to the number of permutations. Please increase the number of permutations to obtain a more accurate result'
        if (args.covar2 is not None and args.num_perms > 0):
            print
            print 'Performing covariate-aware permutation testing for heritability of study 2 with %d permutations...'%(args.num_perms)
            t0 = time.time()
            rho_pvalue_cov = permutation_test_heritability(X2, qty2, G2_diag, num_perms=args.num_perms)
            print 'done in %0.2f seconds'%(time.time()-t0)
            print 'study 2 h2 p-value (including covariates): %0.5e'%(rho_pvalue_cov)
            if (rho_pvalue_cov < 100.0/args.num_perms):
                print 'WARNING: p-value is close to the possible limit due to the number of permutations. Please increase the number of permutations to obtain a more accurate result'

            print
            print 'Performing covariate-aware permutation testing for genetic correlation with %d permutations...'%(args.num_perms)
            t0 = time.time()
            rho_pvalue_cov = permutation_test2(X1, qty1, X2, qty2, G12_issame, is_same1, is_same2, num_perms=args.num_perms)
            print 'done in %0.2f seconds'%(time.time()-t0)
            print 'genetic correlation p-value (including covariates): %0.5e'%(rho_pvalue_cov)
            if (rho_pvalue_cov < 100.0/args.num_perms):
                print 'WARNING: p-value is close to the possible limit due to the number of permutations. Please increase the number of permutations to obtain a more accurate result'
print_sumstats(cov1, u1_0, u1_1, var_t1, cov2, u2_0, u2_1, var_t2, s1, sum_s1, sum_s1_sqr, s2, sum_s2, sum_s2_sqr) | mit |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/errors/types/policy_violation_error.py | 1 | 1169 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.errors',
marshal='google.ads.googleads.v6',
manifest={
'PolicyViolationErrorEnum',
},
)
class PolicyViolationErrorEnum(proto.Message):
    r"""Container for enum describing possible policy violation
    errors.
    """

    class PolicyViolationError(proto.Enum):
        r"""Enum describing possible policy violation errors."""
        # Auto-generated proto-plus wrapper (package google.ads.googleads.v6.errors).
        # 0/1 follow the proto3 convention: value not set / value unknown in
        # this API version.
        UNSPECIFIED = 0
        UNKNOWN = 1
        POLICY_ERROR = 2
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
rahushen/ansible | lib/ansible/playbook/role_include.py | 23 | 5507 |
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os.path import basename
from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role import Role
from ansible.playbook.role.include import RoleInclude
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['IncludeRole']
class IncludeRole(TaskInclude):
    """
    A Role include is derived from a regular role to handle the special
    circumstances related to the `- include_role: ...`
    """
    BASE = ('name', 'role')  # directly assigned
    FROM_ARGS = ('tasks_from', 'vars_from', 'defaults_from')  # used to populate from dict in role
    OTHER_ARGS = ('private', 'allow_duplicates')  # assigned to matching property
    VALID_ARGS = tuple(frozenset(BASE + FROM_ARGS + OTHER_ARGS))  # all valid args
    _inheritable = False
    # =================================================================================
    # ATTRIBUTES
    # private as this is a 'module options' vs a task property
    _allow_duplicates = FieldAttribute(isa='bool', default=True, private=True)
    _private = FieldAttribute(isa='bool', default=None, private=True)
    def __init__(self, block=None, role=None, task_include=None):
        super(IncludeRole, self).__init__(block=block, role=role, task_include=task_include)
        # per-section file overrides (tasks/vars/defaults), filled in by load()
        self._from_files = {}
        self._parent_role = role
        self._role_name = None
        self._role_path = None
    def get_block_list(self, play=None, variable_manager=None, loader=None):
        """Load and compile the included role, returning its task blocks
        and handler blocks. As a side effect, the role's handlers are
        appended to the play's handler list.
        """
        # only need play passed in when dynamic
        if play is None:
            myplay = self._parent._play
        else:
            myplay = play
        ri = RoleInclude.load(self._role_name, play=myplay, variable_manager=variable_manager, loader=loader)
        ri.vars.update(self.vars)
        # build role
        actual_role = Role.load(ri, myplay, parent_role=self._parent_role, from_files=self._from_files)
        # propagate the include's allow_duplicates option onto the role metadata
        actual_role._metadata.allow_duplicates = self.allow_duplicates
        # save this for later use
        self._role_path = actual_role._role_path
        # compile role with parent roles as dependencies to ensure they inherit
        # variables
        if not self._parent_role:
            dep_chain = []
        else:
            dep_chain = list(self._parent_role._parents)
            dep_chain.append(self._parent_role)
        blocks = actual_role.compile(play=myplay, dep_chain=dep_chain)
        # re-parent the compiled blocks to this include so inheritance works
        for b in blocks:
            b._parent = self
        # updated available handlers in play
        handlers = actual_role.get_handler_blocks(play=myplay)
        myplay.handlers = myplay.handlers + handlers
        return blocks, handlers
    @staticmethod
    def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
        """Parse an ``include_role`` task entry into an IncludeRole object,
        validating its options. Raises AnsibleParserError on a missing role
        name or unknown options.
        """
        ir = IncludeRole(block, role, task_include=task_include).load_data(data, variable_manager=variable_manager, loader=loader)
        # Validate options
        my_arg_names = frozenset(ir.args.keys())
        # name is needed, or use role as alias
        ir._role_name = ir.args.get('name', ir.args.get('role'))
        if ir._role_name is None:
            raise AnsibleParserError("'name' is a required field for include_role.")
        # validate bad args, otherwise we silently ignore
        bad_opts = my_arg_names.difference(IncludeRole.VALID_ARGS)
        if bad_opts:
            raise AnsibleParserError('Invalid options for include_role: %s' % ','.join(list(bad_opts)))
        # build options for role includes
        for key in my_arg_names.intersection(IncludeRole.FROM_ARGS):
            # 'tasks_from' -> _from_files['tasks'], etc.
            from_key = key.replace('_from', '')
            ir._from_files[from_key] = basename(ir.args.get(key))
        # manual list as otherwise the options would set other task parameters we don't want.
        for option in my_arg_names.intersection(IncludeRole.OTHER_ARGS):
            setattr(ir, option, ir.args.get(option))
        return ir
    def copy(self, exclude_parent=False, exclude_tasks=False):
        """Return a copy of this include, carrying over the include-role
        specific bookkeeping attributes the base copy does not know about.
        """
        new_me = super(IncludeRole, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)
        new_me.statically_loaded = self.statically_loaded
        new_me._from_files = self._from_files.copy()
        new_me._parent_role = self._parent_role
        new_me._role_name = self._role_name
        new_me._role_path = self._role_path
        return new_me
    def get_include_params(self):
        """Return include parameters, merged with the parent role's
        parameters when this include lives inside a role.
        """
        v = super(IncludeRole, self).get_include_params()
        if self._parent_role:
            v.update(self._parent_role.get_role_params())
        return v
| gpl-3.0 |
xdggplus/jsunpack-n | html.py | 23 | 9270 | #!/usr/bin/python
'''
Jsunpackn - A generic JavaScript Unpacker Network Edition
Copyright (C) 2010 Blake Hartstein
http://jsunpack.jeek.org/
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
import re
import sys
try:
from bs4 import BeautifulSoup
except ImportError:
# BeautifulSoup 4.x not installed trying BeautifulSoup 3.x
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
print ('BeautifulSoup not installed')
exit(-1)
class Parser:
'''
A simple HTML language parser. Uses the 'htmlparse.conf' file to define rules.
Please read that file for more information on the syntax
<Parser obj>.storage is a 'special' return field. You should only use it if you
wish to get the result in python instead of via an output string.
'''
debug = False
def __init__(self, htmlparseconfig):
self.storage = []
self.html_definitions = {}
self.html_filters = {}
self.html_parse_rules = []
try:
htmlrules = htmlparseconfig.splitlines()
except:
htmlrules = []
print 'Problem while parsing HTML parsing rules'
line = 0
for htmlrule in htmlrules:
line += 1
htmlrule = re.sub('\n', '', htmlrule)
if not re.match('^\s*$|^#', htmlrule):
htmlrule = re.sub('[ \t]+', ' ', htmlrule)
field = htmlrule.split(' ')
if htmlrule.startswith('!define'):
if len(field) > 1:
name, value = field[1], ' '.join(field[2:])
self.html_definitions[name] = value
elif htmlrule.startswith('!parse'):
if len(field) == 4:
tag = field[1]
if tag == '*':
tag = True
attrib = {}
invals = field[2].split(',')
for val in invals:
if val == '*' or val.startswith('!'):
pass
else:
attrib[val] = True
hformat, outvals = field[3].split(':')
outvals = outvals.split(',')
self.html_parse_rules.append([tag, attrib, invals,
hformat, outvals])
elif htmlrule.startswith('!filter'):
if len(field) > 2:
tag, value = field[1], ' '.join(field[2:])
self.html_filters[tag] = re.sub('^\s+|\s+$', '', value)
else:
print 'fatal: invalid htmlparse.config line: %d' % line
if self.debug:
print ('done loading htmlparse, (%d parse_rules, %d definitions, '
'%d filters)' % (len(self.html_parse_rules),
len(self.html_definitions),
len(self.html_filters)))
def htmlparse(self, data):
'''
Input: can be html code or raw JavaScript code
Output: an array of [headers, raw JavaScript]
'''
outheader, out = '', ''
data = re.sub('\x00', '', data)
try:
soup = BeautifulSoup(data)
except:
print('Fatal error during HTML parsing')
return '', ''
for tag, attrib, invals, hformat, outvals in self.html_parse_rules:
for htm in soup.findAll(tag, attrib):
now = {}
ignore = False #if a negated match occurs
for val in invals:
if val.startswith('!'):
#negated match
val = val[1:]
try:
now[val] = str(htm[val])
ignore = True
except:
pass #expected behavior
if not ignore:
for val in outvals:
if val == '*':
now['*'] = ''
elif val == 'contents':
try:
now['contents'] = ' '.join(map(str,
htm.contents))
except KeyError:
now['contents'] = ''
except UnicodeEncodeError:
now['contents'] = ' '.join(map(str,
str(htm.contents)
))
elif val == 'name':
try:
now['name'] = htm.name
except KeyError:
now['name'] = ''
else:
try:
now[val] = str(htm[val])
except KeyError:
now[val] = ''
#normalize when assigning to variables
for k in now:
# if this fails, it means that we are trying to get the
# result in python
if hformat in self.html_definitions:
if not hformat.startswith('raw'):
now[k] = re.sub('([^a-zA-Z0-9])',
lambda m: ('\\x%02x'
% ord(m.group(1))),
now[k])
now[k] = "'%s'" % now[k]
# if this fails, it means that we are trying to get the
# result in python
if hformat in self.html_definitions:
myfmt = re.sub('^\s+', '',
self.html_definitions[hformat]
).split('%s')
if len(myfmt) - 1 == len(outvals):
lineout = ''
for i in range(0, len(outvals)):
lineout += myfmt[i]
lineout += now[outvals[i]]
lineout += myfmt[-1] + '\n'
if htm.name in self.html_filters:
lineout = re.sub(self.html_filters[htm.name],
'', lineout)
if '*' in self.html_filters:
lineout = re.sub(self.html_filters['*'], '',
lineout, re.I)
if hformat.startswith('header'):
outheader += lineout
else:
out += lineout
else:
print ('fatal: invalid htmlparse.config hformat, '
'parameter count or definition problem')
else:
for i in range(0, len(outvals)):
self.storage.append([hformat, now[outvals[i]]])
return str(outheader), str(out)
def main():
'''
Testing html Parser with pdf as input
'''
Parser.debug = True
#fin = open('htmlparse.config', 'r')
#htmlparseconfig = fin.read()
#fin.close()
pdfparseconfig = '''
!define rawSCRIPT ;%s
!parse script * rawSCRIPT:contents
!parse imagefield1 * to_python:contents
!filter script <[/]?script[^>]*>|<!--|//-->
!filter * ^javascript:\s*|^return\s+
'''
#xfa:contenttype
hparser = Parser(pdfparseconfig)
#hparser = Parser(htmlparseconfig)
for infile in sys.argv[1:]:
fin = open(infile, 'rb')
data = fin.read()
fin.close()
parsed_header, parsed = hparser.htmlparse(data)
parsed = parsed_header + parsed
if len(parsed) > 0:
fout = open('%s.out' % infile, 'wb')
fout.write(parsed)
fout.close()
print 'Wrote %s.out (%d bytes)' % (infile, len(parsed))
else:
print 'Nothing parsed for %s' % infile
if __name__ == '__main__':
main()
| gpl-2.0 |
fperez/sympy | sympy/thirdparty/pyglet/pyglet/font/__init__.py | 5 | 23261 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Load fonts and render text.
This is a fairly-low level interface to text rendering. Obtain a font using
`load`::
from pyglet import font
arial = font.load('Arial', 14, bold=True, italic=False)
pyglet will load any system-installed fonts. You can add additional fonts
(for example, from your program resources) using `add_file` or
`add_directory`.
Obtain a list of `Glyph` objects for a string of text using the `Font`
object::
text = 'Hello, world!'
glyphs = arial.get_glyphs(text)
The most efficient way to render these glyphs is with a `GlyphString`::
glyph_string = GlyphString(text, glyphs)
glyph_string.draw()
There are also a variety of methods in both `Font` and
`GlyphString` to facilitate word-wrapping.
A convenient way to render a string of text is with a `Text`::
text = Text(font, text)
text.draw()
See the `pyglet.font.base` module for documentation on the base classes used
by this package.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: __init__.py 2136 2008-07-24 00:50:15Z Alex.Holkner $'
import sys
import os
import math
import weakref
import pyglet
from pyglet.gl import *
from pyglet import gl
from pyglet import image
from pyglet import window
class GlyphString(object):
    '''An immutable string of glyphs that can be rendered quickly.
    This class is ideal for quickly rendering single or multi-line strings
    of text that use the same font. To wrap text using a glyph string,
    call `get_break_index` to find the optimal breakpoint for each line,
    then repeatedly call `draw` for each breakpoint.
    :deprecated: Use `pyglet.text.layout` classes.
    '''
    def __init__(self, text, glyphs, x=0, y=0):
        '''Create a glyph string.
        The `text` string is used to determine valid breakpoints; all glyphs
        must have already been determined using
        `pyglet.font.base.Font.get_glyphs`. The string
        will be positioned with the baseline of the left-most glyph at the
        given coordinates.
        :Parameters:
            `text` : str or unicode
                String to represent.
            `glyphs` : list of `pyglet.font.base.Glyph`
                Glyphs representing `text`.
            `x` : float
                X coordinate of the left-side bearing of the left-most glyph.
            `y` : float
                Y coordinate of the baseline.
        '''
        # Create an interleaved array in GL_T4F_V4F format (matching the
        # glInterleavedArrays call in draw()) and determine the texture
        # state changes required.
        lst = []
        texture = None
        self.text = text
        self.states = []
        self.cumulative_advance = [] # for fast post-string breaking
        state_from = 0
        state_length = 0
        for i, glyph in enumerate(glyphs):
            if glyph.owner != texture:
                # texture switch: close the current state run and start a
                # new one for the glyph's owning texture
                if state_length:
                    self.states.append((state_from, state_length, texture))
                texture = glyph.owner
                state_from = i
                state_length = 0
            state_length += 1
            t = glyph.tex_coords
            # one quad (4 vertices) per glyph: (s,t,r,q) tex + (x,y,z,w) pos
            lst += [t[0], t[1], t[2], 1.,
                    x + glyph.vertices[0], y + glyph.vertices[1], 0., 1.,
                    t[3], t[4], t[5], 1.,
                    x + glyph.vertices[2], y + glyph.vertices[1], 0., 1.,
                    t[6], t[7], t[8], 1.,
                    x + glyph.vertices[2], y + glyph.vertices[3], 0., 1.,
                    t[9], t[10], t[11], 1.,
                    x + glyph.vertices[0], y + glyph.vertices[3], 0., 1.]
            x += glyph.advance
            self.cumulative_advance.append(x)
        self.states.append((state_from, state_length, texture))
        self.array = (c_float * len(lst))(*lst)
        self.width = x
    def get_break_index(self, from_index, width):
        '''Find a breakpoint within the text for a given width.
        Returns a valid breakpoint after `from_index` so that the text
        between `from_index` and the breakpoint fits within `width` pixels.
        This method uses precomputed cumulative glyph widths to give quick
        answer, and so is much faster than
        `pyglet.font.base.Font.get_glyphs_for_width`.
        :Parameters:
            `from_index` : int
                Index of text to begin at, or 0 for the beginning of the
                string.
            `width` : float
                Maximum width to use.
        :rtype: int
        :return: the index of text which will be used as the breakpoint, or
            `from_index` if there is no valid breakpoint.
        '''
        to_index = from_index
        if from_index >= len(self.text):
            return from_index
        if from_index:
            width += self.cumulative_advance[from_index-1]
        for i, (c, w) in enumerate(
                zip(self.text[from_index:],
                    self.cumulative_advance[from_index:])):
            # U+0020 (space) and U+200B (zero-width space) are break chars
            if c in u'\u0020\u200b':
                to_index = i + from_index + 1
            if c == '\n':
                return i + from_index + 1
            if w > width:
                return to_index
        return to_index
    def get_subwidth(self, from_index, to_index):
        '''Return the width of a slice of this string.
        :Parameters:
            `from_index` : int
                The start index of the string to measure.
            `to_index` : int
                The end index (exclusive) of the string to measure.
        :rtype: float
        '''
        if to_index <= from_index:
            return 0
        width = self.cumulative_advance[to_index-1]
        if from_index:
            width -= self.cumulative_advance[from_index-1]
        return width
    def draw(self, from_index=0, to_index=None):
        '''Draw a region of the glyph string.
        Assumes texture state is enabled. To enable the texture state::
            from pyglet.gl import *
            glEnable(GL_TEXTURE_2D)
        :Parameters:
            `from_index` : int
                Start index of text to render.
            `to_index` : int
                End index (exclusive) of text to render.
        '''
        if from_index >= len(self.text) or \
           from_index == to_index or \
           not self.text:
            return
        # XXX Safe to assume all required textures will use same blend state I
        # think. (otherwise move this into loop)
        self.states[0][2].apply_blend_state()
        if from_index:
            # shift left so the glyph at from_index lands at the origin
            glPushMatrix()
            glTranslatef(-self.cumulative_advance[from_index-1], 0, 0)
        if to_index is None:
            to_index = len(self.text)
        glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
        glInterleavedArrays(GL_T4F_V4F, 0, self.array)
        for state_from, state_length, texture in self.states:
            if state_from + state_length < from_index:
                continue
            # clamp the state run to the requested [from_index, to_index)
            state_from = max(state_from, from_index)
            state_length = min(state_length, to_index - state_from)
            if state_length <= 0:
                break
            glBindTexture(GL_TEXTURE_2D, texture.id)
            glDrawArrays(GL_QUADS, state_from * 4, state_length * 4)
        glPopClientAttrib()
        if from_index:
            glPopMatrix()
class _TextZGroup(pyglet.graphics.Group):
    # Graphics group that offsets rendering along the z axis by `z`,
    # used by Text to honour its `z` parameter.
    z = 0
    def set_state(self):
        glTranslatef(0, 0, self.z)
    def unset_state(self):
        # undo the translation applied in set_state
        glTranslatef(0, 0, -self.z)
class Text(object):
    '''Simple displayable text.

    This is a convenience class for rendering strings of text. It takes
    care of caching the vertices so the text can be rendered every frame with
    little performance penalty.

    Text can be word-wrapped by specifying a `width` to wrap into. If the
    width is not specified, it gives the width of the text as laid out.

    :Ivariables:
        `x` : int
            X coordinate of the text
        `y` : int
            Y coordinate of the text

    :deprecated: Use `pyglet.text.Label`.
    '''
    # Alignment constants

    #: Align the left edge of the text to the given X coordinate.
    LEFT = 'left'
    #: Align the horizontal center of the text to the given X coordinate.
    CENTER = 'center'
    #: Align the right edge of the text to the given X coordinate.
    RIGHT = 'right'
    #: Align the bottom of the descender of the final line of text with the
    #: given Y coordinate.
    BOTTOM = 'bottom'
    #: Align the baseline of the first line of text with the given Y
    #: coordinate.
    BASELINE = 'baseline'
    #: Align the top of the ascender of the first line of text with the given
    #: Y coordinate.
    TOP = 'top'

    # None: no multiline
    # 'width': multiline, wrapped to width
    # 'multiline': multiline, no wrap
    _wrap = None

    # Internal bookkeeping for wrap only.
    _width = None

    def __init__(self, font, text='', x=0, y=0, z=0, color=(1,1,1,1),
                 width=None, halign=LEFT, valign=BASELINE):
        '''Create displayable text.

        :Parameters:
            `font` : `Font`
                Font to render the text in.
            `text` : str
                Initial string to render.
            `x` : float
                X coordinate of the left edge of the text.
            `y` : float
                Y coordinate of the baseline of the text. If the text is
                word-wrapped, this refers to the first line of text.
            `z` : float
                Z coordinate of the text plane.
            `color` : 4-tuple of float
                Color to render the text in. Alpha values can be specified
                in the fourth component.
            `width` : float
                Width to limit the rendering to. Text will be word-wrapped
                if necessary.
            `halign` : str
                Alignment of the text. See `Text.halign` for details.
            `valign` : str
                Controls positioning of the text based off the y coordinate.
                One of BASELINE, BOTTOM, CENTER or TOP. Defaults to BASELINE.
        '''
        multiline = False
        if width is not None:
            self._width = width
            self._wrap = 'width'
            multiline = True
        elif '\n' in text:
            self._wrap = 'multiline'
            multiline = True

        self._group = _TextZGroup()
        self._document = pyglet.text.decode_text(text)
        self._layout = pyglet.text.layout.TextLayout(self._document,
                                                     width=width,
                                                     multiline=multiline,
                                                     dpi=font.dpi,
                                                     group=self._group)

        # batch the style/layout changes into a single update
        self._layout.begin_update()
        if self._wrap == 'multiline':
            self._document.set_style(0, len(text), dict(wrap=False))
        self.font = font
        self.color = color
        self._x = x
        self.y = y
        self.z = z
        self.width = width
        self.halign = halign
        self.valign = valign
        self._update_layout_halign()
        self._layout.end_update()

    def _get_font(self):
        return self._font

    def _set_font(self, font):
        self._font = font
        self._layout.begin_update()
        self._document.set_style(0, len(self._document.text), {
            'font_name': font.name,
            'font_size': font.size,
            'bold': font.bold,
            'italic': font.italic,
        })
        self._layout._dpi = font.dpi
        self._layout.end_update()

    font = property(_get_font, _set_font)

    def _get_color(self):
        color = self._document.get_style('color')
        if color is None:
            return (1., 1., 1., 1.)
        # document stores 0-255 ints; this API exposes 0.0-1.0 floats
        return tuple([c/255. for c in color])

    def _set_color(self, color):
        color = [int(c * 255) for c in color]
        self._document.set_style(0, len(self._document.text), {
            'color': color,
        })

    color = property(_get_color, _set_color)

    def _update_layout_halign(self):
        if self._layout.multiline:
            # TextLayout has a different interpretation of halign that doesn't
            # consider the width to be a special factor; here we emulate the
            # old behaviour by fudging the layout x value.
            if self._layout.anchor_x == 'left':
                self._layout.x = self.x
            elif self._layout.anchor_x == 'center':
                self._layout.x = self.x + self._layout.width - \
                    self._layout.content_width // 2
            elif self._layout.anchor_x == 'right':
                self._layout.x = self.x + 2 * self._layout.width - \
                    self._layout.content_width
        else:
            self._layout.x = self.x

    def _get_x(self):
        return self._x

    def _set_x(self, x):
        self._x = x
        self._update_layout_halign()

    x = property(_get_x, _set_x)

    def _get_y(self):
        return self._layout.y

    def _set_y(self, y):
        self._layout.y = y

    y = property(_get_y, _set_y)

    def _get_z(self):
        return self._group.z

    def _set_z(self, z):
        self._group.z = z

    z = property(_get_z, _set_z)

    def _update_wrap(self):
        # re-derive the wrap mode from current width/text state
        if self._width is not None:
            self._wrap = 'width'
        elif '\n' in self.text:
            self._wrap = 'multiline'
        self._layout.begin_update()
        # BUGFIX: identity comparison for None ("== None" was unidiomatic)
        if self._wrap is None:
            self._layout.multiline = False
        elif self._wrap == 'width':
            self._layout.multiline = True
            self._layout.width = self._width
            self._document.set_style(0, len(self.text), dict(wrap=True))
        elif self._wrap == 'multiline':
            self._layout.multiline = True
            self._document.set_style(0, len(self.text), dict(wrap=False))
        self._update_layout_halign()
        self._layout.end_update()

    def _get_width(self):
        if self._wrap == 'width':
            return self._layout.width
        else:
            return self._layout.content_width

    def _set_width(self, width):
        self._width = width
        self._update_wrap()

    width = property(_get_width, _set_width,
        doc='''Width of the text.

        When set, this enables word-wrapping to the specified width.
        Otherwise, the width of the text as it will be rendered can be
        determined.

        :type: float
        ''')

    def _get_height(self):
        return self._layout.content_height

    height = property(_get_height,
        doc='''Height of the text.

        This property is the ascent minus the descent of the font, unless
        there is more than one line of word-wrapped text, in which case
        the height takes into account the line leading. Read-only.

        :type: float
        ''')

    def _get_text(self):
        return self._document.text

    def _set_text(self, text):
        self._document.text = text
        self._update_wrap()

    text = property(_get_text, _set_text,
        doc='''Text to render.

        The glyph vertices are only recalculated as needed, so multiple
        changes to the text can be performed with no performance penalty.

        :type: str
        ''')

    def _get_halign(self):
        return self._layout.anchor_x

    def _set_halign(self, halign):
        self._layout.anchor_x = halign
        self._update_layout_halign()

    halign = property(_get_halign, _set_halign,
        doc='''Horizontal alignment of the text.

        The text is positioned relative to `x` and `width` according to this
        property, which must be one of the alignment constants `LEFT`,
        `CENTER` or `RIGHT`.

        :type: str
        ''')

    def _get_valign(self):
        return self._layout.anchor_y

    def _set_valign(self, valign):
        self._layout.anchor_y = valign

    valign = property(_get_valign, _set_valign,
        doc='''Vertical alignment of the text.

        The text is positioned relative to `y` according to this property,
        which must be one of the alignment constants `BOTTOM`, `BASELINE`,
        `CENTER` or `TOP`.

        :type: str
        ''')

    def _get_leading(self):
        return self._document.get_style('leading') or 0

    def _set_leading(self, leading):
        self._document.set_style(0, len(self._document.text), {
            'leading': leading,
        })

    leading = property(_get_leading, _set_leading,
        doc='''Vertical space between adjacent lines, in pixels.

        :type: int
        ''')

    def _get_line_height(self):
        return self._font.ascent - self._font.descent + self.leading

    def _set_line_height(self, line_height):
        self.leading = line_height - (self._font.ascent - self._font.descent)

    line_height = property(_get_line_height, _set_line_height,
        doc='''Vertical distance between adjacent baselines, in pixels.

        :type: int
        ''')

    def draw(self):
        self._layout.draw()
if not getattr(sys, 'is_epydoc', False):
if sys.platform == 'darwin':
from pyglet.font.carbon import CarbonFont
_font_class = CarbonFont
elif sys.platform in ('win32', 'cygwin'):
if pyglet.options['font'][0] == 'win32':
from pyglet.font.win32 import Win32Font
_font_class = Win32Font
elif pyglet.options['font'][0] == 'gdiplus':
from pyglet.font.win32 import GDIPlusFont
_font_class = GDIPlusFont
else:
assert False, 'Unknown font driver'
else:
from pyglet.font.freetype import FreeTypeFont
_font_class = FreeTypeFont
def load(name=None, size=None, bold=False, italic=False, dpi=None):
    '''Load a font for rendering.

    :Parameters:
        `name` : str, or list of str
            Font family, for example, "Times New Roman". If a list of names
            is provided, the first one matching a known font is used. If no
            font can be matched to the name(s), a default font is used. In
            pyglet 1.1, the name may be omitted.
        `size` : float
            Size of the font, in points. The returned font may be an exact
            match or the closest available. In pyglet 1.1, the size may be
            omitted, and defaults to 12pt.
        `bold` : bool
            If True, a bold variant is returned, if one exists for the given
            family and size.
        `italic` : bool
            If True, an italic variant is returned, if one exists for the given
            family and size.
        `dpi` : float
            The assumed resolution of the display device, for the purposes of
            determining the pixel size of the font. Defaults to 96.

    :rtype: `Font`
    '''
    # Arbitrary default size
    if size is None:
        size = 12

    if dpi is None:
        dpi = 96

    # Find first matching name (isinstance rather than exact type check so
    # tuple/list subclasses behave the same way)
    if isinstance(name, (tuple, list)):
        for n in name:
            if _font_class.have_font(n):
                name = n
                break
        else:
            # no candidate matched; fall back to the platform default font
            name = None

    # Locate or create font cache
    shared_object_space = gl.current_context.object_space
    if not hasattr(shared_object_space, 'pyglet_font_font_cache'):
        shared_object_space.pyglet_font_font_cache = \
            weakref.WeakValueDictionary()
        shared_object_space.pyglet_font_font_hold = []
    font_cache = shared_object_space.pyglet_font_font_cache
    font_hold = shared_object_space.pyglet_font_font_hold

    # Look for font name in font cache
    descriptor = (name, size, bold, italic, dpi)
    if descriptor in font_cache:
        return font_cache[descriptor]

    # Not in cache, create from scratch
    font = _font_class(name, size, bold=bold, italic=italic, dpi=dpi)

    # Save parameters for new-style layout classes to recover
    font.name = name
    font.size = size
    font.bold = bold
    font.italic = italic
    font.dpi = dpi

    # Cache font in weak-ref dictionary to avoid reloading while still in use
    font_cache[descriptor] = font

    # Hold onto refs of last three loaded fonts to prevent them being
    # collected if momentarily dropped.
    del font_hold[3:]
    font_hold.insert(0, font)

    return font
def add_file(font):
    '''Add a font to pyglet's search path.

    In order to load a font that is not installed on the system, you must
    call this method to tell pyglet that it exists. You can supply
    either a filename or any file-like object.

    The font format is platform-dependent, but is typically a TrueType font
    file containing a single font face. Note that to load this file after
    adding it you must specify the face name to `load`, not the filename.

    :Parameters:
        `font` : str or file
            Filename or file-like object to load fonts from.
    '''
    if isinstance(font, (str, unicode)):
        # filename: read it ourselves and close the handle (the original
        # implementation leaked the file object here)
        with open(font, 'rb') as font_file:
            font = font_file.read()
    elif hasattr(font, 'read'):
        # caller-supplied file-like object; the caller owns closing it
        font = font.read()
    _font_class.add_font_data(font)
def add_directory(dir):
    '''Add a directory of fonts to pyglet's search path.

    This function simply calls `add_file` for each file with a ``.ttf``
    extension in the given directory. Subdirectories are not searched.

    :Parameters:
        `dir` : str
            Directory that contains font files.
    '''
    # `os` is already imported at module level; the previous function-local
    # re-import was redundant. Case-insensitive suffix match as before.
    for file_name in os.listdir(dir):
        if file_name.lower().endswith('.ttf'):
            add_file(os.path.join(dir, file_name))
| bsd-3-clause |
initcron/ansible | v2/ansible/plugins/action/include_vars.py | 15 | 1637 | # (c) 2013-2014, Benno Joy <benno@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
from types import NoneType
from ansible.errors import AnsibleError
from ansible.parsing import DataLoader
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Action plugin for ``include_vars``: load a variables file from the
    controller and return its contents as facts."""

    # everything happens on the controller; nothing is copied to the target
    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        """Resolve the vars file path and return its contents.

        :param tmp: temporary path (unused here).
        :param task_vars: variables available to the task. Defaults to an
            empty dict; the previous ``task_vars=dict()`` default was a
            mutable default argument (shared across calls).
        :return: result dict with ``ansible_facts`` on success, or a
            ``failed`` result when the file does not exist.
        """
        if task_vars is None:
            task_vars = dict()

        source = self._task.args.get('_raw_params')

        # within a role, relative paths resolve against the role's vars/ dir
        if self._task._role:
            source = self._loader.path_dwim_relative(self._task._role._role_path, 'vars', source)
        else:
            source = self._loader.path_dwim(source)

        if os.path.exists(source):
            data = self._loader.load_from_file(source)
            if data is None:
                # an empty file is valid: it simply defines no variables
                data = {}
            if not isinstance(data, dict):
                raise AnsibleError("%s must be stored as a dictionary/hash" % source)
            return dict(ansible_facts=data)
        else:
            return dict(failed=True, msg="Source file not found.", file=source)
| gpl-3.0 |
joshuajan/odoo | addons/account/wizard/account_financial_report.py | 33 | 5514 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class accounting_report(osv.osv_memory):
    """Wizard (transient model) that configures and launches the
    'account.report_financial' QWeb report, with an optional second
    "comparison" column driven by the *_cmp filter fields below.
    """
    _name = "accounting.report"
    _inherit = "account.common.report"
    _description = "Accounting Report"
    _columns = {
        'enable_filter': fields.boolean('Enable Comparison'),
        'account_report_id': fields.many2one('account.financial.report', 'Account Reports', required=True),
        'label_filter': fields.char('Column Label', size=32, help="This label will be displayed on report to show the balance computed for the given comparison filter."),
        # The *_cmp fields mirror the base report filters but apply only
        # to the comparison column.
        'fiscalyear_id_cmp': fields.many2one('account.fiscalyear', 'Fiscal Year', help='Keep empty for all open fiscal year'),
        'filter_cmp': fields.selection([('filter_no', 'No Filters'), ('filter_date', 'Date'), ('filter_period', 'Periods')], "Filter by", required=True),
        'period_from_cmp': fields.many2one('account.period', 'Start Period'),
        'period_to_cmp': fields.many2one('account.period', 'End Period'),
        'date_from_cmp': fields.date("Start Date"),
        'date_to_cmp': fields.date("End Date"),
        'debit_credit': fields.boolean('Display Debit/Credit Columns', help="This option allows you to get more details about the way your balances are computed. Because it is space consuming, we do not allow to use it while doing a comparison."),
    }

    def _get_account_report(self, cr, uid, context=None):
        """Default for account_report_id: guess the financial report whose
        name matches the menu entry the wizard was opened from.
        """
        # TODO deprecate this it doesnt work in web
        menu_obj = self.pool.get('ir.ui.menu')
        report_obj = self.pool.get('account.financial.report')
        report_ids = []
        if context.get('active_id'):
            menu = menu_obj.browse(cr, uid, context.get('active_id')).name
            report_ids = report_obj.search(cr, uid, [('name','ilike',menu)])
        return report_ids and report_ids[0] or False

    _defaults = {
        'filter_cmp': 'filter_no',
        'target_move': 'posted',
        'account_report_id': _get_account_report,
    }

    def _build_comparison_context(self, cr, uid, ids, data, context=None):
        """Build the context dict used to compute the comparison column
        from the wizard's *_cmp form values.
        """
        if context is None:
            context = {}
        result = {}
        result['fiscalyear'] = 'fiscalyear_id_cmp' in data['form'] and data['form']['fiscalyear_id_cmp'] or False
        result['journal_ids'] = 'journal_ids' in data['form'] and data['form']['journal_ids'] or False
        result['chart_account_id'] = 'chart_account_id' in data['form'] and data['form']['chart_account_id'] or False
        result['state'] = 'target_move' in data['form'] and data['form']['target_move'] or ''
        if data['form']['filter_cmp'] == 'filter_date':
            result['date_from'] = data['form']['date_from_cmp']
            result['date_to'] = data['form']['date_to_cmp']
        elif data['form']['filter_cmp'] == 'filter_period':
            if not data['form']['period_from_cmp'] or not data['form']['period_to_cmp']:
                # NOTE(review): `_` (gettext) is not among this chunk's visible
                # imports -- confirm `from openerp.tools.translate import _`
                # exists upstream, otherwise this line raises NameError.
                raise osv.except_osv(_('Error!'),_('Select a starting and an ending period'))
            result['period_from'] = data['form']['period_from_cmp']
            result['period_to'] = data['form']['period_to_cmp']
        return result

    def check_report(self, cr, uid, ids, context=None):
        """Extend the base report validation with the comparison context."""
        if context is None:
            context = {}
        res = super(accounting_report, self).check_report(cr, uid, ids, context=context)
        data = {}
        data['form'] = self.read(cr, uid, ids, ['account_report_id', 'date_from_cmp', 'date_to_cmp', 'fiscalyear_id_cmp', 'journal_ids', 'period_from_cmp', 'period_to_cmp', 'filter_cmp', 'chart_account_id', 'target_move'], context=context)[0]
        for field in ['fiscalyear_id_cmp', 'chart_account_id', 'period_from_cmp', 'period_to_cmp', 'account_report_id']:
            if isinstance(data['form'][field], tuple):
                # many2one reads come back as (id, name); keep only the id.
                data['form'][field] = data['form'][field][0]
        comparison_context = self._build_comparison_context(cr, uid, ids, data, context=context)
        res['data']['form']['comparison_context'] = comparison_context
        return res

    def _print_report(self, cr, uid, ids, data, context=None):
        """Merge the comparison form values into ``data`` and launch the report."""
        data['form'].update(self.read(cr, uid, ids, ['date_from_cmp', 'debit_credit', 'date_to_cmp', 'fiscalyear_id_cmp', 'period_from_cmp', 'period_to_cmp', 'filter_cmp', 'account_report_id', 'enable_filter', 'label_filter','target_move'], context=context)[0])
        return self.pool['report'].get_action(cr, uid, [], 'account.report_financial', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
siliconsmiley/QGIS | python/plugins/processing/algs/lidar/lastools/las2txtPro.py | 2 | 2582 | # -*- coding: utf-8 -*-
"""
***************************************************************************
las2txtPro.py
---------------------
Date : October 2014
Copyright : (C) 2014 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'October 2014'
__copyright__ = '(C) 2014, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterString
class las2txtPro(LAStoolsAlgorithm):
    """Batch (multi-core) conversion of LAS/LAZ point clouds to ASCII
    text by driving the LAStools ``las2txt`` executable.
    """

    PARSE = "PARSE"

    def defineCharacteristics(self):
        """Declare the algorithm's name, group and GUI parameters."""
        self.name, self.i18n_name = self.trAlgorithm('las2txtPro')
        self.group, self.i18n_group = self.trAlgorithm('LAStools Production')
        self.addParametersPointInputFolderGUI()
        self.addParameter(ParameterString(
            las2txtPro.PARSE, self.tr("parse string"), "xyz"))
        self.addParametersOutputDirectoryGUI()
        self.addParametersOutputAppendixGUI()
        self.addParametersAdditionalGUI()
        self.addParametersCoresGUI()
        self.addParametersVerboseGUI()

    def processAlgorithm(self, progress):
        """Assemble the las2txt command line and run it."""
        cmd_args = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "las2txt")]
        self.addParametersVerboseCommands(cmd_args)
        self.addParametersPointInputFolderCommands(cmd_args)
        parse_string = self.getParameterValue(las2txtPro.PARSE)
        # "xyz" is las2txt's built-in default, so only pass -parse when
        # the user asked for something else.
        if parse_string != "xyz":
            cmd_args.extend(["-parse", parse_string])
        self.addParametersOutputDirectoryCommands(cmd_args)
        self.addParametersOutputAppendixCommands(cmd_args)
        cmd_args.append("-otxt")
        self.addParametersAdditionalCommands(cmd_args)
        self.addParametersCoresCommands(cmd_args)
        LAStoolsUtils.runLAStools(cmd_args, progress)
| gpl-2.0 |
jasonseminara/OpenSourceFinal | myvenv/lib/python3.5/site-packages/pip/_vendor/progress/bar.py | 404 | 2707 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
from . import Progress
from .helpers import WritelnMixin
class Bar(WritelnMixin, Progress):
    """Classic text progress bar: ``message |#####     | index/max``."""
    width = 32
    message = ''
    suffix = '%(index)d/%(max)d'
    bar_prefix = ' |'
    bar_suffix = '| '
    empty_fill = ' '
    fill = '#'
    hide_cursor = True

    def update(self):
        """Redraw the bar for the current ``self.progress`` (0..1)."""
        filled = int(self.width * self.progress)
        segments = [
            self.message % self,          # %-formatting against the Progress object
            self.bar_prefix,
            self.fill * filled,
            self.empty_fill * (self.width - filled),
            self.bar_suffix,
            self.suffix % self,
        ]
        self.writeln(''.join(segments))
class ChargingBar(Bar):
    """Bar variant styled like a battery-charging indicator."""
    suffix = '%(percent)d%%'
    bar_prefix = ' '
    bar_suffix = ' '
    empty_fill = '∙'
    fill = '█'
class FillingSquaresBar(ChargingBar):
    """ChargingBar drawn with empty/filled square glyphs."""
    empty_fill = '▢'
    fill = '▣'
class FillingCirclesBar(ChargingBar):
    """ChargingBar drawn with empty/filled circle glyphs."""
    empty_fill = '◯'
    fill = '◉'
class IncrementalBar(Bar):
    """Bar with sub-cell resolution: the boundary cell is drawn with one
    of the partially-filled block characters in ``phases``."""
    phases = (' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█')

    def update(self):
        """Redraw the bar, choosing a partial phase for the edge cell."""
        nphases = len(self.phases)
        expanded_length = int(nphases * self.width * self.progress)
        filled_length = int(self.width * self.progress)
        # Index of the partial glyph in the cell right after the full ones.
        phase = expanded_length - filled_length * nphases
        current = self.phases[phase] if phase > 0 else ''
        filled = self.phases[-1] * filled_length
        empty_cells = max(0, self.width - filled_length - len(current))
        self.writeln(''.join([
            self.message % self,
            self.bar_prefix,
            filled,
            current,
            self.empty_fill * empty_cells,
            self.bar_suffix,
            self.suffix % self,
        ]))
class ShadyBar(IncrementalBar):
    """IncrementalBar using shaded-block glyphs for the partial cell."""
    phases = (' ', '░', '▒', '▓', '█')
msfrank/Higgins | higgins/http/dirlist.py | 1 | 4130 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Directory listing."""
# system imports
import os
import urllib
import stat
import time
# twisted imports
from higgins.http import iweb, resource, http, http_headers
def formatFileSize(size):
    """Render a byte count as a short human-readable string.

    Sizes below 1K are returned as a bare integer; larger sizes are
    truncated (not rounded) and suffixed with K, M or G.
    """
    for suffix, unit in (('', 1), ('K', 1024), ('M', 1024 ** 2)):
        if size < unit * 1024:
            return '%i%s' % (size / unit, suffix)
    # Anything 1G and above is expressed in gigabytes.
    return '%iG' % (size / (1024 ** 3))
class DirectoryLister(resource.Resource):
    """HTTP resource that renders an HTML listing of a filesystem directory."""

    def __init__(self, pathname, dirs=None,
        contentTypes={},
        contentEncodings={},
        defaultType='text/html'):
        # NOTE(review): the {} defaults are shared across instances (mutable
        # default arguments); safe only as long as they are never mutated.
        self.contentTypes = contentTypes
        self.contentEncodings = contentEncodings
        self.defaultType = defaultType
        # dirs allows usage of the File to specify what gets listed
        self.dirs = dirs
        self.path = pathname
        resource.Resource.__init__(self)

    def data_listing(self, request, data):
        """Return one row dict (link/linktext/size/type/lastmod) per entry.

        Directories get a trailing '/' and no size; files get a size via
        formatFileSize and a guessed MIME type.
        """
        if self.dirs is None:
            directory = os.listdir(self.path)
            directory.sort()
        else:
            directory = self.dirs
        files = []
        for path in directory:
            url = urllib.quote(path, '/')
            fullpath = os.path.join(self.path, path)
            try:
                st = os.stat(fullpath)
            except OSError:
                # Entry vanished or is unreadable: skip it silently.
                continue
            if stat.S_ISDIR(st.st_mode):
                url = url + '/'
                files.append({
                    'link': url,
                    'linktext': path + "/",
                    'size': '',
                    'type': '-',
                    'lastmod': time.strftime("%Y-%b-%d %H:%M", time.localtime(st.st_mtime))
                })
            else:
                # Imported lazily to avoid a circular import with static.py
                # (presumably -- confirm against the package layout).
                from higgins.http.static import getTypeAndEncoding
                mimetype, encoding = getTypeAndEncoding(
                    path,
                    self.contentTypes, self.contentEncodings, self.defaultType)
                filesize = st.st_size
                files.append({
                    'link': url,
                    'linktext': path,
                    'size': formatFileSize(filesize),
                    'type': mimetype,
                    'lastmod': time.strftime("%Y-%b-%d %H:%M", time.localtime(st.st_mtime))
                })
        return files

    def __repr__(self):
        return '<DirectoryLister of %r>' % self.path

    __str__ = __repr__

    def render(self, request):
        """Build the full HTML page and return it as a 200 text/html response."""
        title = "Directory listing for %s" % urllib.unquote(request.path)
        s= """<html><head><title>%s</title><style>
th, .even td, .odd td { padding-right: 0.5em; font-family: monospace}
.even-dir { background-color: #efe0ef }
.even { background-color: #eee }
.odd-dir {background-color: #f0d0ef }
.odd { background-color: #dedede }
.icon { text-align: center }
.listing {
margin-left: auto;
margin-right: auto;
width: 50%%;
padding: 0.1em;
}
body { border: 0; padding: 0; margin: 0; background-color: #efefef;}
h1 {padding: 0.1em; background-color: #777; color: white; border-bottom: thin white dashed;}
</style></head><body><div class="directory-listing"><h1>%s</h1>""" % (title,title)
        s+="<table>"
        s+="<tr><th>Filename</th><th>Size</th><th>Last Modified</th><th>File Type</th></tr>"
        even = False
        for row in self.data_listing(request, None):
            # Alternate row classes so CSS can stripe the table.
            s+='<tr class="%s">' % (even and 'even' or 'odd',)
            s+='<td><a href="%(link)s">%(linktext)s</a></td><td align="right">%(size)s</td><td>%(lastmod)s</td><td>%(type)s</td></tr>' % row
            even = not even
        s+="</table></div></body></html>"
        response = http.Response(200, {}, s)
        response.headers.setHeader("content-type", http_headers.MimeType('text', 'html'))
        return response
__all__ = ['DirectoryLister']
| lgpl-2.1 |
marckuz/django | tests/utils_tests/test_dateparse.py | 293 | 5308 | from __future__ import unicode_literals
import unittest
from datetime import date, datetime, time, timedelta
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.timezone import get_fixed_timezone
class DateParseTests(unittest.TestCase):
    """Tests for parse_date, parse_time and parse_datetime."""

    def test_parse_date(self):
        # Well-formed ISO dates, zero-padded or not.
        self.assertEqual(parse_date('2012-04-23'), date(2012, 4, 23))
        self.assertEqual(parse_date('2012-4-9'), date(2012, 4, 9))
        # Non-matching strings return None; matching-but-impossible
        # values raise ValueError.
        self.assertIsNone(parse_date('20120423'))
        with self.assertRaises(ValueError):
            parse_date('2012-04-56')

    def test_parse_time(self):
        # Well-formed times, with optional seconds and fractions.
        self.assertEqual(parse_time('09:15:00'), time(9, 15))
        self.assertEqual(parse_time('10:10'), time(10, 10))
        self.assertEqual(parse_time('10:20:30.400'), time(10, 20, 30, 400000))
        self.assertEqual(parse_time('4:8:16'), time(4, 8, 16))
        # Non-matching strings return None; out-of-range values raise.
        self.assertIsNone(parse_time('091500'))
        with self.assertRaises(ValueError):
            parse_time('09:15:90')

    def test_parse_datetime(self):
        valid = [
            # naive datetimes
            ('2012-04-23T09:15:00', datetime(2012, 4, 23, 9, 15)),
            ('2012-4-9 4:8:16', datetime(2012, 4, 9, 4, 8, 16)),
            # timezone-aware datetimes in every supported offset syntax
            ('2012-04-23T09:15:00Z',
             datetime(2012, 4, 23, 9, 15, 0, 0, get_fixed_timezone(0))),
            ('2012-4-9 4:8:16-0320',
             datetime(2012, 4, 9, 4, 8, 16, 0, get_fixed_timezone(-200))),
            ('2012-04-23T10:20:30.400+02:30',
             datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(150))),
            ('2012-04-23T10:20:30.400+02',
             datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(120))),
            ('2012-04-23T10:20:30.400-02',
             datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(-120))),
        ]
        for source, expected in valid:
            self.assertEqual(parse_datetime(source), expected)
        # Non-matching strings return None; out-of-range values raise.
        self.assertIsNone(parse_datetime('20120423091500'))
        with self.assertRaises(ValueError):
            parse_datetime('2012-04-56T09:15:90')
class DurationParseTests(unittest.TestCase):
    """Tests for parse_duration: Python's own str(timedelta) output, the
    "[DD] [HH:[MM:]]ss[.uuuuuu]" form, and the supported subset of ISO 8601.
    """

    def test_parse_python_format(self):
        # Round-trip: str(timedelta) -> parse_duration -> equal timedelta.
        timedeltas = [
            timedelta(days=4, minutes=15, seconds=30, milliseconds=100),  # fractions of seconds
            timedelta(hours=10, minutes=15, seconds=30),  # hours, minutes, seconds
            timedelta(days=4, minutes=15, seconds=30),  # multiple days
            timedelta(days=1, minutes=00, seconds=00),  # single day
            timedelta(days=-4, minutes=15, seconds=30),  # negative durations
            timedelta(minutes=15, seconds=30),  # minute & seconds
            timedelta(seconds=30),  # seconds
        ]
        for delta in timedeltas:
            self.assertEqual(parse_duration(format(delta)), delta)

    def test_seconds(self):
        self.assertEqual(parse_duration('30'), timedelta(seconds=30))

    def test_minutes_seconds(self):
        self.assertEqual(parse_duration('15:30'), timedelta(minutes=15, seconds=30))
        self.assertEqual(parse_duration('5:30'), timedelta(minutes=5, seconds=30))

    def test_hours_minutes_seconds(self):
        # Components above their usual range are allowed and accumulate.
        self.assertEqual(parse_duration('10:15:30'), timedelta(hours=10, minutes=15, seconds=30))
        self.assertEqual(parse_duration('1:15:30'), timedelta(hours=1, minutes=15, seconds=30))
        self.assertEqual(parse_duration('100:200:300'), timedelta(hours=100, minutes=200, seconds=300))

    def test_days(self):
        self.assertEqual(parse_duration('4 15:30'), timedelta(days=4, minutes=15, seconds=30))
        self.assertEqual(parse_duration('4 10:15:30'), timedelta(days=4, hours=10, minutes=15, seconds=30))

    def test_fractions_of_seconds(self):
        # Fractional digits map onto milli/microseconds positionally.
        self.assertEqual(parse_duration('15:30.1'), timedelta(minutes=15, seconds=30, milliseconds=100))
        self.assertEqual(parse_duration('15:30.01'), timedelta(minutes=15, seconds=30, milliseconds=10))
        self.assertEqual(parse_duration('15:30.001'), timedelta(minutes=15, seconds=30, milliseconds=1))
        self.assertEqual(parse_duration('15:30.0001'), timedelta(minutes=15, seconds=30, microseconds=100))
        self.assertEqual(parse_duration('15:30.00001'), timedelta(minutes=15, seconds=30, microseconds=10))
        self.assertEqual(parse_duration('15:30.000001'), timedelta(minutes=15, seconds=30, microseconds=1))

    def test_negative(self):
        self.assertEqual(parse_duration('-4 15:30'), timedelta(days=-4, minutes=15, seconds=30))

    def test_iso_8601(self):
        # Year/month/week designators have no fixed length and are rejected.
        self.assertEqual(parse_duration('P4Y'), None)
        self.assertEqual(parse_duration('P4M'), None)
        self.assertEqual(parse_duration('P4W'), None)
        self.assertEqual(parse_duration('P4D'), timedelta(days=4))
        self.assertEqual(parse_duration('P0.5D'), timedelta(hours=12))
        self.assertEqual(parse_duration('PT5H'), timedelta(hours=5))
        self.assertEqual(parse_duration('PT5M'), timedelta(minutes=5))
        self.assertEqual(parse_duration('PT5S'), timedelta(seconds=5))
        self.assertEqual(parse_duration('PT0.000005S'), timedelta(microseconds=5))
| bsd-3-clause |
joshloyal/scikit-learn | examples/plot_kernel_approximation.py | 26 | 8069 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it is lying in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)

# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
#         Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause

# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time

# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
                                          Nystroem)
from sklearn.decomposition import PCA

# The digits dataset
digits = datasets.load_digits(n_class=9)

# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)

# We learn the digits on the first half of the digits
data_train, targets_train = (data[:n_samples // 2],
                             digits.target[:n_samples // 2])

# Now predict the value of the digit on the second half:
data_test, targets_test = (data[n_samples // 2:],
                           digits.target[n_samples // 2:])
# data_test = scaler.transform(data_test)

# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()

# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
                                        ("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
                                         ("svm", svm.LinearSVC())])

# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time

linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time

# Sweep the number of approximation components (30, 60, ..., 270) and
# record both accuracy and training time for each approximate kernel.
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []

for D in sample_sizes:
    fourier_approx_svm.set_params(feature_map__n_components=D)
    nystroem_approx_svm.set_params(feature_map__n_components=D)
    start = time()
    nystroem_approx_svm.fit(data_train, targets_train)
    nystroem_times.append(time() - start)

    start = time()
    fourier_approx_svm.fit(data_train, targets_train)
    fourier_times.append(time() - start)

    fourier_score = fourier_approx_svm.score(data_test, targets_test)
    nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
    nystroem_scores.append(nystroem_score)
    fourier_scores.append(fourier_score)

# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second y axis for timings
timescale = plt.subplot(212)

accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
               label='Nystroem approx. kernel')

accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
               label='Fourier approx. kernel')

# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
              [linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
               [linear_svm_time, linear_svm_time], '--', label='linear svm')

accuracy.plot([sample_sizes[0], sample_sizes[-1]],
              [kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
               [kernel_svm_time, kernel_svm_time], '--', label='rbf svm')

# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")

# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')

# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)

X = pca.transform(data_train)

# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])

# title for the plots
titles = ['SVC with rbf kernel',
          'SVC (linear kernel)\n with Fourier rbf feature map\n'
          'n_components=100',
          'SVC (linear kernel)\n with Nystroem rbf feature map\n'
          'n_components=100']

plt.tight_layout()
plt.figure(figsize=(12, 5))

# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
                         fourier_approx_svm)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(1, 3, i + 1)
    Z = clf.predict(flat_grid)

    # Put the result into a color plot
    Z = Z.reshape(grid.shape[:-1])
    plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
    plt.axis('off')

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)

    plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
xiandiancloud/ji | cms/djangoapps/contentstore/views/helpers.py | 28 | 5058 | """
Helper methods for Studio views.
"""
from __future__ import absolute_import
import urllib
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from edxmako.shortcuts import render_to_string, render_to_response
from xblock.core import XBlock
from xmodule.modulestore.django import modulestore
from contentstore.utils import reverse_course_url, reverse_usage_url
__all__ = ['edge', 'event', 'landing']
# points to the temporary course landing page with log in and sign up
def landing(request, org, course, coursename):
    """Render the temporary course landing page (log in / sign up)."""
    return render_to_response('temp-course-landing.html', {})
# points to the temporary edge page
def edge(request):
    """Redirect the legacy edge landing page to the site root."""
    return redirect('/')
def event(request):
    '''
    A noop to swallow the analytics call so that cms methods don't spook and poor developers looking at
    console logs don't get distracted :-)
    '''
    # 204 No Content: acknowledge the analytics request without a body.
    return HttpResponse(status=204)
def render_from_lms(template_name, dictionary, context=None, namespace='main'):
    """
    Render a template using the LMS MAKO_TEMPLATES

    The namespace is prefixed with "lms." so the lookup resolves against
    the LMS template directories rather than Studio's.
    """
    return render_to_string(template_name, dictionary, context, namespace="lms." + namespace)
def get_parent_xblock(xblock):
    """Return the parent xblock of ``xblock``, or ``None`` if it has no parent."""
    parent_location = modulestore().get_parent_location(xblock.location)
    if parent_location is None:
        return None
    return modulestore().get_item(parent_location)
def is_unit(xblock, parent_xblock=None):
    """
    Return True if ``xblock`` is a vertical treated as a unit.

    A unit is a vertical whose direct parent is a sequential (subsection).
    ``parent_xblock`` may be supplied to avoid a modulestore lookup.
    """
    if xblock.category != 'vertical':
        return False
    if parent_xblock is None:
        parent_xblock = get_parent_xblock(xblock)
    if not parent_xblock:
        return False
    return parent_xblock.category == 'sequential'
def xblock_has_own_studio_page(xblock, parent_xblock=None):
    """
    Return True if the xblock has an associated Studio page of its own.

    Most xblocks are shown on their parent's page; the exceptions are:
      1. courses,
      2. verticals that are themselves units, or direct children of a unit,
      3. any xblock that supports children.
    """
    if is_unit(xblock, parent_xblock):
        return True
    if xblock.category == 'vertical':
        if parent_xblock is None:
            parent_xblock = get_parent_xblock(xblock)
        return is_unit(parent_xblock) if parent_xblock else False
    # All other xblocks with children have their own page
    return xblock.has_children
def xblock_studio_url(xblock, parent_xblock=None):
    """
    Returns the Studio editing URL for the specified xblock.

    Returns None for blocks that are edited on their parent's page.
    """
    if not xblock_has_own_studio_page(xblock, parent_xblock):
        return None
    category = xblock.category
    if category == 'course':
        return reverse_course_url('course_handler', xblock.location.course_key)
    elif category in ('chapter', 'sequential'):
        # Sections/subsections are shown expanded on the course outline page.
        return u'{url}?show={usage_key}'.format(
            url=reverse_course_url('course_handler', xblock.location.course_key),
            usage_key=urllib.quote(unicode(xblock.location))
        )
    else:
        return reverse_usage_url('container_handler', xblock.location)
def xblock_type_display_name(xblock, default_display_name=None):
    """
    Returns the display name for the specified type of xblock. Note that an instance can be passed in
    for context dependent names, e.g. a vertical beneath a sequential is a Unit.

    :param xblock: An xblock instance or the type of xblock.
    :param default_display_name: The default value to return if no display name can be found.
    :return:
    """
    if hasattr(xblock, 'category'):
        category = xblock.category
        if category == 'vertical' and not is_unit(xblock):
            # A vertical that is not a direct child of a sequential.
            return _('Vertical')
    else:
        # A bare category string was passed instead of an instance.
        category = xblock
    if category == 'chapter':
        return _('Section')
    elif category == 'sequential':
        return _('Subsection')
    elif category == 'vertical':
        return _('Unit')
    # Fall back to the XBlock class's declared display_name default.
    component_class = XBlock.load_class(category, select=settings.XBLOCK_SELECT_FUNCTION)
    if hasattr(component_class, 'display_name') and component_class.display_name.default:
        return _(component_class.display_name.default)
    else:
        return default_display_name
def xblock_primary_child_category(xblock):
    """
    Return the primary child category for the xblock, or None if there is none.

    Follows the course hierarchy: course -> chapter -> sequential -> vertical.
    """
    primary_children = {
        'course': 'chapter',
        'chapter': 'sequential',
        'sequential': 'vertical',
    }
    return primary_children.get(xblock.category)
| agpl-3.0 |
shrimpboyho/flappy-bird-py | pyglet-1.1.4/pyglet/text/formats/plaintext.py | 9 | 2038 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Plain text decoder.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import pyglet
class PlainTextDecoder(pyglet.text.DocumentDecoder):
    """Decoder that loads plain text into an unformatted pyglet document."""

    def decode(self, text, location=None):
        # `location` is part of the DocumentDecoder interface; plain text
        # references no external resources, so it is ignored.
        document = pyglet.text.document.UnformattedDocument()
        document.insert_text(0, text)
        return document
| gpl-2.0 |
2014c2g23/2015cd_midterm- | static/Brython3.1.0-20150301-090019/Lib/optparse.py | 728 | 60616 | """A powerful, extensible, and easy-to-use option parser.
By Greg Ward <gward@python.net>
Originally distributed as Optik.
For support, use the optik-users@lists.sourceforge.net mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
Simple usage example:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
"""
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
# i18n support: use the real gettext machinery when available, otherwise
# fall back to identity functions so the module still works without
# translation catalogs installed.
try:
    from gettext import gettext, ngettext
except ImportError:
    def gettext(message):
        return message

    def ngettext(singular, plural, n):
        # Minimal English pluralization: singular form only when n == 1.
        if n == 1:
            return singular
        return plural

_ = gettext
class OptParseError (Exception):
    """Base class for all optparse-specific errors; carries a message string."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
class OptionError (OptParseError):
    """
    Raised if an Option instance is created with invalid or
    inconsistent arguments.
    """

    def __init__(self, msg, option):
        self.msg = msg
        # 'option' may be any object; only its str() form is kept for display.
        self.option_id = str(option)

    def __str__(self):
        if self.option_id:
            return "option %s: %s" % (self.option_id, self.msg)
        else:
            return self.msg
class OptionConflictError (OptionError):
    """
    Raised if conflicting options are added to an OptionParser.
    """
class OptionValueError (OptParseError):
    """
    Raised if an invalid option value is encountered on the command
    line.
    """
class BadOptionError (OptParseError):
    """
    Raised if an invalid option is seen on the command line.
    """

    def __init__(self, opt_str):
        self.opt_str = opt_str

    def __str__(self):
        return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
    """
    Raised if an ambiguous option is seen on the command line.
    """

    def __init__(self, opt_str, possibilities):
        BadOptionError.__init__(self, opt_str)
        # List of option strings the abbreviated input could match.
        self.possibilities = possibilities

    def __str__(self):
        return (_("ambiguous option: %s (%s?)")
                % (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
    """
    Abstract base class for formatting option help.  OptionParser
    instances should use one of the HelpFormatter subclasses for
    formatting help; by default IndentedHelpFormatter is used.

    Instance attributes:
      parser : OptionParser
        the controlling OptionParser instance
      indent_increment : int
        the number of columns to indent per nesting level
      max_help_position : int
        the maximum starting column for option help text
      help_position : int
        the calculated starting column for option help text;
        initially the same as the maximum
      width : int
        total number of columns for output (pass None to constructor for
        this value to be taken from the $COLUMNS environment variable)
      level : int
        current indentation level
      current_indent : int
        current indentation level (in columns)
      help_width : int
        number of columns available for option help text (calculated)
      default_tag : str
        text to replace with each option's default value, "%default"
        by default.  Set to false value to disable default value expansion.
      option_strings : { Option : str }
        maps Option instances to the snippet of help text explaining
        the syntax of that option, e.g. "-h, --help" or
        "-fFILE, --file=FILE"
      _short_opt_fmt : str
        format string controlling how short options with values are
        printed in help text.  Must be either "%s%s" ("-fFILE") or
        "%s %s" ("-f FILE"), because those are the two syntaxes that
        Optik supports.
      _long_opt_fmt : str
        similar but for long options; must be either "%s %s" ("--file FILE")
        or "%s=%s" ("--file=FILE").
    """

    # Text shown for an option whose default is NO_DEFAULT or None.
    NO_DEFAULT_VALUE = "none"

    def __init__(self,
                 indent_increment,
                 max_help_position,
                 width,
                 short_first):
        self.parser = None
        self.indent_increment = indent_increment
        self.help_position = self.max_help_position = max_help_position
        if width is None:
            # Fall back to the terminal width advertised via $COLUMNS,
            # or 80 columns if that is missing/unparseable.
            try:
                width = int(os.environ['COLUMNS'])
            except (KeyError, ValueError):
                width = 80
            width -= 2
        self.width = width
        self.current_indent = 0
        self.level = 0
        self.help_width = None          # computed later
        self.short_first = short_first
        self.default_tag = "%default"
        self.option_strings = {}
        self._short_opt_fmt = "%s %s"
        self._long_opt_fmt = "%s=%s"

    def set_parser(self, parser):
        self.parser = parser

    def set_short_opt_delimiter(self, delim):
        # Only "" ("-fFILE") and " " ("-f FILE") are valid delimiters.
        if delim not in ("", " "):
            raise ValueError(
                "invalid metavar delimiter for short options: %r" % delim)
        self._short_opt_fmt = "%s" + delim + "%s"

    def set_long_opt_delimiter(self, delim):
        # Only "=" ("--file=FILE") and " " ("--file FILE") are valid.
        if delim not in ("=", " "):
            raise ValueError(
                "invalid metavar delimiter for long options: %r" % delim)
        self._long_opt_fmt = "%s" + delim + "%s"

    def indent(self):
        self.current_indent += self.indent_increment
        self.level += 1

    def dedent(self):
        self.current_indent -= self.indent_increment
        assert self.current_indent >= 0, "Indent decreased below 0."
        self.level -= 1

    def format_usage(self, usage):
        raise NotImplementedError("subclasses must implement")

    def format_heading(self, heading):
        raise NotImplementedError("subclasses must implement")

    def _format_text(self, text):
        """
        Format a paragraph of free-form text for inclusion in the
        help output at the current indentation level.
        """
        text_width = self.width - self.current_indent
        indent = " "*self.current_indent
        return textwrap.fill(text,
                             text_width,
                             initial_indent=indent,
                             subsequent_indent=indent)

    def format_description(self, description):
        if description:
            return self._format_text(description) + "\n"
        else:
            return ""

    def format_epilog(self, epilog):
        if epilog:
            return "\n" + self._format_text(epilog) + "\n"
        else:
            return ""

    def expand_default(self, option):
        # Substitute the option's default value for the "%default" tag
        # in its help string (disabled when default_tag is falsy).
        if self.parser is None or not self.default_tag:
            return option.help

        default_value = self.parser.defaults.get(option.dest)
        if default_value is NO_DEFAULT or default_value is None:
            default_value = self.NO_DEFAULT_VALUE

        return option.help.replace(self.default_tag, str(default_value))

    def format_option(self, option):
        # The help for each option consists of two parts:
        #   * the opt strings and metavars
        #     eg. ("-x", or "-fFILENAME, --file=FILENAME")
        #   * the user-supplied help string
        #     eg. ("turn on expert mode", "read data from FILENAME")
        #
        # If possible, we write both of these on the same line:
        #   -x      turn on expert mode
        #
        # But if the opt string list is too long, we put the help
        # string on a second line, indented to the same column it would
        # start in if it fit on the first line.
        #   -fFILENAME, --file=FILENAME
        #           read data from FILENAME
        result = []
        opts = self.option_strings[option]
        opt_width = self.help_position - self.current_indent - 2
        if len(opts) > opt_width:
            opts = "%*s%s\n" % (self.current_indent, "", opts)
            indent_first = self.help_position
        else:                       # start help on same line as opts
            opts = "%*s%-*s  " % (self.current_indent, "", opt_width, opts)
            indent_first = 0
        result.append(opts)
        if option.help:
            help_text = self.expand_default(option)
            help_lines = textwrap.wrap(help_text, self.help_width)
            result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
            result.extend(["%*s%s\n" % (self.help_position, "", line)
                           for line in help_lines[1:]])
        elif opts[-1] != "\n":
            result.append("\n")
        return "".join(result)

    def store_option_strings(self, parser):
        # Pre-format every option's "opt string" snippet and derive
        # help_position / help_width from the widest one found.
        self.indent()
        max_len = 0
        for opt in parser.option_list:
            strings = self.format_option_strings(opt)
            self.option_strings[opt] = strings
            max_len = max(max_len, len(strings) + self.current_indent)
        self.indent()
        for group in parser.option_groups:
            for opt in group.option_list:
                strings = self.format_option_strings(opt)
                self.option_strings[opt] = strings
                max_len = max(max_len, len(strings) + self.current_indent)
        self.dedent()
        self.dedent()
        self.help_position = min(max_len + 2, self.max_help_position)
        self.help_width = self.width - self.help_position

    def format_option_strings(self, option):
        """Return a comma-separated list of option strings & metavariables."""
        if option.takes_value():
            metavar = option.metavar or option.dest.upper()
            short_opts = [self._short_opt_fmt % (sopt, metavar)
                          for sopt in option._short_opts]
            long_opts = [self._long_opt_fmt % (lopt, metavar)
                         for lopt in option._long_opts]
        else:
            short_opts = option._short_opts
            long_opts = option._long_opts

        if self.short_first:
            opts = short_opts + long_opts
        else:
            opts = long_opts + short_opts

        return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
    """Format help with indented section bodies.
    """

    def __init__(self,
                 indent_increment=2,
                 max_help_position=24,
                 width=None,
                 short_first=1):
        HelpFormatter.__init__(
            self, indent_increment, max_help_position, width, short_first)

    def format_usage(self, usage):
        return _("Usage: %s\n") % usage

    def format_heading(self, heading):
        # Headings are indented to the current level and end with ":".
        return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
    """Format help with underlined section headers.
    """

    def __init__(self,
                 indent_increment=0,
                 max_help_position=24,
                 width=None,
                 short_first=0):
        HelpFormatter.__init__ (
            self, indent_increment, max_help_position, width, short_first)

    def format_usage(self, usage):
        return "%s  %s\n" % (self.format_heading(_("Usage")), usage)

    def format_heading(self, heading):
        # Underline with "=" at level 0 and "-" at level 1
        # ("=-"[self.level] indexes into the two-character string).
        return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
def _parse_int(val):
    """Parse an integer literal, honoring 0x/0b/0 radix prefixes."""
    return _parse_num(val, int)

# Maps Option.type name -> (conversion function, human-readable type name
# used in error messages).  "long" is accepted as a synonym for "int".
_builtin_cvt = { "int" : (_parse_int, _("integer")),
                 "long" : (_parse_int, _("integer")),
                 "float" : (float, _("floating-point")),
                 "complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
    """Convert *value* using the converter registered in _builtin_cvt for
    option.type; raise OptionValueError if conversion fails."""
    (cvt, what) = _builtin_cvt[option.type]
    try:
        return cvt(value)
    except ValueError:
        raise OptionValueError(
            _("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
    """Validate a "choice" option: *value* must be one of option.choices;
    otherwise raise OptionValueError listing the valid choices."""
    if value not in option.choices:
        choices = ", ".join(map(repr, option.choices))
        raise OptionValueError(
            _("option %s: invalid choice: %r (choose from %s)")
            % (opt, value, choices))
    return value
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
    """
    Instance attributes:
      _short_opts : [string]
      _long_opts : [string]
      action : string
      type : string
      dest : string
      default : any
      nargs : int
      const : any
      choices : [string]
      callback : function
      callback_args : (any*)
      callback_kwargs : { string : any }
      help : string
      metavar : string
    """

    # The list of instance attributes that may be set through
    # keyword args to the constructor.
    ATTRS = ['action',
             'type',
             'dest',
             'default',
             'nargs',
             'const',
             'choices',
             'callback',
             'callback_args',
             'callback_kwargs',
             'help',
             'metavar']

    # The set of actions allowed by option parsers.  Explicitly listed
    # here so the constructor can validate its arguments.
    ACTIONS = ("store",
               "store_const",
               "store_true",
               "store_false",
               "append",
               "append_const",
               "count",
               "callback",
               "help",
               "version")

    # The set of actions that involve storing a value somewhere;
    # also listed just for constructor argument validation.  (If
    # the action is one of these, there must be a destination.)
    STORE_ACTIONS = ("store",
                     "store_const",
                     "store_true",
                     "store_false",
                     "append",
                     "append_const",
                     "count")

    # The set of actions for which it makes sense to supply a value
    # type, ie. which may consume an argument from the command line.
    TYPED_ACTIONS = ("store",
                     "append",
                     "callback")

    # The set of actions which *require* a value type, ie. that
    # always consume an argument from the command line.
    ALWAYS_TYPED_ACTIONS = ("store",
                            "append")

    # The set of actions which take a 'const' attribute.
    CONST_ACTIONS = ("store_const",
                     "append_const")

    # The set of known types for option parsers.  Again, listed here for
    # constructor argument validation.
    TYPES = ("string", "int", "long", "float", "complex", "choice")

    # Dictionary of argument checking functions, which convert and
    # validate option arguments according to the option type.
    #
    # Signature of checking functions is:
    #    check(option : Option, opt : string, value : string) -> any
    # where
    #    option is the Option instance calling the checker
    #    opt is the actual option seen on the command-line
    #      (eg. "-a", "--file")
    #    value is the option argument seen on the command-line
    #
    # The return value should be in the appropriate Python type
    # for option.type -- eg. an integer if option.type == "int".
    #
    # If no checker is defined for a type, arguments will be
    # unchecked and remain strings.
    TYPE_CHECKER = { "int"    : check_builtin,
                     "long"   : check_builtin,
                     "float"  : check_builtin,
                     "complex": check_builtin,
                     "choice" : check_choice,
                   }

    # CHECK_METHODS is a list of unbound method objects; they are called
    # by the constructor, in order, after all attributes are
    # initialized.  The list is created and filled in later, after all
    # the methods are actually defined.  (I just put it here because I
    # like to define and document all class attributes in the same
    # place.)  Subclasses that add another _check_*() method should
    # define their own CHECK_METHODS list that adds their check method
    # to those from this class.
    CHECK_METHODS = None

    # -- Constructor/initialization methods ----------------------------

    def __init__(self, *opts, **attrs):
        # Set _short_opts, _long_opts attrs from 'opts' tuple.
        # Have to be set now, in case no option strings are supplied.
        self._short_opts = []
        self._long_opts = []
        opts = self._check_opt_strings(opts)
        self._set_opt_strings(opts)

        # Set all other attrs (action, type, etc.) from 'attrs' dict
        self._set_attrs(attrs)

        # Check all the attributes we just set.  There are lots of
        # complicated interdependencies, but luckily they can be farmed
        # out to the _check_*() methods listed in CHECK_METHODS -- which
        # could be handy for subclasses!  The one thing these all share
        # is that they raise OptionError if they discover a problem.
        for checker in self.CHECK_METHODS:
            checker(self)

    def _check_opt_strings(self, opts):
        # Filter out None because early versions of Optik had exactly
        # one short option and one long option, either of which
        # could be None.
        opts = [opt for opt in opts if opt]
        if not opts:
            raise TypeError("at least one option string must be supplied")
        return opts

    def _set_opt_strings(self, opts):
        # Classify each option string as short ("-x") or long ("--xxx"),
        # validating its form along the way.
        for opt in opts:
            if len(opt) < 2:
                raise OptionError(
                    "invalid option string %r: "
                    "must be at least two characters long" % opt, self)
            elif len(opt) == 2:
                if not (opt[0] == "-" and opt[1] != "-"):
                    raise OptionError(
                        "invalid short option string %r: "
                        "must be of the form -x, (x any non-dash char)" % opt,
                        self)
                self._short_opts.append(opt)
            else:
                if not (opt[0:2] == "--" and opt[2] != "-"):
                    raise OptionError(
                        "invalid long option string %r: "
                        "must start with --, followed by non-dash" % opt,
                        self)
                self._long_opts.append(opt)

    def _set_attrs(self, attrs):
        # Copy recognized keyword arguments onto self; anything left over
        # in 'attrs' afterwards is an unknown keyword and is an error.
        for attr in self.ATTRS:
            if attr in attrs:
                setattr(self, attr, attrs[attr])
                del attrs[attr]
            else:
                if attr == 'default':
                    setattr(self, attr, NO_DEFAULT)
                else:
                    setattr(self, attr, None)
        if attrs:
            attrs = sorted(attrs.keys())
            raise OptionError(
                "invalid keyword arguments: %s" % ", ".join(attrs),
                self)

    # -- Constructor validation methods --------------------------------

    def _check_action(self):
        if self.action is None:
            self.action = "store"
        elif self.action not in self.ACTIONS:
            raise OptionError("invalid action: %r" % self.action, self)

    def _check_type(self):
        if self.type is None:
            if self.action in self.ALWAYS_TYPED_ACTIONS:
                if self.choices is not None:
                    # The "choices" attribute implies "choice" type.
                    self.type = "choice"
                else:
                    # No type given?  "string" is the most sensible default.
                    self.type = "string"
        else:
            # Allow type objects or builtin type conversion functions
            # (int, str, etc.) as an alternative to their names.  (The
            # complicated check of builtins is only necessary for
            # Python 2.1 and earlier, and is short-circuited by the
            # first check on modern Pythons.)
            import builtins
            if ( isinstance(self.type, type) or
                 (hasattr(self.type, "__name__") and
                  getattr(builtins, self.type.__name__, None) is self.type) ):
                self.type = self.type.__name__

            if self.type == "str":
                self.type = "string"

            if self.type not in self.TYPES:
                raise OptionError("invalid option type: %r" % self.type, self)
            if self.action not in self.TYPED_ACTIONS:
                raise OptionError(
                    "must not supply a type for action %r" % self.action, self)

    def _check_choice(self):
        if self.type == "choice":
            if self.choices is None:
                raise OptionError(
                    "must supply a list of choices for type 'choice'", self)
            elif not isinstance(self.choices, (tuple, list)):
                raise OptionError(
                    "choices must be a list of strings ('%s' supplied)"
                    % str(type(self.choices)).split("'")[1], self)
        elif self.choices is not None:
            raise OptionError(
                "must not supply choices for type %r" % self.type, self)

    def _check_dest(self):
        # No destination given, and we need one for this action.  The
        # self.type check is for callbacks that take a value.
        takes_value = (self.action in self.STORE_ACTIONS or
                       self.type is not None)
        if self.dest is None and takes_value:

            # Glean a destination from the first long option string,
            # or from the first short option string if no long options.
            if self._long_opts:
                # eg. "--foo-bar" -> "foo_bar"
                self.dest = self._long_opts[0][2:].replace('-', '_')
            else:
                self.dest = self._short_opts[0][1]

    def _check_const(self):
        if self.action not in self.CONST_ACTIONS and self.const is not None:
            raise OptionError(
                "'const' must not be supplied for action %r" % self.action,
                self)

    def _check_nargs(self):
        if self.action in self.TYPED_ACTIONS:
            if self.nargs is None:
                self.nargs = 1
        elif self.nargs is not None:
            raise OptionError(
                "'nargs' must not be supplied for action %r" % self.action,
                self)

    def _check_callback(self):
        if self.action == "callback":
            if not callable(self.callback):
                raise OptionError(
                    "callback not callable: %r" % self.callback, self)
            if (self.callback_args is not None and
                not isinstance(self.callback_args, tuple)):
                raise OptionError(
                    "callback_args, if supplied, must be a tuple: not %r"
                    % self.callback_args, self)
            if (self.callback_kwargs is not None and
                not isinstance(self.callback_kwargs, dict)):
                raise OptionError(
                    "callback_kwargs, if supplied, must be a dict: not %r"
                    % self.callback_kwargs, self)
        else:
            if self.callback is not None:
                raise OptionError(
                    "callback supplied (%r) for non-callback option"
                    % self.callback, self)
            if self.callback_args is not None:
                raise OptionError(
                    "callback_args supplied for non-callback option", self)
            if self.callback_kwargs is not None:
                raise OptionError(
                    "callback_kwargs supplied for non-callback option", self)

    CHECK_METHODS = [_check_action,
                     _check_type,
                     _check_choice,
                     _check_dest,
                     _check_const,
                     _check_nargs,
                     _check_callback]

    # -- Miscellaneous methods -----------------------------------------

    def __str__(self):
        return "/".join(self._short_opts + self._long_opts)

    __repr__ = _repr

    def takes_value(self):
        return self.type is not None

    def get_opt_string(self):
        # Prefer the first long option string for error messages, etc.
        if self._long_opts:
            return self._long_opts[0]
        else:
            return self._short_opts[0]

    # -- Processing methods --------------------------------------------

    def check_value(self, opt, value):
        checker = self.TYPE_CHECKER.get(self.type)
        if checker is None:
            return value
        else:
            return checker(self, opt, value)

    def convert_value(self, opt, value):
        # With nargs > 1, 'value' is a sequence and a tuple of converted
        # values is returned; with nargs == 1 a single value is returned.
        if value is not None:
            if self.nargs == 1:
                return self.check_value(opt, value)
            else:
                return tuple([self.check_value(opt, v) for v in value])

    def process(self, opt, value, values, parser):

        # First, convert the value(s) to the right type.  Howl if any
        # value(s) are bogus.
        value = self.convert_value(opt, value)

        # And then take whatever action is expected of us.
        # This is a separate method to make life easier for
        # subclasses to add new actions.
        return self.take_action(
            self.action, self.dest, opt, value, values, parser)

    def take_action(self, action, dest, opt, value, values, parser):
        if action == "store":
            setattr(values, dest, value)
        elif action == "store_const":
            setattr(values, dest, self.const)
        elif action == "store_true":
            setattr(values, dest, True)
        elif action == "store_false":
            setattr(values, dest, False)
        elif action == "append":
            values.ensure_value(dest, []).append(value)
        elif action == "append_const":
            values.ensure_value(dest, []).append(self.const)
        elif action == "count":
            setattr(values, dest, values.ensure_value(dest, 0) + 1)
        elif action == "callback":
            args = self.callback_args or ()
            kwargs = self.callback_kwargs or {}
            self.callback(self, opt, value, parser, *args, **kwargs)
        elif action == "help":
            parser.print_help()
            parser.exit()
        elif action == "version":
            parser.print_version()
            parser.exit()
        else:
            # NOTE(review): the message reports self.action rather than the
            # 'action' argument; identical in practice since process()
            # always passes self.action.
            raise ValueError("unknown action %r" % self.action)

        return 1

# class Option
# Sentinel strings: an option whose help is SUPPRESS_HELP is omitted from
# help output (see format_option_help), and a usage of SUPPRESS_USAGE
# suppresses the usage line (see set_usage).  Built by concatenation,
# presumably so the sentinel value never appears verbatim in this file --
# TODO confirm rationale.
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
class Values:
    """A bag of option values.

    Each option destination becomes an instance attribute.  Values can be
    merged in from dictionaries, imported modules, or executed Python
    config files via the _update_*() / read_*() methods.
    """

    def __init__(self, defaults=None):
        if defaults:
            for (attr, val) in defaults.items():
                setattr(self, attr, val)

    def __str__(self):
        return str(self.__dict__)

    __repr__ = _repr

    def __eq__(self, other):
        # Comparable against other Values instances and plain dicts.
        if isinstance(other, Values):
            return self.__dict__ == other.__dict__
        elif isinstance(other, dict):
            return self.__dict__ == other
        else:
            return NotImplemented

    def _update_careful(self, dict):
        """
        Update the option values from an arbitrary dictionary, but only
        use keys from dict that already have a corresponding attribute
        in self.  Any keys in dict without a corresponding attribute
        are silently ignored.
        """
        for attr in dir(self):
            if attr in dict:
                dval = dict[attr]
                # None values never overwrite an existing attribute.
                if dval is not None:
                    setattr(self, attr, dval)

    def _update_loose(self, dict):
        """
        Update the option values from an arbitrary dictionary,
        using all keys from the dictionary regardless of whether
        they have a corresponding attribute in self or not.
        """
        self.__dict__.update(dict)

    def _update(self, dict, mode):
        """Dispatch to _update_careful() or _update_loose() per *mode*."""
        if mode == "careful":
            self._update_careful(dict)
        elif mode == "loose":
            self._update_loose(dict)
        else:
            raise ValueError("invalid update mode: %r" % mode)

    def read_module(self, modname, mode="careful"):
        """Import *modname* and merge its top-level names into self."""
        __import__(modname)
        mod = sys.modules[modname]
        self._update(vars(mod), mode)

    def read_file(self, filename, mode="careful"):
        """Execute *filename* as Python source and merge the resulting
        top-level names into self."""
        vars = {}
        # Use a context manager so the file handle is closed promptly
        # (the original left the handle open for the garbage collector).
        with open(filename) as f:
            exec(f.read(), vars)
        self._update(vars, mode)

    def ensure_value(self, attr, value):
        """Set *attr* to *value* only if it is missing or None; return
        the (possibly pre-existing) attribute value."""
        if not hasattr(self, attr) or getattr(self, attr) is None:
            setattr(self, attr, value)
        return getattr(self, attr)
class OptionContainer:
    """
    Abstract base class.

    Class attributes:
      standard_option_list : [Option]
        list of standard options that will be accepted by all instances
        of this parser class (intended to be overridden by subclasses).

    Instance attributes:
      option_list : [Option]
        the list of Option objects contained by this OptionContainer
      _short_opt : { string : Option }
        dictionary mapping short option strings, eg. "-f" or "-X",
        to the Option instances that implement them.  If an Option
        has multiple short option strings, it will appears in this
        dictionary multiple times. [1]
      _long_opt : { string : Option }
        dictionary mapping long option strings, eg. "--file" or
        "--exclude", to the Option instances that implement them.
        Again, a given Option can occur multiple times in this
        dictionary. [1]
      defaults : { string : any }
        dictionary mapping option destination names to default
        values for each destination [1]

    [1] These mappings are common to (shared by) all components of the
        controlling OptionParser, where they are initially created.
    """

    def __init__(self, option_class, conflict_handler, description):
        # Initialize the option list and related data structures.
        # This method must be provided by subclasses, and it must
        # initialize at least the following instance attributes:
        # option_list, _short_opt, _long_opt, defaults.
        self._create_option_list()

        self.option_class = option_class
        self.set_conflict_handler(conflict_handler)
        self.set_description(description)

    def _create_option_mappings(self):
        # For use by OptionParser constructor -- create the master
        # option mappings used by this OptionParser and all
        # OptionGroups that it owns.
        self._short_opt = {}            # single letter -> Option instance
        self._long_opt = {}             # long option -> Option instance
        self.defaults = {}              # maps option dest -> default value

    def _share_option_mappings(self, parser):
        # For use by OptionGroup constructor -- use shared option
        # mappings from the OptionParser that owns this OptionGroup.
        self._short_opt = parser._short_opt
        self._long_opt = parser._long_opt
        self.defaults = parser.defaults

    def set_conflict_handler(self, handler):
        if handler not in ("error", "resolve"):
            raise ValueError("invalid conflict_resolution value %r" % handler)
        self.conflict_handler = handler

    def set_description(self, description):
        self.description = description

    def get_description(self):
        return self.description

    def destroy(self):
        """see OptionParser.destroy()."""
        del self._short_opt
        del self._long_opt
        del self.defaults

    # -- Option-adding methods -----------------------------------------

    def _check_conflict(self, option):
        # Collect every (opt_string, existing Option) pair that clashes
        # with the new option, then apply the configured conflict handler.
        conflict_opts = []
        for opt in option._short_opts:
            if opt in self._short_opt:
                conflict_opts.append((opt, self._short_opt[opt]))
        for opt in option._long_opts:
            if opt in self._long_opt:
                conflict_opts.append((opt, self._long_opt[opt]))

        if conflict_opts:
            handler = self.conflict_handler
            if handler == "error":
                raise OptionConflictError(
                    "conflicting option string(s): %s"
                    % ", ".join([co[0] for co in conflict_opts]),
                    option)
            elif handler == "resolve":
                # "resolve": the new option wins; strip the clashing
                # strings from the older option, and drop that option
                # entirely once it has no strings left.
                for (opt, c_option) in conflict_opts:
                    if opt.startswith("--"):
                        c_option._long_opts.remove(opt)
                        del self._long_opt[opt]
                    else:
                        c_option._short_opts.remove(opt)
                        del self._short_opt[opt]
                    if not (c_option._short_opts or c_option._long_opts):
                        c_option.container.option_list.remove(c_option)

    def add_option(self, *args, **kwargs):
        """add_option(Option)
           add_option(opt_str, ..., kwarg=val, ...)
        """
        if isinstance(args[0], str):
            option = self.option_class(*args, **kwargs)
        elif len(args) == 1 and not kwargs:
            option = args[0]
            if not isinstance(option, Option):
                raise TypeError("not an Option instance: %r" % option)
        else:
            raise TypeError("invalid arguments")

        self._check_conflict(option)

        self.option_list.append(option)
        option.container = self
        for opt in option._short_opts:
            self._short_opt[opt] = option
        for opt in option._long_opts:
            self._long_opt[opt] = option

        if option.dest is not None:     # option has a dest, we need a default
            if option.default is not NO_DEFAULT:
                self.defaults[option.dest] = option.default
            elif option.dest not in self.defaults:
                self.defaults[option.dest] = None

        return option

    def add_options(self, option_list):
        for option in option_list:
            self.add_option(option)

    # -- Option query/removal methods ----------------------------------

    def get_option(self, opt_str):
        return (self._short_opt.get(opt_str) or
                self._long_opt.get(opt_str))

    def has_option(self, opt_str):
        return (opt_str in self._short_opt or
                opt_str in self._long_opt)

    def remove_option(self, opt_str):
        option = self._short_opt.get(opt_str)
        if option is None:
            option = self._long_opt.get(opt_str)
        if option is None:
            raise ValueError("no such option %r" % opt_str)

        # Remove *all* strings of the matched option, not just opt_str.
        for opt in option._short_opts:
            del self._short_opt[opt]
        for opt in option._long_opts:
            del self._long_opt[opt]
        option.container.option_list.remove(option)

    # -- Help-formatting methods ---------------------------------------

    def format_option_help(self, formatter):
        if not self.option_list:
            return ""
        result = []
        for option in self.option_list:
            # Options marked SUPPRESS_HELP are omitted from the output.
            if not option.help is SUPPRESS_HELP:
                result.append(formatter.format_option(option))
        return "".join(result)

    def format_description(self, formatter):
        return formatter.format_description(self.get_description())

    def format_help(self, formatter):
        result = []
        if self.description:
            result.append(self.format_description(formatter))
        if self.option_list:
            result.append(self.format_option_help(formatter))
        return "\n".join(result)
class OptionGroup (OptionContainer):
    """A titled group of options within an OptionParser; shares the
    parser's option mappings so its options parse like any other, but is
    rendered as a separate titled section in help output."""

    def __init__(self, parser, title, description=None):
        self.parser = parser
        OptionContainer.__init__(
            self, parser.option_class, parser.conflict_handler, description)
        self.title = title

    def _create_option_list(self):
        self.option_list = []
        # Groups do not own mappings; they share the parser's.
        self._share_option_mappings(self.parser)

    def set_title(self, title):
        self.title = title

    def destroy(self):
        """see OptionParser.destroy()."""
        OptionContainer.destroy(self)
        del self.option_list

    # -- Help-formatting methods ---------------------------------------

    def format_help(self, formatter):
        result = formatter.format_heading(self.title)
        formatter.indent()
        result += OptionContainer.format_help(self, formatter)
        formatter.dedent()
        return result
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
description : string
A paragraph of text giving a brief overview of your program.
optparse reformats this paragraph to fit the current terminal
width and prints it when the user requests help (after usage,
but before the list of options).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
    def _add_help_option(self):
        # Install the implicit "-h/--help" option; the "help" action prints
        # the help message and exits.  _() marks the text for translation.
        self.add_option("-h", "--help",
                        action="help",
                        help=_("show this help message and exit"))
    def _add_version_option(self):
        # Install the implicit "--version" option (only added when the
        # parser was constructed with a version string).
        self.add_option("--version",
                        action="version",
                        help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
"""Set parsing to not stop on the first non-option, allowing
interspersing switches with command arguments. This is the
default behavior. See also disable_interspersed_args() and the
class documentation description of the attribute
allow_interspersed_args."""
self.allow_interspersed_args = True
def disable_interspersed_args(self):
"""Set parsing to stop on the first non-option. Use this if
you have a command processor which runs another command that
has options of its own and you want to make sure these options
don't get confused.
"""
self.allow_interspersed_args = False
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
    def get_default_values(self):
        """Return a Values object holding the (possibly processed) defaults."""
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour: hand back the raw defaults.
            return Values(self.defaults)
        defaults = self.defaults.copy()
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            if isinstance(default, str):
                # String defaults are run through the option's type check,
                # just as if the user had typed them on the command line.
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)
        return Values(defaults)
# -- OptionGroup methods -------------------------------------------
    def add_option_group(self, *args, **kwargs):
        """Add an OptionGroup, either pre-built or from (title[, desc]).

        Returns the group so the caller can populate it with options.
        """
        # XXX lots of overlap with OptionContainer.add_option()
        if isinstance(args[0], str):
            # add_option_group(title, ...): build the group ourselves.
            group = OptionGroup(self, *args, **kwargs)
        elif len(args) == 1 and not kwargs:
            # add_option_group(group): validate the pre-built group.
            group = args[0]
            if not isinstance(group, OptionGroup):
                raise TypeError("not an OptionGroup instance: %r" % group)
            if group.parser is not self:
                raise ValueError("invalid OptionGroup (wrong parser)")
        else:
            raise TypeError("invalid arguments")
        self.option_groups.append(group)
        return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is an Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError) as err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
def check_values(self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
    def _process_args(self, largs, rargs, values):
        """_process_args(largs : [string],
                         rargs : [string],
                         values : Values)

        Process command-line arguments and populate 'values', consuming
        options and arguments from 'rargs'. If 'allow_interspersed_args' is
        false, stop at the first non-option argument. If true, accumulate any
        interspersed non-option arguments in 'largs'.

        Both lists are mutated in place; anything still in 'rargs' on
        return is an unparsed positional argument.
        """
        while rargs:
            arg = rargs[0]
            # We handle bare "--" explicitly, and bare "-" is handled by the
            # standard arg handler since the short arg case ensures that the
            # len of the opt string is greater than 1.
            if arg == "--":
                # "--" terminates option processing; everything after it
                # stays in rargs as positional arguments.
                del rargs[0]
                return
            elif arg[0:2] == "--":
                # process a single long option (possibly with value(s))
                self._process_long_opt(rargs, values)
            elif arg[:1] == "-" and len(arg) > 1:
                # process a cluster of short options (possibly with
                # value(s) for the last one only)
                self._process_short_opts(rargs, values)
            elif self.allow_interspersed_args:
                # Non-option argument: remember it and keep scanning.
                largs.append(arg)
                del rargs[0]
            else:
                return  # stop now, leave this arg in rargs
        # Say this is the original argument list:
        # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
        #                             ^
        # (we are about to process arg(i)).
        #
        # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
        # [arg0, ..., arg(i-1)] (any options and their arguments will have
        # been removed from largs).
        #
        # The while loop will usually consume 1 or more arguments per pass.
        # If it consumes 1 (eg. arg is an option that takes no arguments),
        # then after _process_arg() is done the situation is:
        #
        #   largs = subset of [arg0, ..., arg(i)]
        #   rargs = [arg(i+1), ..., arg(N-1)]
        #
        # If allow_interspersed_args is false, largs will always be
        # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
        # not a very interesting subset!
    def _match_long_opt(self, opt):
        """_match_long_opt(opt : string) -> string

        Determine which long option string 'opt' matches, ie. which one
        it is an unambiguous abbreviation for (an exact match always
        wins). Raises BadOptionError if 'opt' doesn't unambiguously
        match any long option string.
        """
        return _match_abbrev(opt, self._long_opt)
    def _process_long_opt(self, rargs, values):
        """Consume one long option (and its value(s), if any) from rargs."""
        arg = rargs.pop(0)
        # Value explicitly attached to arg?  Pretend it's the next
        # argument.
        if "=" in arg:
            (opt, next_arg) = arg.split("=", 1)
            rargs.insert(0, next_arg)
            had_explicit_value = True
        else:
            opt = arg
            had_explicit_value = False
        # Resolve abbreviations ("--ver" -> "--version") before lookup.
        opt = self._match_long_opt(opt)
        option = self._long_opt[opt]
        if option.takes_value():
            nargs = option.nargs
            if len(rargs) < nargs:
                self.error(ngettext(
                    "%(option)s option requires %(number)d argument",
                    "%(option)s option requires %(number)d arguments",
                    nargs) % {"option": opt, "number": nargs})
            elif nargs == 1:
                value = rargs.pop(0)
            else:
                # Multi-arg option: collect nargs values into a tuple.
                value = tuple(rargs[0:nargs])
                del rargs[0:nargs]
        elif had_explicit_value:
            # "--flag=value" on an option that takes no value is an error.
            self.error(_("%s option does not take a value") % opt)
        else:
            value = None
        option.process(opt, value, values, self)
    def _process_short_opts(self, rargs, values):
        """Consume a cluster of short options (e.g. "-abc") from rargs.

        Only the last option in a cluster may take a value; any trailing
        characters after it are treated as that value.
        """
        arg = rargs.pop(0)
        stop = False
        i = 1
        for ch in arg[1:]:
            opt = "-" + ch
            option = self._short_opt.get(opt)
            i += 1  # we have consumed a character
            if not option:
                raise BadOptionError(opt)
            if option.takes_value():
                # Any characters left in arg?  Pretend they're the
                # next arg, and stop consuming characters of arg.
                if i < len(arg):
                    rargs.insert(0, arg[i:])
                    stop = True
                nargs = option.nargs
                if len(rargs) < nargs:
                    self.error(ngettext(
                        "%(option)s option requires %(number)d argument",
                        "%(option)s option requires %(number)d arguments",
                        nargs) % {"option": opt, "number": nargs})
                elif nargs == 1:
                    value = rargs.pop(0)
                else:
                    # Multi-arg option: collect nargs values into a tuple.
                    value = tuple(rargs[0:nargs])
                    del rargs[0:nargs]
            else:  # option doesn't take a value
                value = None
            option.process(opt, value, values, self)
            if stop:
                break
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
    def get_description(self):
        # Return the description text with "%prog" expanded.
        # NOTE(review): assumes self.description is a string; a None
        # description would raise AttributeError here -- confirm callers
        # guard against that.
        return self.expand_prog_name(self.description)
    def exit(self, status=0, msg=None):
        """Terminate the process, optionally writing 'msg' to stderr first."""
        if msg:
            sys.stderr.write(msg)
        sys.exit(status)
    def error(self, msg):
        """error(msg : string)

        Print a usage message incorporating 'msg' to stderr and exit.
        If you override this in a subclass, it should not return -- it
        should either exit or raise an exception.
        """
        self.print_usage(sys.stderr)
        # Exit status 2 is the conventional code for command-line errors.
        self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
    def print_usage(self, file=None):
        """print_usage(file : file = stdout)

        Print the usage message for the current program (self.usage) to
        'file' (default stdout). Any occurrence of the string "%prog" in
        self.usage is replaced with the name of the current program
        (basename of sys.argv[0]). Does nothing if self.usage is empty
        or not defined.
        """
        if self.usage:
            # file=None makes print() fall back to sys.stdout.
            print(self.get_usage(), file=file)
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
    def print_version(self, file=None):
        """print_version(file : file = stdout)

        Print the version message for this program (self.version) to
        'file' (default stdout). As with print_usage(), any occurrence
        of "%prog" in self.version is replaced by the current program's
        name. Does nothing if self.version is empty or undefined.
        """
        if self.version:
            # file=None makes print() fall back to sys.stdout.
            print(self.get_version(), file=file)
    def format_option_help(self, formatter=None):
        """Return the formatted "Options" section, including all groups."""
        if formatter is None:
            formatter = self.formatter
        # Pre-compute the option strings so the help columns line up.
        formatter.store_option_strings(self)
        result = []
        result.append(formatter.format_heading(_("Options")))
        formatter.indent()
        if self.option_list:
            result.append(OptionContainer.format_option_help(self, formatter))
            result.append("\n")
        for group in self.option_groups:
            result.append(group.format_help(formatter))
            result.append("\n")
        formatter.dedent()
        # Drop the last "\n", or the header if no options or option groups:
        return "".join(result[:-1])
    def format_epilog(self, formatter):
        # Delegate rendering of the epilog (the paragraph printed after
        # the option help) to the formatter; self.epilog may be None.
        return formatter.format_epilog(self.epilog)
    def format_help(self, formatter=None):
        """Return the complete help text: usage, description, option
        listing and epilog, in that order."""
        if formatter is None:
            formatter = self.formatter
        result = []
        if self.usage:
            result.append(self.get_usage() + "\n")
        if self.description:
            result.append(self.format_description(formatter) + "\n")
        result.append(self.format_option_help(formatter))
        result.append(self.format_epilog(formatter))
        return "".join(result)
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
file.write(self.format_help())
# class OptionParser
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'words', raise BadOptionError.
"""
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.  For now it is simply an alias for the Option class itself.
make_option = Option
| gpl-3.0 |
multicastTor/multicastTor | torps/util/aggregate_relays.py | 1 | 2834 | ##### Aggregate relays that appear in consensus with descriptor 4/12-3/13 #####
## if using in pypy, i needed this first so it would grab networkx and stem correctly
## export PYTHONPATH=/usr/lib/python2.7/site-packages/:/home/rob/research/orsec/stem-install/lib/python2.7/site-packages
import json
import cPickle as pickle
from pathsim import *
from networkx import Graph
from itertools import product
from time import time
in_dir = 'network-state-2012-04--2013-03'
out_file = 'relaypairs.2012-04--2013-03.json'
network_state_files = []
for dirpath, dirnames, filenames in os.walk(in_dir, followlinks=True):
for filename in filenames:
if (filename[0] != '.'):
network_state_files.append(os.path.join(dirpath,filename))
# aggregate relays in consensuses with descriptors
g = Graph()
network_state_files.sort(key = lambda x: os.path.basename(x), reverse=True)
nsf_len = len(network_state_files)
nsf_i = 0
start = time()
lapstamp = start
lapstotal, lapslen = 0.0, 0
chkpntend = os.path.basename(network_state_files[0])[0:10]
for ns_file in network_state_files:
fname = os.path.basename(ns_file)
stamp = time()
lapstotal += (stamp-lapstamp)
lapslen += 1
lapstamp = stamp
# print progress information
sys.stdout.write('\r[{1}/{2}][{0}%][hr elap. {3}][hr rem. {4}]: {5}'.format("%.3f" % (nsf_i * 100.0 / nsf_len), nsf_i+1, nsf_len, "%.3f" % ((stamp-start)/3600.0), "%.3f" % ((lapstotal/lapslen)*(nsf_len-nsf_i)/3600.0), fname))
sys.stdout.flush()
with open(ns_file, 'rb') as nsf:
consensus = pickle.load(nsf)
descriptors = pickle.load(nsf)
ips = {}
# filter all relays in this consensus to those that
# have a descriptor, are running, and are fast
for relay in consensus.relays:
if (relay in descriptors):
sd = descriptors[relay] # server descriptor
rse = consensus.relays[relay] # router status entry
if "Running" in rse.flags and "Fast" in rse.flags:
if relay not in ips: ips[relay] = []
ips[relay].append(sd.address)
# build edges between every relay that could have been
# selected in a path together
for r1 in ips:
for r2 in ips:
if r1 is r2: continue
g.add_edges_from(product(ips[r1], ips[r2]))
nsf_i += 1
# check if we should do a checkpoint and save our progress
if nsf_i == nsf_len or "01-00-00-00" in fname:
chkpntstart = fname[0:10]
with open("relaypairs.{0}--{1}.json".format(chkpntstart, chkpntend), 'wb') as f: json.dump(g.edges(), f)
print ""
print('Num addresses: {0}'.format(g.number_of_nodes()))
print('Num unique pairs: {0}'.format(g.number_of_edges()))
# write final graph to disk
with open(out_file, 'wb') as f: json.dump(g.edges(), f)
##########
| bsd-3-clause |
PurpleBooth/python-vm | venv/lib/python2.7/site-packages/setuptools/command/install_scripts.py | 505 | 2231 | from distutils import log
import distutils.command.install_scripts as orig
import os
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
    """Do normal script install, plus any egg_info wrapper scripts"""

    def initialize_options(self):
        orig.install_scripts.initialize_options(self)
        # When true, skip generating entry-point wrapper scripts (used
        # when installing into an .egg file, which must not contain them).
        self.no_ep = False

    def run(self):
        """Install declared scripts, then write entry-point wrappers."""
        import setuptools.command.easy_install as ei

        self.run_command("egg_info")
        if self.distribution.scripts:
            orig.install_scripts.run(self)  # run first to set up self.outfiles
        else:
            self.outfiles = []
        if self.no_ep:
            # don't install entry point scripts into .egg file!
            return

        # Build a pkg_resources Distribution for the project being
        # installed so the writer can enumerate its entry points.
        ei_cmd = self.get_finalized_command("egg_info")
        dist = Distribution(
            ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
            ei_cmd.egg_name, ei_cmd.egg_version,
        )
        bs_cmd = self.get_finalized_command('build_scripts')
        exec_param = getattr(bs_cmd, 'executable', None)
        bw_cmd = self.get_finalized_command("bdist_wininst")
        is_wininst = getattr(bw_cmd, '_is_running', False)
        writer = ei.ScriptWriter
        if is_wininst:
            # Windows installers always launch scripts via python.exe.
            exec_param = "python.exe"
            writer = ei.WindowsScriptWriter
        # resolve the writer to the environment
        writer = writer.best()
        cmd = writer.command_spec_class.best().from_param(exec_param)
        for args in writer.get_args(dist, cmd.as_header()):
            self.write_script(*args)

    def write_script(self, script_name, contents, mode="t", *ignored):
        """Write an executable file to the scripts directory"""
        from setuptools.command.easy_install import chmod, current_umask

        log.info("Installing %s script to %s", script_name, self.install_dir)
        target = os.path.join(self.install_dir, script_name)
        self.outfiles.append(target)

        mask = current_umask()
        if not self.dry_run:
            ensure_directory(target)
            # Fixed: use a context manager so the file handle is closed
            # even if the write raises (the original open/write/close
            # sequence leaked the handle on error).
            with open(target, "w" + mode) as f:
                f.write(contents)
            chmod(target, 0o777 - mask)
| mit |
stpierre/sponge | Sponge/urls.py | 1 | 2131 | from django.conf import settings
from django.conf.urls.defaults import *
# Repository views: listing, add/view/delete, package promotion and
# demotion between repos, diffing, syncing, metadata rebuild and cloning.
urlpatterns = patterns(
    'sponge.views.repos',
    (r'^$', 'list'),
    (r'^repo/$', 'list'),
    (r'^repo/add/$', 'add'),
    (r'^repo/(?P<repo_id>[^/]+)/$', 'view'),
    (r'^repo/(?P<repo_id>[^/]+)/delete/$', 'delete'),
    (r'^repo/(?P<repo_id>[^/]+)/promote/$', 'promote_select'),
    (r'^repo/promote/(?P<pid>\d+)$', 'promote_ok'),
    (r'^repo/(?P<repo_id>[^/]+)/demote/$', 'demote_select'),
    (r'^repo/demote/(?P<pid>\d+)$', 'demote_ok'),
    (r'^repo/(?P<repo_id>[^/]+)/promote/package/(?P<package>[a-z0-9\-]+)$',
     'promote_package'),
    (r'^repo/(?P<repo_id>[^/]+)/demote/package/(?P<package>[a-z0-9\-]+)$',
     'demote_package'),
    (r'^repo/(?P<repo_id>[^/]+)/diff/(?P<repo_id2>[^/]+)/$', 'diff'),
    (r'^repo/(?P<repo_id>[^/]+)/sync/$', 'sync'),
    (r'^repo/(?P<repo_id>[^/]+)/rebuild/$', 'rebuild_metadata'),
    (r'^repo/(?P<repo_id>[^/]+)/clone/$', 'clone'))

# Package-filter management.
urlpatterns += patterns(
    'sponge.views.filters',
    (r'^filter/$', 'list'),
    (r'^filter/add/$', 'add'),
    (r'^filter/(?P<filter_id>[^/]+)/$', 'view'),
    (r'^filter/(?P<filter_id>[^/]+)/delete/$', 'delete'))

# User management.
urlpatterns += patterns(
    'sponge.views.users',
    (r'^users/$', 'list'),
    (r'^users/add/$', 'add'),
    (r'^users/(?P<login>[^/]+)/$', 'view'),
    (r'^users/(?P<login>[^/]+)/delete/$', 'delete'))

# Background-task management.
urlpatterns += patterns(
    'sponge.views.tasks',
    (r'^tasks/$', 'list'),
    (r'^tasks/(?P<task_id>[^/]+)/delete/$', 'delete'))

# Authentication and configuration views.
urlpatterns += patterns(
    '',
    (r'^login/$', 'django.contrib.auth.views.login',
     {'template_name': 'login.html'}),
    (r'^logout/$', 'sponge.views.logout'),
    (r'^config/$', 'sponge.views.configure'),
)

if settings.DEBUG:
    # Development only: serve uploaded/static media straight from Django.
    from django.views.static import serve
    _media_url = settings.MEDIA_URL
    if _media_url.startswith('/'):
        _media_url = _media_url[1:]
    urlpatterns += patterns(
        '',
        (r'^%s(?P<path>.*)$' % _media_url,
         serve,
         {'document_root': settings.MEDIA_ROOT}))
    del(_media_url, serve)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.