| repo_name | ref | path | copies | content |
|---|---|---|---|---|
dmazzella/micropython
|
refs/heads/master
|
tests/float/string_format_modulo2.py
|
30
|
# test formatting floats with large precision, that it doesn't overflow the buffer
def test(num, num_str):
    if num == float('inf') or (num == 0.0 and num_str != '0.0'):
# skip numbers that overflow or underflow the FP precision
return
for kind in ('e', 'f', 'g'):
# check precision either side of the size of the buffer (32 bytes)
for prec in range(23, 36, 2):
fmt = '%.' + '%d' % prec + kind
s = fmt % num
check = abs(float(s) - num)
if num > 1:
check /= num
if check > 1e-6:
print('FAIL', num_str, fmt, s, len(s), check)
# check pure zero
test(0.0, '0.0')
# check some powers of 10, making sure to include exponents with 3 digits
for e in range(-8, 8):
num = pow(10, e)
test(num, '1e%d' % e)
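# Illustrative note (not part of the original test): for '%e' with a
# two-digit exponent, the formatted length is prec + 6, e.g.
# len('%.33e' % 1.0) == 39, just past the 32-byte buffer the loop probes.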
|
codrut3/tensorflow
|
refs/heads/master
|
tensorflow/tools/docs/doc_generator_visitor.py
|
68
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `traverse` visitor for processing documentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.util import tf_inspect
class DocGeneratorVisitor(object):
"""A visitor that generates docs for a python object when __call__ed."""
def __init__(self, root_name=''):
"""Make a visitor.
As this visitor is starting its traversal at a module or class, it will not
be told the name of that object during traversal. `root_name` is the name it
should use for that object, effectively prefixing all names with
"root_name.".
Args:
root_name: The name of the root module/class.
"""
self.set_root_name(root_name)
self._index = {}
self._tree = {}
self._reverse_index = None
self._duplicates = None
self._duplicate_of = None
def set_root_name(self, root_name):
"""Sets the root name for subsequent __call__s."""
self._root_name = root_name or ''
self._prefix = (root_name + '.') if root_name else ''
@property
def index(self):
"""A map from fully qualified names to objects to be documented.
The index is filled when the visitor is passed to `traverse`.
Returns:
The index filled by traversal.
"""
return self._index
@property
def tree(self):
"""A map from fully qualified names to all its child names for traversal.
The full name to member names map is filled when the visitor is passed to
`traverse`.
Returns:
The full name to member name map filled by traversal.
"""
return self._tree
@property
def reverse_index(self):
"""A map from `id(object)` to the preferred fully qualified name.
This map only contains non-primitive objects (no numbers or strings) present
in `index` (for primitive objects, `id()` doesn't quite do the right thing).
It is computed when it, `duplicate_of`, or `duplicates` are first accessed.
Returns:
The `id(object)` to full name map.
"""
self._maybe_find_duplicates()
return self._reverse_index
@property
def duplicate_of(self):
"""A map from duplicate full names to a preferred fully qualified name.
    This map only contains names that are not themselves a preferred name.
It is computed when it, `reverse_index`, or `duplicates` are first accessed.
Returns:
The map from duplicate name to preferred name.
"""
self._maybe_find_duplicates()
return self._duplicate_of
@property
def duplicates(self):
"""A map from preferred full names to a list of all names for this symbol.
This function returns a map from preferred (master) name for a symbol to a
lexicographically sorted list of all aliases for that name (incl. the master
name). Symbols without duplicate names do not appear in this map.
It is computed when it, `reverse_index`, or `duplicate_of` are first
accessed.
Returns:
The map from master name to list of all duplicate names.
"""
self._maybe_find_duplicates()
return self._duplicates
def _add_prefix(self, name):
"""Adds the root name to a name."""
return self._prefix + name if name else self._root_name
def __call__(self, parent_name, parent, children):
"""Visitor interface, see `tensorflow/tools/common:traverse` for details.
This method is called for each symbol found in a traversal using
`tensorflow/tools/common:traverse`. It should not be called directly in
user code.
Args:
parent_name: The fully qualified name of a symbol found during traversal.
parent: The Python object referenced by `parent_name`.
children: A list of `(name, py_object)` pairs enumerating, in alphabetical
order, the children (as determined by `tf_inspect.getmembers`) of
`parent`. `name` is the local name of `py_object` in `parent`.
Raises:
RuntimeError: If this visitor is called with a `parent` that is not a
class or module.
"""
parent_name = self._add_prefix(parent_name)
self._index[parent_name] = parent
self._tree[parent_name] = []
if not (tf_inspect.ismodule(parent) or tf_inspect.isclass(parent)):
raise RuntimeError('Unexpected type in visitor -- %s: %r' % (parent_name,
parent))
    for name, child in list(children):
      # Don't document __metaclass__. Iterate over a copy and remove by value;
      # `del children[i]` with a stale index could delete the wrong entry once
      # an earlier item had been removed.
      if name in ['__metaclass__']:
        children.remove((name, child))
        continue
full_name = '.'.join([parent_name, name]) if parent_name else name
self._index[full_name] = child
self._tree[parent_name].append(name)
def _maybe_find_duplicates(self):
"""Compute data structures containing information about duplicates.
Find duplicates in `index` and decide on one to be the "master" name.
Computes a reverse_index mapping each object id to its master name.
Also computes a map `duplicate_of` from aliases to their master name (the
master name itself has no entry in this map), and a map `duplicates` from
master names to a lexicographically sorted list of all aliases for that name
(incl. the master name).
All these are computed and set as fields if they haven't already.
"""
if self._reverse_index is not None:
return
# Maps the id of a symbol to its fully qualified name. For symbols that have
# several aliases, this map contains the first one found.
# We use id(py_object) to get a hashable value for py_object. Note all
# objects in _index are in memory at the same time so this is safe.
reverse_index = {}
# Make a preliminary duplicates map. For all sets of duplicate names, it
# maps the first name found to a list of all duplicate names.
raw_duplicates = {}
for full_name, py_object in six.iteritems(self._index):
# We cannot use the duplicate mechanism for some constants, since e.g.,
# id(c1) == id(c2) with c1=1, c2=1. This is unproblematic since constants
# have no usable docstring and won't be documented automatically.
if (py_object is not None and
not isinstance(py_object, six.integer_types + six.string_types +
(six.binary_type, six.text_type, float, complex, bool))
          and py_object != ()):
object_id = id(py_object)
if object_id in reverse_index:
master_name = reverse_index[object_id]
if master_name in raw_duplicates:
raw_duplicates[master_name].append(full_name)
else:
raw_duplicates[master_name] = [master_name, full_name]
else:
reverse_index[object_id] = full_name
# Decide on master names, rewire duplicates and make a duplicate_of map
# mapping all non-master duplicates to the master name. The master symbol
# does not have an entry in this map.
duplicate_of = {}
# Duplicates maps the main symbols to the set of all duplicates of that
# symbol (incl. itself).
duplicates = {}
for names in raw_duplicates.values():
names = sorted(names)
# Choose the lexicographically first name with the minimum number of
      # submodules. This prefers the highest-level namespace for any symbol.
master_name = min(names, key=lambda name: name.count('.'))
duplicates[master_name] = names
for name in names:
if name != master_name:
duplicate_of[name] = master_name
# Set the reverse index to the canonical name.
reverse_index[id(self._index[master_name])] = master_name
self._duplicate_of = duplicate_of
self._duplicates = duplicates
self._reverse_index = reverse_index
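# Hedged illustration (not part of the original file): master-name selection
# for one hypothetical duplicate set. Because the names are sorted first,
# min() returns the lexicographically smallest name among those with the
# fewest submodules:
#   names = sorted(['pkg.sub.Thing', 'pkg.Thing', 'pkg.alias.Thing'])
#   min(names, key=lambda name: name.count('.'))  # -> 'pkg.Thing'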
|
davidrenne/django-allauth
|
refs/heads/master
|
allauth/socialaccount/south_migrations/0011_auto__chg_field_socialtoken_token.py
|
77
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'SocialToken.token'
db.alter_column('socialaccount_socialtoken', 'token', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'SocialToken.token'
db.alter_column('socialaccount_socialtoken', 'token', self.gf('django.db.models.fields.CharField')(max_length=255))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'socialaccount.socialaccount': {
'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'SocialAccount'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'extra_data': ('allauth.socialaccount.fields.JSONField', [], {'default': "'{}'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'socialaccount.socialapp': {
'Meta': {'object_name': 'SocialApp'},
'client_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False', 'blank': 'True'})
},
'socialaccount.socialtoken': {
'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'SocialToken'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialAccount']"}),
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialApp']"}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.TextField', [], {}),
'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['socialaccount']
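# Hedged usage note (not part of the generated migration): with South
# installed, this schema change would typically be applied with something
# like `python manage.py migrate socialaccount` (assumed workflow).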
|
nerzhul/ansible
|
refs/heads/devel
|
test/runner/lib/core_ci.py
|
14
|
"""Access Ansible Core CI remote services."""
from __future__ import absolute_import, print_function
import json
import os
import traceback
import uuid
import errno
import time
from lib.http import (
HttpClient,
HttpResponse,
HttpError,
)
from lib.util import (
ApplicationError,
run_command,
make_dirs,
CommonConfig,
display,
is_shippable,
)
class AnsibleCoreCI(object):
"""Client for Ansible Core CI services."""
def __init__(self, args, platform, version, stage='prod', persist=True, name=None):
"""
:type args: CommonConfig
:type platform: str
:type version: str
:type stage: str
:type persist: bool
:type name: str
"""
self.args = args
self.platform = platform
self.version = version
self.stage = stage
self.client = HttpClient(args)
self.connection = None
self.instance_id = None
self.name = name if name else '%s-%s' % (self.platform, self.version)
if self.platform == 'windows':
self.ssh_key = None
self.endpoint = 'https://14blg63h2i.execute-api.us-east-1.amazonaws.com'
self.port = 5986
elif self.platform == 'freebsd':
self.ssh_key = SshKey(args)
self.endpoint = 'https://14blg63h2i.execute-api.us-east-1.amazonaws.com'
self.port = 22
elif self.platform == 'osx':
self.ssh_key = SshKey(args)
self.endpoint = 'https://osx.testing.ansible.com'
self.port = None
else:
raise ApplicationError('Unsupported platform: %s' % platform)
self.path = os.path.expanduser('~/.ansible/test/instances/%s-%s' % (self.name, self.stage))
if persist and self._load():
try:
display.info('Checking existing %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
self.connection = self.get()
display.info('Loaded existing %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
except HttpError as ex:
if ex.status != 404:
raise
self._clear()
display.info('Cleared stale %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
self.instance_id = None
else:
self.instance_id = None
self._clear()
if self.instance_id:
self.started = True
else:
self.started = False
self.instance_id = str(uuid.uuid4())
display.info('Initializing new %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
def start(self):
"""Start instance."""
if is_shippable():
self.start_shippable()
else:
self.start_remote()
def start_remote(self):
"""Start instance for remote development/testing."""
with open(os.path.expanduser('~/.ansible-core-ci.key'), 'r') as key_fd:
auth_key = key_fd.read().strip()
self._start(dict(
remote=dict(
key=auth_key,
nonce=None,
),
))
def start_shippable(self):
"""Start instance on Shippable."""
self._start(dict(
shippable=dict(
run_id=os.environ['SHIPPABLE_BUILD_ID'],
job_number=int(os.environ['SHIPPABLE_JOB_NUMBER']),
),
))
def stop(self):
"""Stop instance."""
if not self.started:
display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return
response = self.client.delete(self._uri)
if response.status_code == 404:
self._clear()
display.info('Cleared invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return
if response.status_code == 200:
self._clear()
display.info('Stopped running %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return
raise self._create_http_error(response)
def get(self):
"""
Get instance connection information.
:rtype: InstanceConnection
"""
if not self.started:
display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return None
if self.connection and self.connection.running:
return self.connection
response = self.client.get(self._uri)
if response.status_code != 200:
raise self._create_http_error(response)
if self.args.explain:
self.connection = InstanceConnection(
running=True,
hostname='cloud.example.com',
port=self.port or 12345,
username='username',
password='password' if self.platform == 'windows' else None,
)
else:
response_json = response.json()
status = response_json['status']
con = response_json['connection']
self.connection = InstanceConnection(
running=status == 'running',
hostname=con['hostname'],
port=int(con.get('port', self.port)),
username=con['username'],
password=con.get('password'),
)
status = 'running' if self.connection.running else 'starting'
display.info('Retrieved %s %s/%s instance %s.' % (status, self.platform, self.version, self.instance_id),
verbosity=1)
return self.connection
def wait(self):
"""Wait for the instance to become ready."""
for _ in range(1, 90):
if self.get().running:
return
time.sleep(10)
raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
(self.platform, self.version, self.instance_id))
@property
def _uri(self):
return '%s/%s/jobs/%s' % (self.endpoint, self.stage, self.instance_id)
def _start(self, auth):
"""Start instance."""
if self.started:
display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return
data = dict(
config=dict(
platform=self.platform,
version=self.version,
public_key=self.ssh_key.pub_contents if self.ssh_key else None,
query=False,
)
)
data.update(dict(auth=auth))
headers = {
'Content-Type': 'application/json',
}
response = self.client.put(self._uri, data=json.dumps(data), headers=headers)
if response.status_code != 200:
raise self._create_http_error(response)
self.started = True
self._save()
display.info('Started %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
def _clear(self):
"""Clear instance information."""
try:
self.connection = None
os.remove(self.path)
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
def _load(self):
"""Load instance information."""
try:
with open(self.path, 'r') as instance_fd:
self.instance_id = instance_fd.read()
self.started = True
except IOError as ex:
if ex.errno != errno.ENOENT:
raise
self.instance_id = None
return self.instance_id
def _save(self):
"""Save instance information."""
if self.args.explain:
return
make_dirs(os.path.dirname(self.path))
with open(self.path, 'w') as instance_fd:
instance_fd.write(self.instance_id)
@staticmethod
def _create_http_error(response):
"""
:type response: HttpResponse
:rtype: ApplicationError
"""
response_json = response.json()
stack_trace = ''
if 'message' in response_json:
message = response_json['message']
elif 'errorMessage' in response_json:
message = response_json['errorMessage'].strip()
if 'stackTrace' in response_json:
trace = '\n'.join([x.rstrip() for x in traceback.format_list(response_json['stackTrace'])])
stack_trace = ('\nTraceback (from remote server):\n%s' % trace)
else:
message = str(response_json)
return HttpError(response.status_code, '%s%s' % (message, stack_trace))
class SshKey(object):
"""Container for SSH key used to connect to remote instances."""
def __init__(self, args):
"""
:type args: CommonConfig
"""
tmp = os.path.expanduser('~/.ansible/test/')
self.key = os.path.join(tmp, 'id_rsa')
self.pub = os.path.join(tmp, 'id_rsa.pub')
if not os.path.isfile(self.pub):
if not args.explain:
make_dirs(tmp)
run_command(args, ['ssh-keygen', '-q', '-t', 'rsa', '-N', '', '-f', self.key])
if args.explain:
self.pub_contents = None
else:
with open(self.pub, 'r') as pub_fd:
self.pub_contents = pub_fd.read().strip()
class InstanceConnection(object):
"""Container for remote instance status and connection details."""
def __init__(self, running, hostname, port, username, password):
"""
:type running: bool
:type hostname: str
:type port: int
:type username: str
:type password: str | None
"""
self.running = running
self.hostname = hostname
self.port = port
self.username = username
self.password = password
def __str__(self):
if self.password:
return '%s:%s [%s:%s]' % (self.hostname, self.port, self.username, self.password)
return '%s:%s [%s]' % (self.hostname, self.port, self.username)
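# Hedged usage sketch (hypothetical `args` object; not part of this file):
#   ci = AnsibleCoreCI(args, 'freebsd', '11.1')
#   ci.start()
#   ci.wait()
#   print(ci.get())  # e.g. 'hostname:22 [username]'
#   ci.stop()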
|
HyperBaton/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortios/fortios_switch_controller_managed_switch.py
|
7
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_switch_controller_managed_switch
short_description: Configure FortiSwitch devices that are managed by this FortiGate in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify switch_controller feature and managed_switch category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
            - Ensures the FortiGate certificate is verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
              This attribute was already present in previous versions at a deeper level.
              It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
switch_controller_managed_switch:
description:
- Configure FortiSwitch devices that are managed by this FortiGate.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
802_1X_settings:
description:
- Configuration method to edit FortiSwitch 802.1X global settings.
type: dict
suboptions:
link_down_auth:
description:
- Authentication state to set if a link is down.
type: str
choices:
- set-unauth
- no-action
local_override:
description:
- Enable to override global 802.1X settings on individual FortiSwitches.
type: str
choices:
- enable
- disable
max_reauth_attempt:
description:
- Maximum number of authentication attempts (0 - 15).
type: int
reauth_period:
description:
- Reauthentication time interval (1 - 1440 min).
type: int
custom_command:
description:
- Configuration method to edit FortiSwitch commands to be pushed to this FortiSwitch device upon rebooting the FortiGate switch controller
or the FortiSwitch.
type: list
suboptions:
command_entry:
description:
- List of FortiSwitch commands.
type: str
command_name:
description:
- Names of commands to be pushed to this FortiSwitch device, as configured under config switch-controller custom-command. Source
switch-controller.custom-command.command-name.
type: str
delayed_restart_trigger:
description:
- Delayed restart triggered for this FortiSwitch.
type: int
description:
description:
- Description.
type: str
directly_connected:
description:
- Directly connected FortiSwitch.
type: int
dynamic_capability:
description:
- List of features this FortiSwitch supports (not configurable) that is sent to the FortiGate device for subsequent configuration
initiated by the FortiGate device.
type: int
dynamically_discovered:
description:
- Dynamically discovered FortiSwitch.
type: int
fsw_wan1_admin:
description:
- FortiSwitch WAN1 admin status; enable to authorize the FortiSwitch as a managed switch.
type: str
choices:
- discovered
- disable
- enable
fsw_wan1_peer:
description:
- Fortiswitch WAN1 peer port.
type: str
fsw_wan2_admin:
description:
- FortiSwitch WAN2 admin status; enable to authorize the FortiSwitch as a managed switch.
type: str
choices:
- discovered
- disable
- enable
fsw_wan2_peer:
description:
- FortiSwitch WAN2 peer port.
type: str
igmp_snooping:
description:
- Configure FortiSwitch IGMP snooping global settings.
type: dict
suboptions:
aging_time:
description:
- Maximum time to retain a multicast snooping entry for which no packets have been seen (15 - 3600 sec).
type: int
flood_unknown_multicast:
description:
- Enable/disable unknown multicast flooding.
type: str
choices:
- enable
- disable
local_override:
description:
- Enable/disable overriding the global IGMP snooping configuration.
type: str
choices:
- enable
- disable
max_allowed_trunk_members:
description:
- FortiSwitch maximum allowed trunk members.
type: int
mirror:
description:
- Configuration method to edit FortiSwitch packet mirror.
type: list
suboptions:
dst:
description:
- Destination port.
type: str
name:
description:
- Mirror name.
required: true
type: str
src_egress:
description:
- Source egress interfaces.
type: list
suboptions:
name:
description:
- Interface name.
required: true
type: str
src_ingress:
description:
- Source ingress interfaces.
type: list
suboptions:
name:
description:
- Interface name.
required: true
type: str
status:
description:
- Active/inactive mirror configuration.
type: str
choices:
- active
- inactive
switching_packet:
description:
- Enable/disable switching functionality when mirroring.
type: str
choices:
- enable
- disable
name:
description:
- Managed-switch name.
type: str
owner_vdom:
description:
                    - VDOM to which the port owner belongs.
type: str
poe_detection_type:
description:
- PoE detection type for FortiSwitch.
type: int
poe_pre_standard_detection:
description:
- Enable/disable PoE pre-standard detection.
type: str
choices:
- enable
- disable
ports:
description:
- Managed-switch port list.
type: list
suboptions:
allowed_vlans:
description:
                            - Configure switch port tagged VLANs.
type: list
suboptions:
vlan_name:
description:
- VLAN name. Source system.interface.name.
type: str
allowed_vlans_all:
description:
                            - Enable/disable all defined VLANs on this port.
type: str
choices:
- enable
- disable
arp_inspection_trust:
description:
- Trusted or untrusted dynamic ARP inspection.
type: str
choices:
- untrusted
- trusted
bundle:
description:
- Enable/disable Link Aggregation Group (LAG) bundling for non-FortiLink interfaces.
type: str
choices:
- enable
- disable
description:
description:
- Description for port.
type: str
dhcp_snoop_option82_trust:
description:
- Enable/disable allowance of DHCP with option-82 on untrusted interface.
type: str
choices:
- enable
- disable
dhcp_snooping:
description:
- Trusted or untrusted DHCP-snooping interface.
type: str
choices:
- untrusted
- trusted
discard_mode:
description:
- Configure discard mode for port.
type: str
choices:
- none
- all-untagged
- all-tagged
edge_port:
description:
- Enable/disable this interface as an edge port, bridging connections between workstations and/or computers.
type: str
choices:
- enable
- disable
export_tags:
description:
- Switch controller export tag name.
type: list
suboptions:
tag_name:
description:
- Switch tag name. Source switch-controller.switch-interface-tag.name.
type: str
export_to:
description:
- Export managed-switch port to a tenant VDOM. Source system.vdom.name.
type: str
export_to_pool:
description:
- Switch controller export port to pool-list. Source switch-controller.virtual-port-pool.name.
type: str
export_to_pool_flag:
description:
- Switch controller export port to pool-list.
type: int
fgt_peer_device_name:
description:
- FGT peer device name.
type: str
fgt_peer_port_name:
description:
- FGT peer port name.
type: str
fiber_port:
description:
- Fiber-port.
type: int
flags:
description:
- Port properties flags.
type: int
fortilink_port:
description:
- FortiLink uplink port.
type: int
igmp_snooping:
description:
- Set IGMP snooping mode for the physical port interface.
type: str
choices:
- enable
- disable
igmps_flood_reports:
description:
- Enable/disable flooding of IGMP reports to this interface when igmp-snooping enabled.
type: str
choices:
- enable
- disable
igmps_flood_traffic:
description:
- Enable/disable flooding of IGMP snooping traffic to this interface.
type: str
choices:
- enable
- disable
isl_local_trunk_name:
description:
- ISL local trunk name.
type: str
isl_peer_device_name:
description:
- ISL peer device name.
type: str
isl_peer_port_name:
description:
- ISL peer port name.
type: str
lacp_speed:
description:
                            - Send Link Aggregation Control Protocol (LACP) messages every 30 seconds (slow) or every second (fast).
type: str
choices:
- slow
- fast
learning_limit:
description:
- Limit the number of dynamic MAC addresses on this Port (1 - 128, 0 = no limit, default).
type: int
lldp_profile:
description:
- LLDP port TLV profile. Source switch-controller.lldp-profile.name.
type: str
lldp_status:
description:
- LLDP transmit and receive status.
type: str
choices:
- disable
- rx-only
- tx-only
- tx-rx
loop_guard:
description:
- Enable/disable loop-guard on this interface, an STP optimization used to prevent network loops.
type: str
choices:
- enabled
- disabled
loop_guard_timeout:
description:
- Loop-guard timeout (0 - 120 min).
type: int
max_bundle:
description:
- Maximum size of LAG bundle (1 - 24)
type: int
mclag:
description:
- Enable/disable multi-chassis link aggregation (MCLAG).
type: str
choices:
- enable
- disable
member_withdrawal_behavior:
description:
- Port behavior after it withdraws because of loss of control packets.
type: str
choices:
- forward
- block
members:
description:
- Aggregated LAG bundle interfaces.
type: list
suboptions:
member_name:
description:
- Interface name from available options.
type: str
min_bundle:
description:
- Minimum size of LAG bundle (1 - 24)
type: int
mode:
description:
- "LACP mode: ignore and do not send control messages, or negotiate 802.3ad aggregation passively or actively."
type: str
choices:
- static
- lacp-passive
- lacp-active
poe_capable:
description:
- PoE capable.
type: int
poe_pre_standard_detection:
description:
- Enable/disable PoE pre-standard detection.
type: str
choices:
- enable
- disable
poe_status:
description:
- Enable/disable PoE status.
type: str
choices:
- enable
- disable
port_name:
description:
- Switch port name.
type: str
port_number:
description:
- Port number.
type: int
port_owner:
description:
- Switch port name.
type: str
port_prefix_type:
description:
- Port prefix type.
type: int
port_security_policy:
description:
- Switch controller authentication policy to apply to this managed switch from available options. Source switch-controller
.security-policy.802-1X.name switch-controller.security-policy.captive-portal.name.
type: str
port_selection_criteria:
description:
- Algorithm for aggregate port selection.
type: str
choices:
- src-mac
- dst-mac
- src-dst-mac
- src-ip
- dst-ip
- src-dst-ip
qos_policy:
description:
- Switch controller QoS policy from available options. Source switch-controller.qos.qos-policy.name.
type: str
sample_direction:
description:
- sFlow sample direction.
type: str
choices:
- tx
- rx
- both
sflow_counter_interval:
description:
- sFlow sampler counter polling interval (1 - 255 sec).
type: int
sflow_sample_rate:
description:
- sFlow sampler sample rate (0 - 99999 p/sec).
type: int
sflow_sampler:
description:
- Enable/disable sFlow protocol on this interface.
type: str
choices:
- enabled
- disabled
speed:
description:
- Switch port speed; default and available settings depend on hardware.
type: str
choices:
- 10half
- 10full
- 100half
- 100full
- 1000auto
- 1000fiber
- 1000full
- 10000
- 40000
- auto
- auto-module
- 100FX-half
- 100FX-full
- 100000full
- 2500full
- 25000full
- 50000full
speed_mask:
description:
- Switch port speed mask.
type: int
stacking_port:
description:
- Stacking port.
type: int
status:
description:
- "Switch port admin status: up or down."
type: str
choices:
- up
- down
stp_bpdu_guard:
description:
- Enable/disable STP BPDU guard on this interface.
type: str
choices:
- enabled
- disabled
stp_bpdu_guard_timeout:
description:
- BPDU Guard disabling protection (0 - 120 min).
type: int
stp_root_guard:
description:
- Enable/disable STP root guard on this interface.
type: str
choices:
- enabled
- disabled
stp_state:
description:
- Enable/disable Spanning Tree Protocol (STP) on this interface.
type: str
choices:
- enabled
- disabled
switch_id:
description:
- Switch id.
type: str
type:
description:
- "Interface type: physical or trunk port."
type: str
choices:
- physical
- trunk
untagged_vlans:
description:
                            - Configure switch port untagged VLANs.
type: list
suboptions:
vlan_name:
description:
- VLAN name. Source system.interface.name.
type: str
virtual_port:
description:
- Virtualized switch port.
type: int
vlan:
description:
- Assign switch ports to a VLAN. Source system.interface.name.
type: str
pre_provisioned:
description:
- Pre-provisioned managed switch.
type: int
staged_image_version:
description:
- Staged image version for FortiSwitch.
type: str
storm_control:
description:
- Configuration method to edit FortiSwitch storm control for measuring traffic activity using data rates to prevent traffic disruption.
type: dict
suboptions:
broadcast:
description:
- Enable/disable storm control to drop broadcast traffic.
type: str
choices:
- enable
- disable
local_override:
description:
- Enable to override global FortiSwitch storm control settings for this FortiSwitch.
type: str
choices:
- enable
- disable
rate:
description:
- Rate in packets per second at which storm traffic is controlled (1 - 10000000). Storm control drops excess traffic data rates
beyond this threshold.
type: int
unknown_multicast:
description:
- Enable/disable storm control to drop unknown multicast traffic.
type: str
choices:
- enable
- disable
unknown_unicast:
description:
- Enable/disable storm control to drop unknown unicast traffic.
type: str
choices:
- enable
- disable
stp_settings:
description:
- Configuration method to edit Spanning Tree Protocol (STP) settings used to prevent bridge loops.
type: dict
suboptions:
forward_time:
description:
- Period of time a port is in listening and learning state (4 - 30 sec).
type: int
hello_time:
description:
- Period of time between successive STP frame Bridge Protocol Data Units (BPDUs) sent on a port (1 - 10 sec).
type: int
local_override:
description:
- Enable to configure local STP settings that override global STP settings.
type: str
choices:
- enable
- disable
max_age:
description:
- Maximum time before a bridge port saves its configuration BPDU information (6 - 40 sec).
type: int
max_hops:
description:
- Maximum number of hops between the root bridge and the furthest bridge (1- 40).
type: int
name:
description:
- Name of local STP settings configuration.
type: str
pending_timer:
description:
- Pending time (1 - 15 sec).
type: int
revision:
description:
- STP revision number (0 - 65535).
type: int
status:
description:
- Enable/disable STP.
type: str
choices:
- enable
- disable
switch_device_tag:
description:
- User definable label/tag.
type: str
switch_id:
description:
- Managed-switch id.
type: str
switch_log:
description:
- Configuration method to edit FortiSwitch logging settings (logs are transferred to and inserted into the FortiGate event log).
type: dict
suboptions:
local_override:
description:
- Enable to configure local logging settings that override global logging settings.
type: str
choices:
- enable
- disable
severity:
description:
- Severity of FortiSwitch logs that are added to the FortiGate event log.
type: str
choices:
- emergency
- alert
- critical
- error
- warning
- notification
- information
- debug
status:
description:
- Enable/disable adding FortiSwitch logs to the FortiGate event log.
type: str
choices:
- enable
- disable
switch_profile:
description:
- FortiSwitch profile. Source switch-controller.switch-profile.name.
type: str
switch_stp_settings:
description:
- Configure spanning tree protocol (STP).
type: dict
suboptions:
status:
description:
- Enable/disable STP.
type: str
choices:
- enable
- disable
type:
description:
- Indication of switch type, physical or virtual.
type: str
choices:
- virtual
- physical
version:
description:
- FortiSwitch version.
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure FortiSwitch devices that are managed by this FortiGate.
fortios_switch_controller_managed_switch:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
switch_controller_managed_switch:
802_1X_settings:
link_down_auth: "set-unauth"
local_override: "enable"
max_reauth_attempt: "6"
reauth_period: "7"
custom_command:
-
command_entry: "<your_own_value>"
command_name: "<your_own_value> (source switch-controller.custom-command.command-name)"
delayed_restart_trigger: "11"
description: "<your_own_value>"
directly_connected: "13"
dynamic_capability: "14"
dynamically_discovered: "15"
fsw_wan1_admin: "discovered"
fsw_wan1_peer: "<your_own_value>"
fsw_wan2_admin: "discovered"
fsw_wan2_peer: "<your_own_value>"
igmp_snooping:
aging_time: "21"
flood_unknown_multicast: "enable"
local_override: "enable"
max_allowed_trunk_members: "24"
mirror:
-
dst: "<your_own_value>"
name: "default_name_27"
src_egress:
-
name: "default_name_29"
src_ingress:
-
name: "default_name_31"
status: "active"
switching_packet: "enable"
name: "default_name_34"
owner_vdom: "<your_own_value>"
poe_detection_type: "36"
poe_pre_standard_detection: "enable"
ports:
-
allowed_vlans:
-
vlan_name: "<your_own_value> (source system.interface.name)"
allowed_vlans_all: "enable"
arp_inspection_trust: "untrusted"
bundle: "enable"
description: "<your_own_value>"
dhcp_snoop_option82_trust: "enable"
dhcp_snooping: "untrusted"
discard_mode: "none"
edge_port: "enable"
export_tags:
-
tag_name: "<your_own_value> (source switch-controller.switch-interface-tag.name)"
export_to: "<your_own_value> (source system.vdom.name)"
export_to_pool: "<your_own_value> (source switch-controller.virtual-port-pool.name)"
export_to_pool_flag: "53"
fgt_peer_device_name: "<your_own_value>"
fgt_peer_port_name: "<your_own_value>"
fiber_port: "56"
flags: "57"
fortilink_port: "58"
igmp_snooping: "enable"
igmps_flood_reports: "enable"
igmps_flood_traffic: "enable"
isl_local_trunk_name: "<your_own_value>"
isl_peer_device_name: "<your_own_value>"
isl_peer_port_name: "<your_own_value>"
lacp_speed: "slow"
learning_limit: "66"
lldp_profile: "<your_own_value> (source switch-controller.lldp-profile.name)"
lldp_status: "disable"
loop_guard: "enabled"
loop_guard_timeout: "70"
max_bundle: "71"
mclag: "enable"
member_withdrawal_behavior: "forward"
members:
-
member_name: "<your_own_value>"
min_bundle: "76"
mode: "static"
poe_capable: "78"
poe_pre_standard_detection: "enable"
poe_status: "enable"
port_name: "<your_own_value>"
port_number: "82"
port_owner: "<your_own_value>"
port_prefix_type: "84"
port_security_policy: "<your_own_value> (source switch-controller.security-policy.802-1X.name switch-controller.security-policy.captive-portal
.name)"
port_selection_criteria: "src-mac"
qos_policy: "<your_own_value> (source switch-controller.qos.qos-policy.name)"
sample_direction: "tx"
sflow_counter_interval: "89"
sflow_sample_rate: "90"
sflow_sampler: "enabled"
speed: "10half"
speed_mask: "93"
stacking_port: "94"
status: "up"
stp_bpdu_guard: "enabled"
stp_bpdu_guard_timeout: "97"
stp_root_guard: "enabled"
stp_state: "enabled"
switch_id: "<your_own_value>"
type: "physical"
untagged_vlans:
-
vlan_name: "<your_own_value> (source system.interface.name)"
virtual_port: "104"
vlan: "<your_own_value> (source system.interface.name)"
pre_provisioned: "106"
staged_image_version: "<your_own_value>"
storm_control:
broadcast: "enable"
local_override: "enable"
rate: "111"
unknown_multicast: "enable"
unknown_unicast: "enable"
stp_settings:
forward_time: "115"
hello_time: "116"
local_override: "enable"
max_age: "118"
max_hops: "119"
name: "default_name_120"
pending_timer: "121"
revision: "122"
status: "enable"
switch_device_tag: "<your_own_value>"
switch_id: "<your_own_value>"
switch_log:
local_override: "enable"
severity: "emergency"
status: "enable"
switch_profile: "<your_own_value> (source switch-controller.switch-profile.name)"
switch_stp_settings:
status: "enable"
type: "virtual"
version: "134"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_switch_controller_managed_switch_data(json):
option_list = ['802_1X_settings', 'custom_command', 'delayed_restart_trigger',
'description', 'directly_connected', 'dynamic_capability',
'dynamically_discovered', 'fsw_wan1_admin', 'fsw_wan1_peer',
'fsw_wan2_admin', 'fsw_wan2_peer', 'igmp_snooping',
'max_allowed_trunk_members', 'mirror', 'name',
'owner_vdom', 'poe_detection_type', 'poe_pre_standard_detection',
'ports', 'pre_provisioned', 'staged_image_version',
'storm_control', 'stp_settings', 'switch_device_tag',
'switch_id', 'switch_log', 'switch_profile',
'switch_stp_settings', 'type', 'version']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
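# Example (illustrative, not part of the original module):
#   underscore_to_hyphen({'igmp_snooping': {'flood_unknown_multicast': 'enable'}})
#   -> {'igmp-snooping': {'flood-unknown-multicast': 'enable'}}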
def switch_controller_managed_switch(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
    elif data['switch_controller_managed_switch'] and 'state' in data['switch_controller_managed_switch']:
state = data['switch_controller_managed_switch']['state']
else:
state = True
switch_controller_managed_switch_data = data['switch_controller_managed_switch']
filtered_data = underscore_to_hyphen(filter_switch_controller_managed_switch_data(switch_controller_managed_switch_data))
if state == "present":
return fos.set('switch-controller',
'managed-switch',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('switch-controller',
'managed-switch',
mkey=filtered_data['switch-id'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_switch_controller(data, fos):
    if data['switch_controller_managed_switch']:
        resp = switch_controller_managed_switch(data, fos)
    else:
        # Guard: without this branch `resp` would be referenced before
        # assignment when no task body is supplied.
        raise ValueError('missing task body: switch_controller_managed_switch')
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"switch_controller_managed_switch": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"802_1X_settings": {"required": False, "type": "dict",
"options": {
"link_down_auth": {"required": False, "type": "str",
"choices": ["set-unauth", "no-action"]},
"local_override": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"max_reauth_attempt": {"required": False, "type": "int"},
"reauth_period": {"required": False, "type": "int"}
}},
"custom_command": {"required": False, "type": "list",
"options": {
"command_entry": {"required": False, "type": "str"},
"command_name": {"required": False, "type": "str"}
}},
"delayed_restart_trigger": {"required": False, "type": "int"},
"description": {"required": False, "type": "str"},
"directly_connected": {"required": False, "type": "int"},
"dynamic_capability": {"required": False, "type": "int"},
"dynamically_discovered": {"required": False, "type": "int"},
"fsw_wan1_admin": {"required": False, "type": "str",
"choices": ["discovered", "disable", "enable"]},
"fsw_wan1_peer": {"required": False, "type": "str"},
"fsw_wan2_admin": {"required": False, "type": "str",
"choices": ["discovered", "disable", "enable"]},
"fsw_wan2_peer": {"required": False, "type": "str"},
"igmp_snooping": {"required": False, "type": "dict",
"options": {
"aging_time": {"required": False, "type": "int"},
"flood_unknown_multicast": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_override": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"max_allowed_trunk_members": {"required": False, "type": "int"},
"mirror": {"required": False, "type": "list",
"options": {
"dst": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"src_egress": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"src_ingress": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"status": {"required": False, "type": "str",
"choices": ["active", "inactive"]},
"switching_packet": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"name": {"required": False, "type": "str"},
"owner_vdom": {"required": False, "type": "str"},
"poe_detection_type": {"required": False, "type": "int"},
"poe_pre_standard_detection": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ports": {"required": False, "type": "list",
"options": {
"allowed_vlans": {"required": False, "type": "list",
"options": {
"vlan_name": {"required": False, "type": "str"}
}},
"allowed_vlans_all": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"arp_inspection_trust": {"required": False, "type": "str",
"choices": ["untrusted", "trusted"]},
"bundle": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"description": {"required": False, "type": "str"},
"dhcp_snoop_option82_trust": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dhcp_snooping": {"required": False, "type": "str",
"choices": ["untrusted", "trusted"]},
"discard_mode": {"required": False, "type": "str",
"choices": ["none", "all-untagged", "all-tagged"]},
"edge_port": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"export_tags": {"required": False, "type": "list",
"options": {
"tag_name": {"required": False, "type": "str"}
}},
"export_to": {"required": False, "type": "str"},
"export_to_pool": {"required": False, "type": "str"},
"export_to_pool_flag": {"required": False, "type": "int"},
"fgt_peer_device_name": {"required": False, "type": "str"},
"fgt_peer_port_name": {"required": False, "type": "str"},
"fiber_port": {"required": False, "type": "int"},
"flags": {"required": False, "type": "int"},
"fortilink_port": {"required": False, "type": "int"},
"igmp_snooping": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"igmps_flood_reports": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"igmps_flood_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"isl_local_trunk_name": {"required": False, "type": "str"},
"isl_peer_device_name": {"required": False, "type": "str"},
"isl_peer_port_name": {"required": False, "type": "str"},
"lacp_speed": {"required": False, "type": "str",
"choices": ["slow", "fast"]},
"learning_limit": {"required": False, "type": "int"},
"lldp_profile": {"required": False, "type": "str"},
"lldp_status": {"required": False, "type": "str",
"choices": ["disable", "rx-only", "tx-only",
"tx-rx"]},
"loop_guard": {"required": False, "type": "str",
"choices": ["enabled", "disabled"]},
"loop_guard_timeout": {"required": False, "type": "int"},
"max_bundle": {"required": False, "type": "int"},
"mclag": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"member_withdrawal_behavior": {"required": False, "type": "str",
"choices": ["forward", "block"]},
"members": {"required": False, "type": "list",
"options": {
"member_name": {"required": False, "type": "str"}
}},
"min_bundle": {"required": False, "type": "int"},
"mode": {"required": False, "type": "str",
"choices": ["static", "lacp-passive", "lacp-active"]},
"poe_capable": {"required": False, "type": "int"},
"poe_pre_standard_detection": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"poe_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"port_name": {"required": False, "type": "str"},
"port_number": {"required": False, "type": "int"},
"port_owner": {"required": False, "type": "str"},
"port_prefix_type": {"required": False, "type": "int"},
"port_security_policy": {"required": False, "type": "str"},
"port_selection_criteria": {"required": False, "type": "str",
"choices": ["src-mac", "dst-mac", "src-dst-mac",
"src-ip", "dst-ip", "src-dst-ip"]},
"qos_policy": {"required": False, "type": "str"},
"sample_direction": {"required": False, "type": "str",
"choices": ["tx", "rx", "both"]},
"sflow_counter_interval": {"required": False, "type": "int"},
"sflow_sample_rate": {"required": False, "type": "int"},
"sflow_sampler": {"required": False, "type": "str",
"choices": ["enabled", "disabled"]},
"speed": {"required": False, "type": "str",
"choices": ["10half", "10full", "100half",
"100full", "1000auto", "1000fiber",
"1000full", "10000", "40000",
"auto", "auto-module", "100FX-half",
"100FX-full", "100000full", "2500full",
"25000full", "50000full"]},
"speed_mask": {"required": False, "type": "int"},
"stacking_port": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["up", "down"]},
"stp_bpdu_guard": {"required": False, "type": "str",
"choices": ["enabled", "disabled"]},
"stp_bpdu_guard_timeout": {"required": False, "type": "int"},
"stp_root_guard": {"required": False, "type": "str",
"choices": ["enabled", "disabled"]},
"stp_state": {"required": False, "type": "str",
"choices": ["enabled", "disabled"]},
"switch_id": {"required": False, "type": "str"},
"type": {"required": False, "type": "str",
"choices": ["physical", "trunk"]},
"untagged_vlans": {"required": False, "type": "list",
"options": {
"vlan_name": {"required": False, "type": "str"}
}},
"virtual_port": {"required": False, "type": "int"},
"vlan": {"required": False, "type": "str"}
}},
"pre_provisioned": {"required": False, "type": "int"},
"staged_image_version": {"required": False, "type": "str"},
"storm_control": {"required": False, "type": "dict",
"options": {
"broadcast": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_override": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"rate": {"required": False, "type": "int"},
"unknown_multicast": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"unknown_unicast": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"stp_settings": {"required": False, "type": "dict",
"options": {
"forward_time": {"required": False, "type": "int"},
"hello_time": {"required": False, "type": "int"},
"local_override": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"max_age": {"required": False, "type": "int"},
"max_hops": {"required": False, "type": "int"},
"name": {"required": False, "type": "str"},
"pending_timer": {"required": False, "type": "int"},
"revision": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"switch_device_tag": {"required": False, "type": "str"},
"switch_id": {"required": False, "type": "str"},
"switch_log": {"required": False, "type": "dict",
"options": {
"local_override": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"severity": {"required": False, "type": "str",
"choices": ["emergency", "alert", "critical",
"error", "warning", "notification",
"information", "debug"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"switch_profile": {"required": False, "type": "str"},
"switch_stp_settings": {"required": False, "type": "dict",
"options": {
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"type": {"required": False, "type": "str",
"choices": ["virtual", "physical"]},
"version": {"required": False, "type": "int"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_switch_controller(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_switch_controller(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
JioCloud/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/admin/routers/extensions/__init__.py
|
12133432
| |
KennethPierce/pylearnk
|
refs/heads/fixNogil/master
|
pylearn2/devtools/tests/docscrape.py
|
5
|
"""Extract reference documentation from the NumPy source tree.
"""
import inspect
from nose.plugins.skip import SkipTest
import re
import sys
import types
class Reader(object):
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data,list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l+1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self,n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
def __iter__(self):
for line in self._str:
yield line
class NumpyDocString(object):
def __init__(self, docstring, name=None):
if name:
self.name = name
docstring = docstring.split('\n')
# De-indent paragraph
try:
indent = min(len(s) - len(s.lstrip()) for s in docstring
if s.strip())
except ValueError:
indent = 0
for n,line in enumerate(docstring):
docstring[n] = docstring[n][indent:]
self._doc = Reader(docstring)
self._parsed_data = {
'Signature': '',
'Summary': '',
'Extended Summary': [],
'Parameters': [],
'Other Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'See Also': [],
'Notes': [],
'References': '',
'Examples': '',
'index': {},
'Attributes': [],
'Methods': [],
}
self.section_order = []
self._parse()
def __getitem__(self,key):
return self._parsed_data[key]
def __setitem__(self,key,val):
        if key not in self._parsed_data:
raise ValueError("Unknown section %s" % key)
else:
self._parsed_data[key] = val
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ----------
return (len(l1) == len(l2) and l2 == '-'*len(l1))
def _strip(self,doc):
i = 0
j = 0
for i,line in enumerate(doc):
if line.strip(): break
for j,line in enumerate(doc[::-1]):
if line.strip(): break
return doc[i:len(doc)-j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
                return
else:
yield name, self._strip(data[2:])
def _parse_param_list(self,content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
for n,line in enumerate(desc):
desc[n] = line.strip()
params.append((arg_name,arg_type,desc))
return params
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, func_name3
"""
functions = []
current_func = None
rest = []
for line in content:
if not line.strip(): continue
if ':' in line:
if current_func:
functions.append((current_func, rest))
r = line.split(':', 1)
current_func = r[0].strip()
r[1] = r[1].strip()
if r[1]:
rest = [r[1]]
else:
rest = []
elif not line.startswith(' '):
if current_func:
functions.append((current_func, rest))
current_func = None
rest = []
if ',' in line:
for func in line.split(','):
func = func.strip()
if func:
functions.append((func, []))
elif line.strip():
current_func = line.strip()
elif current_func is not None:
rest.append(line.strip())
if current_func:
functions.append((current_func, rest))
return functions
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if len(section) > 1:
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
summary = self._doc.read_to_next_empty_line()
summary_str = "\n".join([s.strip() for s in summary])
if re.compile('^([\w. ]+=)?[\w\.]+\(.*\)$').match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
self['Summary'] = self._doc.read_to_next_empty_line()
elif re.compile('^[\w]+\n[-]+').match(summary_str):
self['Summary'] = ''
self._doc.reset()
else:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
for (section, content) in self._read_sections():
if not section.startswith('..'):
section = ' '.join([s.capitalize()
for s in section.split(' ')])
if section in ('Parameters', 'Other Parameters', 'Returns',
'Raises', 'Warns', 'Attributes', 'Methods'):
self[section] = self._parse_param_list(content)
self.section_order.append(section)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
self.section_order.append('index')
elif section.lower() == 'see also':
self['See Also'] = self._parse_see_also(content)
self.section_order.append('See Also')
else:
self[section] = content
self.section_order.append(section)
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name)*symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
if not self['Signature']:
return []
return ["*%s*" % self['Signature'].replace('*','\*')] + ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param,param_type,desc in self[name]:
out += ['%s : %s' % (param, param_type)]
out += self._str_indent(desc)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
def _str_see_also(self):
if not self['See Also']: return []
out = []
out += self._str_header("See Also")
last_had_desc = True
for func, desc in self['See Also']:
if desc or last_had_desc:
out += ['']
out += ["`%s`_" % func]
else:
out[-1] += ", `%s`_" % func
if desc:
out += self._str_indent(desc)
last_had_desc = True
else:
last_had_desc = False
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Other Parameters',
'Returns', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_see_also()
for s in ('Notes','References','Examples'):
out += self._str_section(s)
out += self._str_index()
return '\n'.join(out)
# --
def get_errors(self, check_order=True):
errors = []
self._doc.reset()
for j, line in enumerate(self._doc):
if len(line) > 75:
if hasattr(self, 'name'):
errors.append("%s: Line %d exceeds 75 chars"
": \"%s\"..." % (self.name, j+1, line[:30]))
else:
errors.append("Line %d exceeds 75 chars"
": \"%s\"..." % (j+1, line[:30]))
if check_order:
canonical_order = ['Signature', 'Summary', 'Extended Summary',
'Attributes', 'Methods', 'Parameters',
'Other Parameters','Returns', 'Raises', 'Warns',
'See Also', 'Notes', 'References', 'Examples',
'index']
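            # Walk the parsed sections against the canonical list in
            # lock-step, popping from the copy until it matches; if the
            # copy runs out first, a section appeared out of order.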
canonical_order_copy = list(canonical_order)
for s in self.section_order:
while canonical_order_copy and s != canonical_order_copy[0]:
canonical_order_copy.pop(0)
if not canonical_order_copy:
errors.append(
"Sections in wrong order (starting at %s). The"
" right order is %s" % (s, canonical_order))
return errors
def indent(str,indent=4):
indent_str = ' '*indent
if str is None:
return indent_str
lines = str.split('\n')
return '\n'.join(indent_str + l for l in lines)
class NumpyFunctionDocString(NumpyDocString):
def __init__(self, docstring, function):
super(NumpyFunctionDocString, self).__init__(docstring)
args, varargs, keywords, defaults = inspect.getargspec(function)
if (args and args != ['self']) or varargs or keywords or defaults:
self.has_parameters = True
else:
self.has_parameters = False
def _parse(self):
self._parsed_data = {
'Signature': '',
'Summary': '',
'Extended Summary': [],
'Parameters': [],
'Other Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'See Also': [],
'Notes': [],
'References': '',
'Examples': '',
'index': {},
}
return NumpyDocString._parse(self)
def get_errors(self):
errors = NumpyDocString.get_errors(self)
if not self['Signature']:
            # errors.append("No function signature")
            # This check is currently too restrictive; disabled for now.
            pass
if not self['Summary']:
errors.append("No function summary line")
if len(" ".join(self['Summary'])) > 3*80:
errors.append("Brief function summary is longer than 3 lines")
if not self['Parameters'] and self.has_parameters:
errors.append("No Parameters section")
return errors
class NumpyClassDocString(NumpyDocString):
def __init__(self, docstring, class_name, class_object):
super(NumpyClassDocString, self).__init__(docstring)
self.class_name = class_name
methods = dict((name, func) for name, func
in inspect.getmembers(class_object))
self.has_parameters = False
if '__init__' in methods:
# verify if __init__ is a Python function. If it isn't
# (e.g. the function is implemented in C), getargspec will fail
if not inspect.ismethod(methods['__init__']):
return
args, varargs, keywords, defaults = inspect.getargspec(
methods['__init__'])
if (args and args != ['self']) or varargs or keywords or defaults:
self.has_parameters = True
def _parse(self):
self._parsed_data = {
'Signature': '',
'Summary': '',
'Extended Summary': [],
'Parameters': [],
'Other Parameters': [],
'Raises': [],
'Warns': [],
'See Also': [],
'Notes': [],
'References': '',
'Examples': '',
'index': {},
'Attributes': [],
'Methods': [],
}
return NumpyDocString._parse(self)
def __str__(self):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Attributes', 'Methods', 'Parameters', 'Raises',
'Warns'):
out += self._str_param_list(param_list)
out += self._str_see_also()
for s in ('Notes','References','Examples'):
out += self._str_section(s)
out += self._str_index()
return '\n'.join(out)
def get_errors(self):
errors = NumpyDocString.get_errors(self)
if not self['Parameters'] and self.has_parameters:
errors.append("%s class has no Parameters section"
% self.class_name)
return errors
class NumpyModuleDocString(NumpyDocString):
"""
Module doc strings: no parsing is done.
"""
def _parse(self):
self.out = []
def __str__(self):
return "\n".join(self._doc._str)
def get_errors(self):
errors = NumpyDocString.get_errors(self, check_order=False)
return errors
def header(text, style='-'):
return text + '\n' + style*len(text) + '\n'
class SphinxDocString(NumpyDocString):
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['**' + name + '**'] + [symbol*(len(name)+4)]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['``%s``' % self['Signature'].replace('*','\*')] + ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param, param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = self._str_indent(self[name])
out += content
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self, indent=0):
out = []
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters','Returns','Raises','Warns'):
out += self._str_param_list(param_list)
for s in ('Notes','References','Examples'):
out += self._str_section(s)
# out += self._str_index()
out = self._str_indent(out,indent)
return '\n'.join(out)
class FunctionDoc(object):
def __init__(self,func):
self._f = func
def __str__(self):
out = ''
doclines = inspect.getdoc(self._f) or ''
try:
doc = SphinxDocString(doclines)
except Exception, e:
print '*'*78
print "ERROR: '%s' while parsing `%s`" % (e, self._f)
print '*'*78
#print "Docstring follows:"
#print doclines
#print '='*78
return out
if doc['Signature']:
out += '%s\n' % header('**%s**' %
doc['Signature'].replace('*','\*'), '-')
else:
try:
# try to read signature
argspec = inspect.getargspec(self._f)
argspec = inspect.formatargspec(*argspec)
argspec = argspec.replace('*','\*')
out += header('%s%s' % (self._f.__name__, argspec), '-')
except TypeError, e:
out += '%s\n' % header('**%s()**' % self._f.__name__, '-')
out += str(doc)
return out
class ClassDoc(object):
def __init__(self,cls,modulename=''):
if not inspect.isclass(cls):
raise ValueError("Initialise using an object")
self._cls = cls
if modulename and not modulename.endswith('.'):
modulename += '.'
self._mod = modulename
self._name = cls.__name__
@property
def methods(self):
return [name for name,func in inspect.getmembers(self._cls)
if not name.startswith('_') and callable(func)]
def __str__(self):
out = ''
def replace_header(match):
return '"'*(match.end() - match.start())
for m in self.methods:
print "Parsing `%s`" % m
out += str(FunctionDoc(getattr(self._cls,m))) + '\n\n'
out += '.. index::\n single: %s; %s\n\n' % (self._name, m)
return out
def handle_function(val, name):
func_errors = []
docstring = inspect.getdoc(val)
if docstring is None:
func_errors.append((name, '**missing** function-level docstring'))
else:
func_errors = [
(name, e) for e in
NumpyFunctionDocString(docstring, val).get_errors()
]
return func_errors
def handle_module(val, name):
module_errors = []
docstring = val
if docstring is None:
module_errors.append((name, '**missing** module-level docstring'))
else:
module_errors = [
(name, e) for e in NumpyModuleDocString(docstring).get_errors()
]
return module_errors
def handle_method(method, method_name, class_name):
method_errors = []
# Skip out-of-library inherited methods
module = inspect.getmodule(method)
if module is not None:
if not module.__name__.startswith('pylearn2'):
return method_errors
docstring = inspect.getdoc(method)
if docstring is None:
method_errors.append((class_name, method_name,
'**missing** method-level docstring'))
else:
method_errors = [
(class_name, method_name, e) for e in
NumpyFunctionDocString(docstring, method).get_errors()
]
return method_errors
def handle_class(val, class_name):
cls_errors = []
docstring = inspect.getdoc(val)
if docstring is None:
cls_errors.append((class_name,
'**missing** class-level docstring'))
else:
cls_errors = [
(e,) for e in
NumpyClassDocString(docstring, class_name, val).get_errors()
]
# Get public methods and parse their docstrings
methods = dict(((name, func) for name, func in inspect.getmembers(val)
if not name.startswith('_') and callable(func) and type(func) is not type))
for m_name, method in methods.iteritems():
# skip error check if the method was inherited
# from a parent class (which means it wasn't
# defined in this source file)
if inspect.getmodule(method) is not None:
continue
cls_errors.extend(handle_method(method, m_name, class_name))
return cls_errors
def docstring_errors(filename, global_dict=None):
"""
Run a Python file, parse the docstrings of all the classes
and functions it declares, and return them.
Parameters
----------
filename : str
Filename of the module to run.
global_dict : dict, optional
Globals dictionary to pass along to `execfile()`.
Returns
-------
all_errors : list
Each entry of the list is a tuple, of length 2 or 3, with
format either
(func_or_class_name, docstring_error_description)
or
(class_name, method_name, docstring_error_description)
"""
if global_dict is None:
global_dict = {}
if '__file__' not in global_dict:
global_dict['__file__'] = filename
if '__doc__' not in global_dict:
global_dict['__doc__'] = None
try:
execfile(filename, global_dict)
except SystemExit:
pass
except SkipTest:
raise AssertionError("Couldn't verify format of " + filename +
"due to SkipTest")
all_errors = []
    for key, val in global_dict.iteritems():
        if key == '__doc__':
            # The module docstring's key starts with '_', so it must be
            # handled before the underscore filter below.
            all_errors.extend(handle_module(val, key))
        elif not key.startswith('_'):
            module_name = ""
            if hasattr(inspect.getmodule(val), '__name__'):
                module_name = inspect.getmodule(val).__name__
            if (inspect.isfunction(val) or inspect.isclass(val)) and\
                (inspect.getmodule(val) is None
                 or module_name == '__builtin__'):
                if inspect.isfunction(val):
                    all_errors.extend(handle_function(val, key))
                elif inspect.isclass(val):
                    all_errors.extend(handle_class(val, key))
if all_errors:
all_errors.insert(0, ("%s:"%filename,))
return all_errors
if __name__ == "__main__":
all_errors = docstring_errors(sys.argv[1])
if len(all_errors) > 0:
print "*" * 30, "docstring errors", "*" * 30
for line in all_errors:
print ':'.join(line)
sys.exit(int(len(all_errors) > 0))
|
2014c2g2/2014c2
|
refs/heads/master
|
wsgi/static/Brython2.1.0-20140419-113919/Lib/contextlib.py
|
737
|
"""Utilities for with-statement contexts. See PEP 343."""
import sys
from collections import deque
from functools import wraps
__all__ = ["contextmanager", "closing", "ContextDecorator", "ExitStack"]
class ContextDecorator(object):
"A base class or mixin that enables context managers to work as decorators."
def _recreate_cm(self):
"""Return a recreated instance of self.
Allows an otherwise one-shot context manager like
_GeneratorContextManager to support use as
a decorator via implicit recreation.
This is a private interface just for _GeneratorContextManager.
See issue #11647 for details.
"""
return self
def __call__(self, func):
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
return func(*args, **kwds)
return inner
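# Hedged usage sketch (not part of the original module): subclassing
# ContextDecorator lets one object serve as both a context manager and a
# decorator. The timer below is an illustrative assumption, not stdlib API.
class _ExampleTimer(ContextDecorator):
    def __enter__(self):
        import time
        self._start = time.time()
        return self
    def __exit__(self, *exc):
        import time
        self.elapsed = time.time() - self._start
        return False  # never suppress exceptions
# Applying @_ExampleTimer() to a function runs every call inside the timing
# context via the inherited __call__.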
class _GeneratorContextManager(ContextDecorator):
"""Helper for @contextmanager decorator."""
def __init__(self, func, *args, **kwds):
self.gen = func(*args, **kwds)
self.func, self.args, self.kwds = func, args, kwds
def _recreate_cm(self):
# _GCM instances are one-shot context managers, so the
# CM must be recreated each time a decorated function is
# called
return self.__class__(self.func, *self.args, **self.kwds)
def __enter__(self):
try:
return next(self.gen)
except StopIteration:
raise RuntimeError("generator didn't yield")
def __exit__(self, type, value, traceback):
if type is None:
try:
next(self.gen)
except StopIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration as exc:
# Suppress the exception *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed
return exc is not value
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
if sys.exc_info()[1] is not value:
raise
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return _GeneratorContextManager(func, *args, **kwds)
return helper
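# Hedged usage sketch (not part of the original module): a minimal
# generator-based context manager built with @contextmanager. The name and
# behavior are illustrative assumptions only.
@contextmanager
def _example_opened(path):
    f = open(path)
    try:
        yield f
    finally:
        f.close()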
class closing(object):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
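# Hedged usage sketch (not part of the original module): closing() adapts
# any object exposing close() but not the context manager protocol;
# urllib.request.urlopen is just an illustrative assumption here.
def _example_fetch(url):
    from urllib.request import urlopen
    with closing(urlopen(url)) as page:
        return page.read()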
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(object):
"""Context manager for dynamic management of a stack of exit callbacks
For example:
with ExitStack() as stack:
files = [stack.enter_context(open(fname)) for fname in filenames]
# All opened files will automatically be closed at the end of
# the with statement, even if attempts to open files later
# in the list raise an exception
"""
def __init__(self):
self._exit_callbacks = deque()
def pop_all(self):
"""Preserve the context stack by transferring it to a new instance"""
new_stack = type(self)()
new_stack._exit_callbacks = self._exit_callbacks
self._exit_callbacks = deque()
return new_stack
def _push_cm_exit(self, cm, cm_exit):
"""Helper to correctly register callbacks to __exit__ methods"""
def _exit_wrapper(*exc_details):
return cm_exit(cm, *exc_details)
_exit_wrapper.__self__ = cm
self.push(_exit_wrapper)
def push(self, exit):
"""Registers a callback with the standard __exit__ method signature
Can suppress exceptions the same way __exit__ methods can.
Also accepts any object with an __exit__ method (registering a call
to the method instead of the object itself)
"""
# We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods
_cb_type = type(exit)
try:
exit_method = _cb_type.__exit__
except AttributeError:
            # Not a context manager, so assume it's a callable
self._exit_callbacks.append(exit)
else:
self._push_cm_exit(exit, exit_method)
return exit # Allow use as a decorator
def callback(self, callback, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
def _exit_wrapper(exc_type, exc, tb):
callback(*args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection
_exit_wrapper.__wrapped__ = callback
self.push(_exit_wrapper)
return callback # Allow use as a decorator
def enter_context(self, cm):
"""Enters the supplied context manager
If successful, also pushes its __exit__ method as a callback and
returns the result of the __enter__ method.
"""
# We look up the special methods on the type to match the with statement
_cm_type = type(cm)
_exit = _cm_type.__exit__
result = _cm_type.__enter__(cm)
self._push_cm_exit(cm, _exit)
return result
def close(self):
"""Immediately unwind the context stack"""
self.__exit__(None, None, None)
def __enter__(self):
return self
def __exit__(self, *exc_details):
received_exc = exc_details[0] is not None
# We manipulate the exception state so it behaves as though
# we were actually nesting multiple with statements
frame_exc = sys.exc_info()[1]
def _fix_exception_context(new_exc, old_exc):
while 1:
exc_context = new_exc.__context__
if exc_context in (None, frame_exc):
break
new_exc = exc_context
new_exc.__context__ = old_exc
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
pending_raise = False
while self._exit_callbacks:
cb = self._exit_callbacks.pop()
try:
if cb(*exc_details):
suppressed_exc = True
pending_raise = False
exc_details = (None, None, None)
except:
new_exc_details = sys.exc_info()
# simulate the stack of exceptions by setting the context
_fix_exception_context(new_exc_details[1], exc_details[1])
pending_raise = True
exc_details = new_exc_details
if pending_raise:
try:
# bare "raise exc_details[1]" replaces our carefully
# set-up context
fixed_ctx = exc_details[1].__context__
raise exc_details[1]
except BaseException:
exc_details[1].__context__ = fixed_ctx
raise
return received_exc and suppressed_exc
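# Hedged usage sketch (not part of the original module): pop_all() supports
# the "clean up on failure, keep open on success" idiom. The file handling
# below is an illustrative assumption only.
def _example_open_all(filenames):
    with ExitStack() as stack:
        files = [stack.enter_context(open(name)) for name in filenames]
        # If any open() raised, the with-block already closed the others.
        # On success, transfer the close callbacks to a new stack so the
        # caller decides when the files are closed.
        keep = stack.pop_all()
    return files, keep.close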
|
salopensource/sal
|
refs/heads/main
|
inventory/apps.py
|
2
|
from django.apps import AppConfig
class InventoryAppConfig(AppConfig):
name = "inventory"
|
jpshort/odoo
|
refs/heads/8.0
|
addons/website_event/tests/test_ui.py
|
339
|
import openerp.tests
@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(True)
class TestUi(openerp.tests.HttpCase):
def test_admin(self):
self.phantom_js("/", "openerp.Tour.run('event', 'test')", "openerp.Tour.tours.event", login='admin')
|
visualputty/Landing-Lights
|
refs/heads/master
|
django/core/management/commands/runfcgi.py
|
674
|
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Runs this project as a FastCGI application. Requires flup."
args = '[various KEY=val options, use `runfcgi help` for help]'
def handle(self, *args, **options):
from django.conf import settings
from django.utils import translation
# Activate the current language, because it won't get activated later.
try:
translation.activate(settings.LANGUAGE_CODE)
except AttributeError:
pass
from django.core.servers.fastcgi import runfastcgi
runfastcgi(args)
def usage(self, subcommand):
from django.core.servers.fastcgi import FASTCGI_HELP
return FASTCGI_HELP
|
rjschof/gem5
|
refs/heads/master
|
src/arch/x86/isa/insts/simd128/floating_point/arithmetic/reciprocal_square_root.py
|
91
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# RSQRTPS
# RSQRTPD
'''
|
xmission/d-note
|
refs/heads/master
|
venv/lib/python2.7/site-packages/Crypto/SelfTest/Cipher/test_ARC4.py
|
117
|
# -*- coding: utf-8 -*-
#
# SelfTest/Cipher/ARC4.py: Self-test for the Alleged-RC4 cipher
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Cipher.ARC4"""
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (plaintext, ciphertext, key[, description]) tuples.
test_data = [
# Test vectors from Eric Rescorla's message with the subject
# "RC4 compatibility testing", sent to the cipherpunks mailing list on
# September 13, 1994.
# http://cypherpunks.venona.com/date/1994/09/msg00420.html
('0123456789abcdef', '75b7878099e0c596', '0123456789abcdef',
'Test vector 0'),
('0000000000000000', '7494c2e7104b0879', '0123456789abcdef',
'Test vector 1'),
('0000000000000000', 'de188941a3375d3a', '0000000000000000',
'Test vector 2'),
('00000000000000000000', 'd6a141a7ec3c38dfbd61', 'ef012345',
'Test vector 3'),
('01' * 512,
'7595c3e6114a09780c4ad452338e1ffd9a1be9498f813d76533449b6778dcad8'
+ 'c78a8d2ba9ac66085d0e53d59c26c2d1c490c1ebbe0ce66d1b6b1b13b6b919b8'
+ '47c25a91447a95e75e4ef16779cde8bf0a95850e32af9689444fd377108f98fd'
+ 'cbd4e726567500990bcc7e0ca3c4aaa304a387d20f3b8fbbcd42a1bd311d7a43'
+ '03dda5ab078896ae80c18b0af66dff319616eb784e495ad2ce90d7f772a81747'
+ 'b65f62093b1e0db9e5ba532fafec47508323e671327df9444432cb7367cec82f'
+ '5d44c0d00b67d650a075cd4b70dedd77eb9b10231b6b5b741347396d62897421'
+ 'd43df9b42e446e358e9c11a9b2184ecbef0cd8e7a877ef968f1390ec9b3d35a5'
+ '585cb009290e2fcde7b5ec66d9084be44055a619d9dd7fc3166f9487f7cb2729'
+ '12426445998514c15d53a18c864ce3a2b7555793988126520eacf2e3066e230c'
+ '91bee4dd5304f5fd0405b35bd99c73135d3d9bc335ee049ef69b3867bf2d7bd1'
+ 'eaa595d8bfc0066ff8d31509eb0c6caa006c807a623ef84c3d33c195d23ee320'
+ 'c40de0558157c822d4b8c569d849aed59d4e0fd7f379586b4b7ff684ed6a189f'
+ '7486d49b9c4bad9ba24b96abf924372c8a8fffb10d55354900a77a3db5f205e1'
+ 'b99fcd8660863a159ad4abe40fa48934163ddde542a6585540fd683cbfd8c00f'
+ '12129a284deacc4cdefe58be7137541c047126c8d49e2755ab181ab7e940b0c0',
'0123456789abcdef',
"Test vector 4"),
]
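# Hedged sanity check (not part of the original suite): the first tuple
# above can be verified directly against Crypto.Cipher.ARC4, wrapped in a
# function so nothing runs at import time.
def _check_first_vector():
    from binascii import hexlify, unhexlify
    from Crypto.Cipher import ARC4
    cipher = ARC4.new(unhexlify(b'0123456789abcdef'))
    assert hexlify(cipher.encrypt(unhexlify(b'0123456789abcdef'))) == \
        b'75b7878099e0c596'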
def get_tests(config={}):
from Crypto.Cipher import ARC4
from common import make_stream_tests
return make_stream_tests(ARC4, "ARC4", test_data)
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
kybriainfotech/iSocioCRM
|
refs/heads/8.0
|
doc/cla/stats.py
|
319
|
#!/usr/bin/python
#
# Runme From the repo toplevel directory
#
import subprocess
import glob
import re
import pprint
cla_glob = "doc/cla/*/*.md"
cla = ''.join(open(f).read() for f in glob.glob(cla_glob))
cla = cla.lower()
def cla_signed(email):
    if re.match(r'.*(odoo|openerp|tinyerp)\.com$', email):
return True
if cla.find(email) != -1:
return True
return False
def blamestat(ext='py'):
r = {}
ok = 0
okl = []
ko = 0
kol = []
p = subprocess.Popen("git ls-tree -r -z --name-only HEAD | grep -z '.%s$' | xargs -0 -n1 git blame --line-porcelain HEAD |grep '^author-mail ' |sort |uniq -c|sort -nr" % ext, shell=True, stdout = subprocess.PIPE)
for i in p.stdout.read().split('\n'):
mo = re.search('(\d+) author-mail <([^ @<]+@[^ @<]+)>',i)
if mo:
lines = int(mo.group(1))
email = mo.group(2)
if cla_signed(email):
ok += lines
okl.append(i)
else:
ko += lines
kol.append(i)
print '-'*60
print 'Stats for ',ext
print '-'*60
print "\nCLA SIGNED %s/%s (%.0f%%)" % (ok, ok+ko, ok*100.0/(ok+ko))
for i in okl:
print i
print "\nCLA MISSING %s/%s (%.0f%%)\n" % (ko, ok+ko, ko*100.0/(ok+ko))
for i in kol:
print i
print
print
blamestat('md')
blamestat('rst')
blamestat('py')
blamestat('js')
blamestat('xml')
blamestat('csv')
|
msincenselee/vnpy
|
refs/heads/vnpy2
|
examples/client_server/client/run_client.py
|
3
|
from vnpy.event import EventEngine
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import MainWindow, create_qapp
from vnpy.gateway.rpc import RpcGateway
from vnpy.app.cta_strategy import CtaStrategyApp
def main():
""""""
qapp = create_qapp()
event_engine = EventEngine()
main_engine = MainEngine(event_engine)
main_engine.add_gateway(RpcGateway)
main_engine.add_app(CtaStrategyApp)
main_window = MainWindow(main_engine, event_engine)
main_window.showMaximized()
qapp.exec()
if __name__ == "__main__":
main()
|
szopu/django
|
refs/heads/master
|
tests/utils_tests/test_module/__init__.py
|
439
|
class SiteMock(object):
_registry = {}
site = SiteMock()
|
hgrif/incubator-airflow
|
refs/heads/master
|
tests/contrib/operators/test_file_to_wasb.py
|
38
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import unittest
from airflow import DAG, configuration
from airflow.contrib.operators.file_to_wasb import FileToWasbOperator
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
class TestFileToWasbOperator(unittest.TestCase):
_config = {
'file_path': 'file',
'container_name': 'container',
'blob_name': 'blob',
'wasb_conn_id': 'wasb_default',
'retries': 3,
}
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': datetime.datetime(2017, 1, 1)
}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
operator = FileToWasbOperator(
task_id='wasb_operator',
dag=self.dag,
**self._config
)
self.assertEqual(operator.file_path, self._config['file_path'])
self.assertEqual(operator.container_name,
self._config['container_name'])
self.assertEqual(operator.blob_name, self._config['blob_name'])
self.assertEqual(operator.wasb_conn_id, self._config['wasb_conn_id'])
self.assertEqual(operator.load_options, {})
self.assertEqual(operator.retries, self._config['retries'])
operator = FileToWasbOperator(
task_id='wasb_operator',
dag=self.dag,
load_options={'timeout': 2},
**self._config
)
self.assertEqual(operator.load_options, {'timeout': 2})
@mock.patch('airflow.contrib.operators.file_to_wasb.WasbHook',
autospec=True)
def test_execute(self, mock_hook):
mock_instance = mock_hook.return_value
operator = FileToWasbOperator(
task_id='wasb_sensor',
dag=self.dag,
load_options={'timeout': 2},
**self._config
)
operator.execute(None)
mock_instance.load_file.assert_called_once_with(
'file', 'container', 'blob', timeout=2
)
if __name__ == '__main__':
unittest.main()
|
xq262144/hue
|
refs/heads/master
|
desktop/core/ext-py/guppy-0.1.10/guppy/etc/Unpack.py
|
37
|
#._cv_part guppy.etc.Unpack
from opcode import *
import sys
CALL_FUNCTION = opmap['CALL_FUNCTION']
UNPACK_SEQUENCE = opmap['UNPACK_SEQUENCE']
STORE_FAST = opmap['STORE_FAST']
STORE_NAME = opmap['STORE_NAME']
STORE_GLOBAL = opmap['STORE_GLOBAL']
STORE_ATTR = opmap['STORE_ATTR']
STORE_SUBSCR = opmap['STORE_SUBSCR']
STORE_SLICE = opmap['STORE_SLICE+0']
def unpack(x):
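    # Deliberately raise and catch ZeroDivisionError to capture the
    # caller's frame from the traceback, then inspect that frame's
    # bytecode at f_lasti: a CALL_FUNCTION followed by UNPACK_SEQUENCE
    # reveals how many names the caller unpacks into, and the STORE_*
    # ops that follow name them, so matching attributes/items of x can
    # be returned in the caller's order.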
try:
1/0
except:
typ, value, traceback = sys.exc_info()
f = traceback.tb_frame.f_back
co = f.f_code
i = f.f_lasti
code = co.co_code
if ord(code[i]) == CALL_FUNCTION and ord(code[i+3]) == UNPACK_SEQUENCE:
i += 3
n = ord(code[i+1]) + ord(code[i+2])*256
i += 3
names = []
while len(names) < n and i < len(code):
op = ord(code[i])
i += 1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256
i += 2
if op == STORE_FAST:
names.append(co.co_varnames[oparg])
elif op in (STORE_NAME, STORE_ATTR, STORE_GLOBAL):
names.append(co.co_names[oparg])
if op == STORE_SUBSCR or STORE_SLICE <= op <= STORE_SLICE+3:
break
if len(names) == n:
r = []
for name in names:
try:
v = getattr(x, name)
except AttributeError:
v = x[name]
r.append(v)
return r
raise SyntaxError
def test_unpack():
class C:
a=1
b=3
c=4
y = C()
a, b, c = unpack(y)
x = [a,b,c]
class D:
pass
D.a, c, D.b = unpack(y)
x.extend([D.a, c, D.b])
l=[None]
try:
l[0], c, b = unpack(y)
except SyntaxError:
pass
else:
raise RuntimeError
l=[None]
try:
l[1:2], c, b = unpack(y)
except SyntaxError:
pass
else:
raise RuntimeError
y=[]
y = {'a':'A', 'b':'B'}
a, b = unpack(y)
x.extend([a, b])
global g
y['g']='G'
g, b = unpack(y)
x.extend([g, b])
if x != [1, 3, 4, 1, 4, 3, 'A', 'B', 'G', 'B']:
raise RuntimeError
__all__ = ['unpack']
if __name__ == '__main__':
test_unpack()
|
chandranaik/Aligning-of-PIE-with-rfc8033-in-ns3
|
refs/heads/master
|
src/visualizer/visualizer/plugins/show_last_packets.py
|
12
|
import gobject
import gtk
import ns.core
import ns.network
import ns.visualizer
from visualizer.base import InformationWindow
from visualizer.higcontainer import HIGContainer
from kiwi.ui.objectlist import ObjectList, Column
## ShowLastPackets class
class ShowLastPackets(InformationWindow):
## @var win
# window
## @var visualizer
# visualizer
## @var viz_node
# visualizer node
## @var node
# the node
## @var tx_list
# packet transmit list
## @var rx_list
# packet receive list
## @var drop_list
# packet drop list
## @var packet_capture_options
# packet capture options
## @var packet_filter_widget
# packet filter widget
## @var packet_filter_list
# list of TypeIdConfig instances
## @var op_AND_button
# AND button
## @var op_OR_button
# OR button
class PacketList(gtk.ScrolledWindow):
"""
PacketList class
"""
## @var table_model
# table model
(
COLUMN_TIME,
COLUMN_INTERFACE,
COLUMN_SIZE,
COLUMN_CONTENTS,
) = range(4)
def __init__(self):
"""
Initializer
@param self this object
"""
super(ShowLastPackets.PacketList, self).__init__()
self.set_properties(hscrollbar_policy=gtk.POLICY_AUTOMATIC,
vscrollbar_policy=gtk.POLICY_AUTOMATIC)
self.table_model = gtk.ListStore(*([str]*4))
treeview = gtk.TreeView(self.table_model)
treeview.show()
self.add(treeview)
def add_column(descr, colid):
column = gtk.TreeViewColumn(descr, gtk.CellRendererText(), text=colid)
treeview.append_column(column)
add_column("Time", self.COLUMN_TIME)
add_column("Interface", self.COLUMN_INTERFACE)
add_column("Size", self.COLUMN_SIZE)
add_column("Contents", self.COLUMN_CONTENTS)
def update(self, node, packet_list):
"""!
Update function
@param self this object
@param node the node
@param packet_list packet list
@return none
"""
self.table_model.clear()
for sample in packet_list:
tree_iter = self.table_model.append()
if sample.device is None:
interface_name = "(unknown)"
else:
interface_name = ns.core.Names.FindName(sample.device)
if not interface_name:
interface_name = "(interface %i)" % sample.device.GetIfIndex()
self.table_model.set(tree_iter,
self.COLUMN_TIME, str(sample.time.GetSeconds()),
self.COLUMN_INTERFACE, interface_name,
self.COLUMN_SIZE, str(sample.packet.GetSize ()),
self.COLUMN_CONTENTS, str(sample.packet)
)
def __init__(self, visualizer, node_index):
"""
Initializer
@param self this object
@param visualizer the visualizer object
@param node_index the node index
"""
InformationWindow.__init__(self)
self.win = gtk.Dialog(parent=visualizer.window,
flags=gtk.DIALOG_DESTROY_WITH_PARENT|gtk.DIALOG_NO_SEPARATOR,
buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
self.win.connect("response", self._response_cb)
self.win.set_title("Last packets for node %i" % node_index)
self.visualizer = visualizer
self.viz_node = visualizer.get_node(node_index)
self.node = ns.network.NodeList.GetNode(node_index)
def smart_expand(expander, vbox):
if expander.get_expanded():
vbox.set_child_packing(expander, expand=True, fill=True, padding=0, pack_type=gtk.PACK_START)
else:
vbox.set_child_packing(expander, expand=False, fill=False, padding=0, pack_type=gtk.PACK_START)
main_hbox = gtk.HBox(False, 4)
main_hbox.show()
main_vbox = gtk.VBox(False, 4)
main_vbox.show()
self.win.vbox.add(main_hbox)
main_hbox.add(main_vbox)
self.tx_list = self.PacketList()
self.tx_list.show()
group = gtk.Expander("Last transmitted packets")
group.show()
group.add(self.tx_list)
main_vbox.pack_start(group, expand=False, fill=False)
group.connect_after("activate", smart_expand, main_vbox)
self.rx_list = self.PacketList()
self.rx_list.show()
group = gtk.Expander("Last received packets")
group.show()
group.add(self.rx_list)
main_vbox.pack_start(group, expand=False, fill=False)
group.connect_after("activate", smart_expand, main_vbox)
self.drop_list = self.PacketList()
self.drop_list.show()
group = gtk.Expander("Last dropped packets")
group.show()
group.add(self.drop_list)
main_vbox.pack_start(group, expand=False, fill=False)
group.connect_after("activate", smart_expand, main_vbox)
# Packet Filter
# - options
self.packet_capture_options = ns.visualizer.PyViz.PacketCaptureOptions()
self.packet_capture_options.numLastPackets = 100
packet_filter_vbox = gtk.VBox(False, 4)
packet_filter_vbox.show()
main_hbox.add(packet_filter_vbox)
sel_buttons_box = gtk.HButtonBox()
sel_buttons_box.show()
packet_filter_vbox.pack_start(sel_buttons_box, False, False, 4)
select_all_button = gobject.new(gtk.Button, label="Sel. All", visible=True)
select_none_button = gobject.new(gtk.Button, label="Sel. None", visible=True)
sel_buttons_box.add(select_all_button)
sel_buttons_box.add(select_none_button)
self.packet_filter_widget = ObjectList([
Column('selected', title="Sel.", data_type=bool, editable=True),
Column('name', title="Header"),
], sortable=True)
self.packet_filter_widget.show()
packet_filter_vbox.pack_start(self.packet_filter_widget, True, True, 4)
class TypeIdConfig(object):
__slots__ = ['name', 'selected', 'typeid']
self.packet_filter_list = [] # list of TypeIdConfig instances
Header = ns.core.TypeId.LookupByName("ns3::Header")
Trailer = ns.core.TypeId.LookupByName("ns3::Trailer")
for typeid_i in range(ns.core.TypeId.GetRegisteredN()):
typeid = ns.core.TypeId.GetRegistered(typeid_i)
# check if this is a header or trailer subtype
typeid_tmp = typeid
type_is_good = False
while 1:
if typeid_tmp == Header or typeid_tmp == Trailer:
type_is_good = True
break
if typeid_tmp.HasParent():
typeid_tmp = typeid_tmp.GetParent()
else:
break
if not type_is_good:
continue
if typeid in [Header, Trailer]:
continue
c = TypeIdConfig()
c.selected = True
c.name = typeid.GetName()
c.typeid = typeid
self.packet_filter_list.append(c)
self.packet_filter_widget.add_list(self.packet_filter_list)
def update_capture_options():
if self.op_AND_button.props.active:
self.packet_capture_options.mode = ns.visualizer.PyViz.PACKET_CAPTURE_FILTER_HEADERS_AND
else:
self.packet_capture_options.mode = ns.visualizer.PyViz.PACKET_CAPTURE_FILTER_HEADERS_OR
self.packet_capture_options.numLastPackets = 100
self.packet_capture_options.headers = [c.typeid for c in self.packet_filter_list if c.selected]
self.visualizer.simulation.lock.acquire()
try:
self.visualizer.simulation.sim_helper.SetPacketCaptureOptions(
self.node.GetId(), self.packet_capture_options)
finally:
self.visualizer.simulation.lock.release()
def sel_all_cb(bt):
for c in self.packet_filter_list:
c.selected = True
self.packet_filter_widget.refresh()
update_capture_options()
def sel_none_cb(bt):
for c in self.packet_filter_list:
c.selected = False
self.packet_filter_widget.refresh()
update_capture_options()
select_all_button.connect("clicked", sel_all_cb)
select_none_button.connect("clicked", sel_none_cb)
op_buttons_box = gtk.HButtonBox()
op_buttons_box.show()
packet_filter_vbox.pack_start(op_buttons_box, False, False, 4)
self.op_AND_button = gobject.new(gtk.RadioButton, label="AND", visible=True)
self.op_OR_button = gobject.new(gtk.RadioButton, label="OR", visible=True, group=self.op_AND_button)
op_buttons_box.add(self.op_AND_button)
op_buttons_box.add(self.op_OR_button)
self.op_OR_button.props.active = True
self.op_AND_button.connect("toggled", lambda b: update_capture_options())
def cell_edited(l, obj, attribute):
update_capture_options()
self.packet_filter_widget.connect("cell-edited", cell_edited)
update_capture_options()
self.visualizer.add_information_window(self)
self.win.set_default_size(600, 300)
self.win.show()
def _response_cb(self, win, response):
"""!
Response callback function
@param self this object
@param win the window
@param response the response
@return none
"""
self.win.destroy()
self.visualizer.remove_information_window(self)
def update(self):
"""!
Update function
@param self this object
@return none
"""
last_packets = self.visualizer.simulation.sim_helper.GetLastPackets(self.node.GetId())
self.tx_list.update(self.node, last_packets.lastTransmittedPackets)
self.rx_list.update(self.node, last_packets.lastReceivedPackets)
self.drop_list.update(self.node, last_packets.lastDroppedPackets)
def populate_node_menu(viz, node, menu):
menu_item = gtk.MenuItem("Show Last Packets")
menu_item.show()
def _show_it(dummy_menu_item):
ShowLastPackets(viz, node.node_index)
menu_item.connect("activate", _show_it)
menu.add(menu_item)
def register(viz):
viz.connect("populate-node-menu", populate_node_menu)
|
heiden-deng/anaconda
|
refs/heads/master
|
pyanaconda/simpleconfig.py
|
8
|
#
# simpleconfig.py - representation of a simple configuration file (sh-like)
#
# Copyright (C) 1999-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Author(s): Matt Wilson <msw@redhat.com>
# Jeremy Katz <katzj@redhat.com>
# Will Woods <wwoods@redhat.com>
# Brian C. Lane <bcl@redhat.com>
#
import os
import shutil
import shlex
from pipes import _safechars
import tempfile
from pyanaconda.iutil import upperASCII, eintr_retry_call
def unquote(s):
return ' '.join(shlex.split(s))
def quote(s, always=False):
""" If always is set it returns a quoted value
"""
if not always:
for c in s:
if c not in _safechars:
break
else:
return s
return '"'+s.replace('"', '\\"')+'"'
def find_comment(s):
""" Look for a # comment outside of a quoted string.
If there are no quotes, find the last # in the string.
:param str s: string to check for comment and quotes
:returns: index of comment or None
:rtype: int or None
Handles comments inside quotes and quotes inside quotes.
"""
q = None
for i in range(len(s)):
if not q and s[i] == '#':
return i
# Ignore quotes inside other quotes
if s[i] in "'\"":
if s[i] == q:
q = None
elif q is None:
q = s[i]
return None
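# Hedged examples (not part of the original module) of find_comment()
# results, wrapped in a function so nothing runs at import time.
def _find_comment_examples():
    assert find_comment('KEY=value # trailing comment') == 10
    assert find_comment('KEY="value # not a comment"') is None
    assert find_comment('KEY=value') is None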
class SimpleConfigFile(object):
""" Edit values in a configuration file without changing comments.
Supports KEY=VALUE lines and ignores everything else.
Supports adding new keys.
Supports deleting keys.
Preserves comment, blank lines and comments on KEY lines
Does not support duplicate key entries.
"""
def __init__(self, filename=None, read_unquote=True, write_quote=True,
always_quote=False):
self.filename = filename
self.read_unquote = read_unquote
self.write_quote = write_quote
self.always_quote = always_quote
self.reset()
def reset(self):
self._lines = []
self.info = {}
def read(self, filename=None):
""" passing filename will override the filename passed to init.
save the lines into self._lines and the key/value pairs into
self.info
"""
filename = filename or self.filename
with open(filename) as f:
for line in f:
self._lines.append(line)
key, value, _comment = self._parseline(line)
if key:
self.info[key] = value
def write(self, filename=None, use_tmp=True):
""" passing filename will override the filename passed to init.
"""
filename = filename or self.filename
if not filename:
return None
if use_tmp:
tmpf = tempfile.NamedTemporaryFile(mode="w", delete=False)
tmpf.write(str(self))
tmpf.close()
# Move the temporary file (with 0600 permissions) over the top of the
# original and preserve the original's permissions
filename = os.path.realpath(filename)
if os.path.exists(filename):
m = os.stat(filename).st_mode
else:
m = int('0100644', 8)
shutil.move(tmpf.name, filename)
eintr_retry_call(os.chmod, filename, m)
else:
# write directly to the file
with open(filename, "w") as fobj:
fobj.write(str(self))
def set(self, *args):
for key, value in args:
self.info[upperASCII(key)] = value
def unset(self, *keys):
for key in (upperASCII(k) for k in keys):
if key in self.info:
del self.info[key]
def get(self, key):
return self.info.get(upperASCII(key), "")
def _parseline(self, line):
""" parse a line into a key, value and comment
:param str line: Line to be parsed
:returns: Tuple of key, value, comment
:rtype: tuple
Handle comments and optionally unquote quoted strings
Returns (key, value, comment) or (None, None, comment)
            key is always UPPERCASE and comment may be "" if none was found.
"""
s = line.strip()
# Look for a # outside any quotes
comment = ""
comment_index = find_comment(s)
if comment_index is not None:
comment = s[comment_index:]
s = s[:comment_index] # remove from comment to EOL
key, eq, val = s.partition('=')
key = key.strip()
val = val.strip()
if self.read_unquote:
val = unquote(val)
if key != '' and eq == '=':
return (upperASCII(key), val, comment)
else:
return (None, None, comment)
def _kvpair(self, key, comment=""):
value = self.info[key]
if self.write_quote or self.always_quote:
value = quote(value, self.always_quote)
if comment:
comment = " " + comment
return key + '=' + value + comment + "\n"
def __str__(self):
""" Return the file that was read, replacing existing keys with new values
removing keys that have been deleted and adding new keys.
"""
oldkeys = []
s = ""
for line in self._lines:
key, _value, comment = self._parseline(line)
if key is None:
s += line
else:
if key not in self.info:
continue
oldkeys.append(key)
s += self._kvpair(key, comment)
# Add new keys
for key in self.info:
if key not in oldkeys:
s += self._kvpair(key)
return s
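# Hedged usage sketch (not part of the original module): edit one key in an
# sh-style file while preserving comments and layout. The path and key are
# illustrative assumptions only.
def _example_edit(path):
    cfg = SimpleConfigFile(path)
    cfg.read()
    cfg.set(("HOSTNAME", "example.localdomain"))
    cfg.write()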
|
sangjin3/webida-server
|
refs/heads/master
|
src/ext/cordova-weinre/weinre.build/scripts/build-client-html.py
|
7
|
#!/usr/bin/env python
# ---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ---
import os
import re
import sys
import json
import optparse
#--------------------------------------------------------------------
def main():
if len(sys.argv) < 2:
error("expecting parameters [web directory]")
webDir = sys.argv[1]
iFileName = os.path.join(webDir, "client/inspector.html")
oFileName = os.path.join(webDir, "client/index.html")
moduleDir = os.path.join(webDir, "weinre")
if not os.path.exists(iFileName): error("file does not exist: %s" % iFileName)
if not os.path.exists(moduleDir): error("module directory does not exist: %s" % moduleDir)
if not os.path.isdir(moduleDir): error("module directory is not a directory: %s" % moduleDir)
createIndexFile(iFileName, oFileName, moduleDir)
#--------------------------------------------------------------------
def createIndexFile(iFileName, oFileName, moduleDir):
with open(iFileName) as iFile: lines = iFile.readlines()
pattern_head_start = re.compile(r"^\s*<meta http-equiv=\"content-type\".*$")
pattern_head_end = re.compile(r"^\s*</head>\s$")
newLines = []
foundStart = False
foundEnd = False
for line in lines:
if pattern_head_start.match(line):
foundStart = True
newLines.append(line)
newLines.append("<!-- ========== weinre additions: starting ========== -->\n")
newLines.extend([
                '<meta http-equiv="X-UA-Compatible" content="chrome=1">\n',
'<link rel="shortcut icon" href="../images/weinre-icon-64x64.png">\n',
'<title>weinre</title>\n',
'<script type="text/javascript" src="weinre/browser-support-check.js"></script>\n',
'<script type="text/javascript" src="weinre/hacks.js"></script>\n',
'<script type="text/javascript" src="../modjewel.js"></script>\n',
'<script type="text/javascript">modjewel.require("modjewel").warnOnRecursiveRequire(true)</script>\n',
])
for module in getModules(moduleDir):
newLines.append('<script type="text/javascript" src="../%s"></script>\n' % module)
newLines.append("<!-- ========== weinre additions: done ========== -->\n")
elif pattern_head_end.match(line):
foundEnd = True
newLines.append("<!-- ========== weinre additions: starting ========== -->\n")
newLines.append('<link rel="stylesheet" type="text/css" href="weinre/client.css">\n')
newLines.append('<script type="text/javascript" src="../interfaces/all-json-idls-min.js"></script>\n')
newLines.append('<script type="text/javascript">modjewel.require("weinre/client/Client").main()</script>\n')
newLines.append("<!-- ========== weinre additions: done ========== -->\n")
newLines.append(line)
else:
newLines.append(line)
if not foundStart: error("didn't find the location to start writing")
if not foundEnd: error("didn't find the location to finish writing")
with open(oFileName, "w") as oFile: oFile.writelines(newLines)
log("created %s" % oFileName)
#--------------------------------------------------------------------
def getModules(moduleDir):
modules = []
for module in os.listdir(os.path.join(moduleDir, "common")):
modules.append("weinre/common/%s" % module)
for module in os.listdir(os.path.join(moduleDir, "client")):
modules.append("weinre/client/%s" % module)
return modules
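# Illustrative result (a hedged sketch; actual file names depend on the build
# tree): getModules("web/weinre") might return something like
# ["weinre/common/Weinre.js", ..., "weinre/client/Client.js", ...]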
#--------------------------------------------------------------------
def log(message):
message = "%s: %s" % (PROGRAM_NAME, message)
print >>sys.stderr, message
#--------------------------------------------------------------------
def error(message):
log(message)
sys.exit(-1)
#--------------------------------------------------------------------
PROGRAM_NAME = os.path.basename(sys.argv[0])
main()
|
kriswuollett/grpc
|
refs/heads/master
|
src/python/grpcio_tests/tests/qps/client_runner.py
|
23
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines behavior for WHEN clients send requests.
Each client exposes a non-blocking send_request() method that the
ClientRunner invokes either periodically or in response to some event.
"""
import abc
import threading
import time
class ClientRunner:
"""Abstract interface for sending requests from clients."""
__metaclass__ = abc.ABCMeta
def __init__(self, client):
self._client = client
@abc.abstractmethod
def start(self):
raise NotImplementedError()
@abc.abstractmethod
def stop(self):
raise NotImplementedError()
class OpenLoopClientRunner(ClientRunner):
def __init__(self, client, interval_generator):
super(OpenLoopClientRunner, self).__init__(client)
self._is_running = False
self._interval_generator = interval_generator
self._dispatch_thread = threading.Thread(
target=self._dispatch_requests, args=())
def start(self):
self._is_running = True
self._client.start()
self._dispatch_thread.start()
def stop(self):
self._is_running = False
self._client.stop()
self._dispatch_thread.join()
self._client = None
def _dispatch_requests(self):
while self._is_running:
self._client.send_request()
time.sleep(next(self._interval_generator))
class ClosedLoopClientRunner(ClientRunner):
def __init__(self, client, request_count):
super(ClosedLoopClientRunner, self).__init__(client)
self._is_running = False
self._request_count = request_count
# Send a new request on each response for closed loop
self._client.add_response_callback(self._send_request)
def start(self):
self._is_running = True
self._client.start()
for _ in xrange(self._request_count):
self._client.send_request()
def stop(self):
self._is_running = False
self._client.stop()
self._client = None
def _send_request(self, client, response_time):
if self._is_running:
client.send_request()
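# Illustrative usage (a hedged sketch, not part of the qps harness): any object
# exposing start(), stop(), send_request() and add_response_callback() can act
# as the client; FakeClient below is hypothetical.
#
# runner = OpenLoopClientRunner(FakeClient(), iter(lambda: 0.01, None))
# runner.start() # begins dispatching a request roughly every 10 ms
# time.sleep(1)
# runner.stop() # joins the dispatch thread and releases the client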
|
jorsea/odoo-addons
|
refs/heads/8.0
|
purchase_uom_prices_uoms/__init__.py
|
8
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from . import purchase
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
vitiral/micropython
|
refs/heads/master
|
tests/pyb/switch.py
|
117
|
from pyb import Switch
sw = Switch()
print(sw())
sw.callback(print)
sw.callback(None)
|
arpitbbhayani/penny
|
refs/heads/master
|
app/service/webcomicsService.py
|
1
|
import time
import app
from app.utils import readable
from app.dao.items.webcomicDao import WebcomicDao
from app.crawlers.xkcd_crawler import XkcdComic
def create_comic(comic_id, initial_data):
dao = WebcomicDao()
webcomicObj = dao.create_comic(comic_id, initial_data)
return webcomicObj
def get_comic(comic_id):
dao = WebcomicDao()
comic = dao.get_comic_by_id(comic_id)
return comic
def get_comic_urls(comic_id):
dao = WebcomicDao()
urls = dao.get_comic_urls(comic_id)
return urls
def get_comics_ids():
dao = WebcomicDao()
ids = dao.get_comics_ids()
return ids
def sync(comic_id):
dao, crawler = None, None
if comic_id == 'xkcd':
dao = WebcomicDao()
crawler = XkcdComic(app.config.XKCD_CRAWLER_URL)
else:
raise Exception('Invalid webcomic id %s' % comic_id)
urls = get_comic_urls(comic_id)
only_urls = set([url.get('url') for url in urls])
comic_links = crawler.get_comics(only_urls)
last_sync = time.time()
links_count = len(comic_links) + len(urls)
dao.add_links(comic_id, comic_links)
dao.update_comic(comic_id, links_count=links_count, last_sync=last_sync)
return {
'id': comic_id,
'links_count': links_count,
'last_sync': readable.from_ts(last_sync)
}
def get_comics_meta_info():
comics = WebcomicDao().get_all_comics_meta_info()
return comics
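# Illustrative usage (a hedged sketch; assumes app.config and the DAO layer
# are wired up):
# result = sync('xkcd')
# result might look like:
# {'id': 'xkcd', 'links_count': 1234, 'last_sync': '2 hours ago'}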
|
NTesla/pattern
|
refs/heads/master
|
pattern/graph/commonsense.py
|
21
|
#### PATTERN | COMMONSENSE #########################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
from codecs import BOM_UTF8
from urllib import urlopen
from itertools import chain
from __init__ import Graph, Node, Edge, bfs
from __init__ import WEIGHT, CENTRALITY, EIGENVECTOR, BETWEENNESS
import os
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
#### COMMONSENSE SEMANTIC NETWORK ##################################################################
#--- CONCEPT ---------------------------------------------------------------------------------------
class Concept(Node):
def __init__(self, *args, **kwargs):
""" A concept in the sematic network.
"""
Node.__init__(self, *args, **kwargs)
self._properties = None
@property
def halo(self, depth=2):
""" Returns the concept halo: a list with this concept + surrounding concepts.
This is useful to reason more fluidly about the concept,
since the halo will include latent properties linked to nearby concepts.
"""
return self.flatten(depth=depth)
@property
def properties(self):
""" Returns the top properties in the concept halo, sorted by betweenness centrality.
The return value is a list of concept id's instead of Concepts (for performance).
"""
if self._properties is None:
g = self.graph.copy(nodes=self.halo)
p = (n for n in g.nodes if n.id in self.graph.properties)
p = [n.id for n in reversed(sorted(p, key=lambda n: n.centrality))]
self._properties = p
return self._properties
def halo(concept, depth=2):
return concept.flatten(depth=depth)
def properties(concept, depth=2, centrality=BETWEENNESS):
g = concept.graph.copy(nodes=halo(concept, depth))
p = (n for n in g.nodes if n.id in concept.graph.properties)
p = [n.id for n in reversed(sorted(p, key=lambda n: getattr(n, centrality)))]
return p
#--- RELATION --------------------------------------------------------------------------------------
class Relation(Edge):
def __init__(self, *args, **kwargs):
""" A relation between two concepts, with an optional context.
For example, "Felix is-a cat" is in the "media" context, "tiger is-a cat" in "nature".
"""
self.context = kwargs.pop("context", None)
Edge.__init__(self, *args, **kwargs)
#--- HEURISTICS ------------------------------------------------------------------------------------
# Similarity between concepts is measured using a featural approach:
# a comparison of the features/properties that are salient in each concept's halo.
# Commonsense.similarity() takes an optional "heuristic" parameter to tweak this behavior.
# It is a tuple of two functions:
# 1) function(concept) returns a list of salient properties (or other),
# 2) function(edge) returns the cost to traverse this edge (0.0-1.0).
COMMONALITY = (
# Similarity heuristic that only traverses relations between properties.
lambda concept: concept.properties,
lambda edge: 1 - int(edge.context == "properties" and \
edge.type != "is-opposite-of"))
#--- COMMONSENSE -----------------------------------------------------------------------------------
class Commonsense(Graph):
def __init__(self, data=os.path.join(MODULE, "commonsense.csv"), **kwargs):
""" A semantic network of commonsense, using different relation types:
- is-a,
- is-part-of,
- is-opposite-of,
- is-property-of,
- is-related-to,
- is-same-as,
- is-effect-of.
"""
Graph.__init__(self, **kwargs)
self._properties = None
# Load data from the given path,
# a CSV-file of (concept1, relation, concept2, context, weight)-items.
if data is not None:
s = open(data).read()
s = s.strip(BOM_UTF8)
s = s.decode("utf-8")
s = ((v.strip("\"") for v in r.split(",")) for r in s.splitlines())
for concept1, relation, concept2, context, weight in s:
self.add_edge(concept1, concept2,
type = relation,
context = context,
weight = min(int(weight)*0.1, 1.0))
@property
def concepts(self):
return self.nodes
@property
def relations(self):
return self.edges
@property
def properties(self):
""" Yields all concepts that are properties (i.e., adjectives).
For example: "cold is-property-of winter" => "cold".
"""
if self._properties is None:
#self._properties = set(e.node1.id for e in self.edges if e.type == "is-property-of")
self._properties = (e for e in self.edges if e.context == "properties")
self._properties = set(chain(*((e.node1.id, e.node2.id) for e in self._properties)))
return self._properties
def add_node(self, id, *args, **kwargs):
""" Returns a Concept (Node subclass).
"""
self._properties = None
kwargs.setdefault("base", Concept)
return Graph.add_node(self, id, *args, **kwargs)
def add_edge(self, id1, id2, *args, **kwargs):
""" Returns a Relation between two concepts (Edge subclass).
"""
self._properties = None
kwargs.setdefault("base", Relation)
return Graph.add_edge(self, id1, id2, *args, **kwargs)
def remove(self, x):
self._properties = None
Graph.remove(self, x)
def similarity(self, concept1, concept2, k=3, heuristic=COMMONALITY):
""" Returns the similarity of the given concepts,
by cross-comparing shortest path distance between k concept properties.
A given concept can also be a flat list of properties, e.g. ["creepy"].
The given heuristic is a tuple of two functions:
1) function(concept) returns a list of salient properties,
2) function(edge) returns the cost for traversing this edge (0.0-1.0).
"""
if isinstance(concept1, basestring):
concept1 = self[concept1]
if isinstance(concept2, basestring):
concept2 = self[concept2]
if isinstance(concept1, Node):
concept1 = heuristic[0](concept1)
if isinstance(concept2, Node):
concept2 = heuristic[0](concept2)
if isinstance(concept1, list):
concept1 = [isinstance(n, Node) and n or self[n] for n in concept1]
if isinstance(concept2, list):
concept2 = [isinstance(n, Node) and n or self[n] for n in concept2]
h = lambda id1, id2: heuristic[1](self.edge(id1, id2))
w = 0.0
for p1 in concept1[:k]:
for p2 in concept2[:k]:
p = self.shortest_path(p1, p2, heuristic=h)
w += 1.0 / (p is None and 1e10 or len(p))
return w / k
def nearest_neighbors(self, concept, concepts=[], k=3):
""" Returns the k most similar concepts from the given list.
"""
return sorted(concepts, key=lambda candidate: self.similarity(concept, candidate, k), reverse=True)
similar = neighbors = nn = nearest_neighbors
def taxonomy(self, concept, depth=3, fringe=2):
""" Returns a list of concepts that are descendants of the given concept, using "is-a" relations.
Creates a subgraph of "is-a" related concepts up to the given depth,
then takes the fringe (i.e., leaves) of the subgraph.
"""
def traversable(node, edge):
# Follow parent-child edges.
return edge.node2 == node and edge.type == "is-a"
if not isinstance(concept, Node):
concept = self[concept]
g = self.copy(nodes=concept.flatten(depth, traversable))
g = g.fringe(depth=fringe)
g = [self[n.id] for n in g if n != concept]
return g
field = semantic_field = taxonomy
#g = Commonsense()
#print(g.nn("party", g.field("animal")))
#print(g.nn("creepy", g.field("animal")))
#### COMMONSENSE DATA ##############################################################################
#--- NODEBOX.NET/PERCEPTION ------------------------------------------------------------------------
def download(path=os.path.join(MODULE, "commonsense.csv"), threshold=50):
""" Downloads commonsense data from http://nodebox.net/perception.
Saves the data as commonsense.csv which can be the input for Commonsense.load().
"""
s = "http://nodebox.net/perception?format=txt&robots=1"
s = urlopen(s).read()
s = s.decode("utf-8")
s = s.replace("\\'", "'")
# Group relations by author.
a = {}
for r in ([v.strip("'") for v in r.split(", ")] for r in s.split("\n")):
if len(r) == 7:
a.setdefault(r[-2], []).append(r)
# Iterate authors sorted by number of contributions.
# 1) Authors with 50+ contributions can define new relations and context.
# 2) Authors with 50- contributions (or robots) can only reinforce existing relations.
a = sorted(a.items(), cmp=lambda v1, v2: len(v2[1]) - len(v1[1]))
r = {}
for author, relations in a:
if author == "" or author.startswith("robots@"):
continue
if len(relations) < threshold:
break
# Sort latest-first (we prefer more recent relation types).
relations = sorted(relations, cmp=lambda r1, r2: cmp(r2[-1], r1[-1]))
# 1) Define new relations.
for concept1, relation, concept2, context, weight, author, date in relations:
id = (concept1, relation, concept2)
if id not in r:
r[id] = [None, 0]
if r[id][0] is None and context is not None:
r[id][0] = context
for author, relations in a:
# 2) Reinforce existing relations.
for concept1, relation, concept2, context, weight, author, date in relations:
id = (concept1, relation, concept2)
if id in r:
r[id][1] += int(weight)
# Export CSV-file.
s = []
for (concept1, relation, concept2), (context, weight) in r.items():
s.append("\"%s\",\"%s\",\"%s\",\"%s\",%s" % (
concept1, relation, concept2, context, weight))
f = open(path, "w")
f.write(BOM_UTF8)
f.write("\n".join(s).encode("utf-8"))
f.close()
def json():
""" Returns a JSON-string with the data from commonsense.csv.
Each relation is encoded as a [concept1, relation, concept2, context, weight] list.
"""
f = lambda s: s.replace("'", "\\'").encode("utf-8")
s = []
g = Commonsense()
for e in g.edges:
s.append("\n\t['%s', '%s', '%s', '%s', %.2f]" % (
f(e.node1.id),
f(e.type),
f(e.node2.id),
f(e.context),
e.weight
))
return "commonsense = [%s];" % ", ".join(s)
#download("commonsense.csv", threshold=50)
#open("commonsense.js", "w").write(json())
|
jimhw/trading-with-python
|
refs/heads/master
|
cookbook/runConsoleUntilInterrupt.py
|
77
|
# -*- coding: utf-8 -*-
"""
example on how to run a console script until interrupted by keyboard
@author: jev
"""
from time import sleep
counter = 0
print 'Press Ctrl-C to stop loop'
try:
while True:
print counter
counter += 1
sleep(1)
except KeyboardInterrupt:
print 'All done'
|
slohse/ansible
|
refs/heads/devel
|
test/units/modules/network/exos/test_exos_command.py
|
14
|
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.exos import exos_command
from units.modules.utils import set_module_args
from .exos_module import TestExosModule, load_fixture
class TestExosCommandModule(TestExosModule):
module = exos_command
def setUp(self):
super(TestExosCommandModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.exos.exos_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestExosCommandModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item['command'])
command = obj['command']
except ValueError:
command = item['command']
filename = str(command).replace(' ', '_')
output.append(load_fixture(filename))
return output
self.run_commands.side_effect = load_from_file
def test_exos_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Switch :'))
def test_exos_command_multiple(self):
set_module_args(dict(commands=['show version', 'show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('Switch :'))
def test_exos_command_wait_for(self):
wait_for = 'result[0] contains "Switch :"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_exos_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_exos_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_exos_command_match_any(self):
wait_for = ['result[0] contains "Switch"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_exos_command_match_all(self):
wait_for = ['result[0] contains "Switch"',
'result[0] contains "Switch :"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_exos_command_match_all_failure(self):
wait_for = ['result[0] contains "Switch :"',
'result[0] contains "test string"']
commands = ['show version', 'show version']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
def test_exos_command_configure_error(self):
commands = ['disable ospf']
set_module_args({
'commands': commands,
'_ansible_check_mode': True,
})
result = self.execute_module()
self.assertEqual(
result['warnings'],
['only show commands are supported when using check mode, not executing `disable ospf`']
)
|
Bachmann1234/letsencrypt
|
refs/heads/master
|
acme/acme/fields_test.py
|
62
|
"""Tests for acme.fields."""
import datetime
import unittest
import pytz
from acme import jose
class FixedTest(unittest.TestCase):
"""Tests for acme.fields.Fixed."""
def setUp(self):
from acme.fields import Fixed
self.field = Fixed('name', 'x')
def test_decode(self):
self.assertEqual('x', self.field.decode('x'))
def test_decode_bad(self):
self.assertRaises(jose.DeserializationError, self.field.decode, 'y')
def test_encode(self):
self.assertEqual('x', self.field.encode('x'))
def test_encode_override(self):
self.assertEqual('y', self.field.encode('y'))
class RFC3339FieldTest(unittest.TestCase):
"""Tests for acme.fields.RFC3339Field."""
def setUp(self):
self.decoded = datetime.datetime(2015, 3, 27, tzinfo=pytz.utc)
self.encoded = '2015-03-27T00:00:00Z'
def test_default_encoder(self):
from acme.fields import RFC3339Field
self.assertEqual(
self.encoded, RFC3339Field.default_encoder(self.decoded))
def test_default_encoder_naive_fails(self):
from acme.fields import RFC3339Field
self.assertRaises(
ValueError, RFC3339Field.default_encoder, datetime.datetime.now())
def test_default_decoder(self):
from acme.fields import RFC3339Field
self.assertEqual(
self.decoded, RFC3339Field.default_decoder(self.encoded))
def test_default_decoder_raises_deserialization_error(self):
from acme.fields import RFC3339Field
self.assertRaises(
jose.DeserializationError, RFC3339Field.default_decoder, '')
class ResourceTest(unittest.TestCase):
"""Tests for acme.fields.Resource."""
def setUp(self):
from acme.fields import Resource
self.field = Resource('x')
def test_decode_good(self):
self.assertEqual('x', self.field.decode('x'))
def test_decode_wrong(self):
self.assertRaises(jose.DeserializationError, self.field.decode, 'y')
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
andrebellafronte/stoq
|
refs/heads/master
|
plugins/optical/tests/test_optical_ui.py
|
2
|
# -*- Mode: Python; coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import contextlib
import gtk
import mock
from stoqlib.database.runtime import StoqlibStore
from stoqlib.database.viewable import Viewable
from stoqlib.domain.person import Person
from stoqlib.domain.sale import Sale
from stoqlib.domain.workorder import WorkOrderCategory
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.editors.personeditor import ClientEditor
from stoqlib.gui.editors.producteditor import ProductEditor
from stoqlib.gui.editors.workordereditor import WorkOrderEditor
from stoqlib.gui.events import PrintReportEvent
from stoqlib.gui.wizards.personwizard import PersonRoleWizard
from stoqlib.gui.wizards.workorderquotewizard import WorkOrderQuoteWizard
from stoqlib.lib.dateutils import localdate
from stoqlib.lib.parameters import sysparam
from stoqlib.reporting.sale import SaleOrderReport
from stoq.gui.test.baseguitest import BaseGUITest
from stoq.gui.sales import SalesApp
from stoq.gui.services import ServicesApp
from ..medicssearch import OpticalMedicSearch, MedicSalesSearch
from ..opticaleditor import MedicEditor, OpticalWorkOrderEditor
from ..opticalhistory import OpticalPatientDetails
from ..opticalreport import OpticalWorkOrderReceiptReport
from ..opticalui import OpticalUI
from ..opticalwizard import OpticalSaleQuoteWizard, MedicRoleWizard
from .test_optical_domain import OpticalDomainTest
__tests__ = 'plugins.optical.opticalui.py'
class TestOpticalUI(BaseGUITest, OpticalDomainTest):
@classmethod
def setUpClass(cls):
cls.ui = OpticalUI()
BaseGUITest.setUpClass()
def test_optical_sales(self):
app = self.create_app(SalesApp, u'sales')
for sales in app.results:
sales.open_date = localdate(2012, 1, 1)
sales.confirm_date = localdate(2012, 2, 3)
sales.close_date = localdate(2012, 4, 5)
self.check_app(app, u'sales-optical-plugin')
self.window.hide_app(empty=True)
def test_optical_sales_pre_sale(self):
app = self.create_app(SalesApp, u'sales')
action = app.uimanager.get_action(
'/ui/menubar/ExtraMenubarPH/OpticalMenu/OpticalPreSale')
assert action, action
with mock.patch('plugins.optical.opticalui.run_dialog') as run_dialog_:
self.activate(action)
args, kwargs = run_dialog_.call_args
self.assertEquals(args[0], OpticalSaleQuoteWizard)
self.assertEquals(args[1], app)
self.assertTrue(isinstance(args[2], StoqlibStore))
with mock.patch('plugins.optical.opticalui.warning') as warning_:
# We need to mock this since it's a cached_function and thus it
# will return None for some time even if we create an inventory here
with mock.patch.object(app, 'has_open_inventory') as has_open_inventory:
has_open_inventory.return_value = True
self.activate(action)
warning_.assert_called_once_with(
"You cannot create a pre-sale with an open inventory.")
def test_optical_sales_medic_search(self):
app = self.create_app(SalesApp, u'sales')
action = app.uimanager.get_action(
'/ui/menubar/ExtraMenubarPH/OpticalMenu/OpticalMedicSearch')
assert action, action
with mock.patch('plugins.optical.opticalui.run_dialog') as run_dialog_:
self.activate(action)
args, kwargs = run_dialog_.call_args
self.assertEquals(args[0], OpticalMedicSearch)
self.assertEquals(args[1], None)
self.assertTrue(isinstance(args[2], StoqlibStore))
self.assertEquals(kwargs['hide_footer'], True)
def test_optical_sales_medic_sales_search(self):
app = self.create_app(SalesApp, u'sales')
action = app.uimanager.get_action(
'/ui/menubar/ExtraMenubarPH/OpticalMenu/OpticalMedicSaleItems')
assert action, action
with mock.patch('plugins.optical.opticalui.run_dialog') as run_dialog_:
self.activate(action)
args, kwargs = run_dialog_.call_args
self.assertEquals(args[0], MedicSalesSearch)
self.assertEquals(args[1], None)
self.assertTrue(isinstance(args[2], StoqlibStore))
self.assertEquals(kwargs['hide_footer'], True)
def test_product_editor(self):
product = self.create_product(stock=10)
editor = ProductEditor(store=self.store, model=product)
self.check_editor(editor, u'editor-product-optical-plugin')
def test_work_order_editor(self):
sysparam.set_bool(self.store,
'ALLOW_OUTDATED_OPERATIONS',
True)
sale = self.create_sale()
workorder = self.create_workorder()
workorder.identifier = 1234
workorder.open_date = localdate(2012, 1, 1)
workorder.sale = sale
editor = WorkOrderEditor(store=self.store, model=workorder)
self.check_editor(editor, u'editor-work-order-optical-plugin')
# FIXME: baseditor should probably add an api for getting a list
# of buttons
print_button = editor.main_dialog.action_area.get_children()[0]
assert print_button.get_label() == gtk.STOCK_PRINT
with mock.patch('plugins.optical.opticalui.print_report') as print_report_:
self.click(print_button)
print_report_.assert_called_once_with(
OpticalWorkOrderReceiptReport, [editor.model])
def test_run_optical_sale_quote_wizard(self):
sale = self.create_sale()
sale.status = Sale.STATUS_QUOTE
sale.add_sellable(self.create_sellable())
wo_category = WorkOrderCategory(name=u'category', store=self.store)
workorder = self.create_workorder()
workorder.category = wo_category
workorder.sale = sale
name = 'stoqlib.gui.base.dialogs.run_dialog_internal'
with mock.patch(name) as run_dialog_internal:
# Without a Sale that has workorders -> optical wizard
run_dialog(WorkOrderQuoteWizard, None, self.store, sale)
args, kwargs = run_dialog_internal.call_args
self.assertTrue(isinstance(args[0], OpticalSaleQuoteWizard))
# Without a Sale, normal wizard
run_dialog_internal.reset_mock()
run_dialog(WorkOrderQuoteWizard, None, self.store, None)
args, kwargs = run_dialog_internal.call_args
self.assertTrue(isinstance(args[0], WorkOrderQuoteWizard))
def test_run_medic_role_wizard(self):
name = 'stoqlib.gui.base.dialogs.run_dialog_internal'
with mock.patch(name) as run_dialog_internal:
run_dialog(PersonRoleWizard, None, self.store, MedicEditor)
args, kwargs = run_dialog_internal.call_args
self.assertTrue(isinstance(args[0], MedicRoleWizard))
def test_person_editor(self):
client = self.create_client()
editor = ClientEditor(self.store, client, role_type=Person.ROLE_INDIVIDUAL)
self.check_editor(editor, 'editor-client-optical-plugin')
with mock.patch('plugins.optical.opticalui.run_dialog') as run_dialog_:
self.click(editor.patient_history_button)
run_dialog_.assert_called_once_with(OpticalPatientDetails, editor, self.store, client)
def test_product_search(self):
from stoqlib.gui.search.productsearch import ProductSearch
from stoqlib.gui.search.costcentersearch import CostCenterSearch
# ProductSearch should have new columns
search = ProductSearch(self.store)
search.search.refresh()
self.check_search(search, 'search-optical-product-search')
# Cost center search does not use a viewable, so it should not have columns
assert not issubclass(CostCenterSearch.search_spec, Viewable)
search = CostCenterSearch(self.store)
search.search.refresh()
self.check_search(search, 'search-optical-cost-center-search')
def test_services_app(self):
product = self.create_product()
product.manufacturer = self.create_product_manufacturer(u'Empresa Tal')
workorder = self.create_workorder()
workorder.identifier = 99412
workorder.open_date = localdate(2013, 12, 7)
workorder.sellable = product.sellable
app = self.create_app(ServicesApp, u'services')
app.search.refresh()
self.check_app(app, u'services-optical-plugin')
@mock.patch('plugins.optical.opticalui.api.new_store')
@mock.patch('plugins.optical.opticalui.run_dialog')
def test_edit_optical_details(self, run_dialog, new_store):
new_store.return_value = self.store
product = self.create_product()
work_order = self.create_workorder()
work_order.identifier = 666
work_order.open_date = localdate(2014, 01, 31)
work_order.sellable = product.sellable
app = self.create_app(ServicesApp, u'services')
app.search.refresh()
for wo_view in app.search.results:
if wo_view.work_order == work_order:
break
self.assertIsNotNone(wo_view)
app.search.results.select(wo_view)
action = app.uimanager.get_action(
'/menubar/AppMenubarPH/OrderMenu/OpticalDetails')
with contextlib.nested(
mock.patch.object(self.store, 'commit'),
mock.patch.object(self.store, 'close')):
self.activate(action)
run_dialog.assert_called_once_with(OpticalWorkOrderEditor, None,
self.store, work_order)
@mock.patch('plugins.optical.opticalui.print_report')
def test_print_report_event(self, print_report):
# Emitting with something different from SaleOrderReport
rv = PrintReportEvent.emit(object)
self.assertFalse(rv)
self.assertEquals(print_report.call_count, 0)
# Emitting with SaleOrderReport, but without workorders
sale = self.create_sale()
rv = PrintReportEvent.emit(SaleOrderReport, sale)
self.assertFalse(rv)
self.assertEquals(print_report.call_count, 0)
# Emitting with SaleOrderReport and with workorders
optical_wo = self.create_optical_work_order()
optical_wo.work_order.sale = sale
rv = PrintReportEvent.emit(SaleOrderReport, sale)
self.assertTrue(rv)
print_report.assert_called_once_with(OpticalWorkOrderReceiptReport,
[optical_wo.work_order])
|
pozetroninc/micropython
|
refs/heads/stable
|
tests/basics/int_divmod_intbig.py
|
45
|
# test integer floor division and modulo
# this tests bignum modulo
a = 987654321987987987987987987987
b = 19
print(a % b)
print(a % -b)
print(-a % b)
print(-a % -b)
|
jmcarbo/openerp7
|
refs/heads/master
|
openerp/addons/base/module/wizard/base_import_language.py
|
105
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from tempfile import TemporaryFile
from openerp import tools
from openerp.osv import osv, fields
class base_language_import(osv.osv_memory):
""" Language Import """
_name = "base.language.import"
_description = "Language Import"
_columns = {
'name': fields.char('Language Name', size=64 , required=True),
'code': fields.char('ISO Code', size=5, help="ISO Language and Country code, e.g. en_US", required=True),
'data': fields.binary('File', required=True),
'overwrite': fields.boolean('Overwrite Existing Terms',
help="If you enable this option, existing translations (including custom ones) "
"will be overwritten and replaced by those in this file"),
}
def import_lang(self, cr, uid, ids, context=None):
if context is None:
context = {}
this = self.browse(cr, uid, ids[0])
if this.overwrite:
context.update(overwrite=True)
fileobj = TemporaryFile('w+')
try:
fileobj.write(base64.decodestring(this.data))
# now we determine the file format
fileobj.seek(0)
first_line = fileobj.readline().strip().replace('"', '').replace(' ', '')
fileformat = first_line.endswith("type,name,res_id,src,value") and 'csv' or 'po'
fileobj.seek(0)
tools.trans_load_data(cr, fileobj, fileformat, this.code, lang_name=this.name, context=context)
finally:
fileobj.close()
return True
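# Illustrative note (a hedged sketch, not executed): the detection above
# classifies by the first line of the uploaded file, e.g.
# "type","name","res_id","src","value" -> 'csv'
# msgid "Sales" -> 'po'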
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
clarkfitzg/dask
|
refs/heads/master
|
dask/dataframe/tests/test_optimize_dataframe.py
|
3
|
import pytest
from operator import getitem
from toolz import valmap, merge
from dask.dataframe.optimize import dataframe_from_ctable
import dask.dataframe as dd
import pandas as pd
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
dfs = list(dsk.values())
def test_column_optimizations_with_bcolz_and_rewrite():
try:
import bcolz
except ImportError:
pytest.skip('bcolz not installed')
bc = bcolz.ctable([[1, 2, 3], [10, 20, 30]], names=['a', 'b'])
func = lambda x: x
for cols in [None, 'abc', ['abc']]:
dsk2 = merge(dict((('x', i),
(dataframe_from_ctable, bc, slice(0, 2), cols, {}))
for i in [1, 2, 3]),
dict((('y', i),
(getitem, ('x', i), (list, ['a', 'b'])))
for i in [1, 2, 3]))
expected = dict((('y', i), (dataframe_from_ctable,
bc, slice(0, 2), (list, ['a', 'b']), {}))
for i in [1, 2, 3])
result = dd.optimize(dsk2, [('y', i) for i in [1, 2, 3]])
assert result == expected
def test_castra_column_store():
try:
from castra import Castra
except ImportError:
pytest.skip('castra not installed')
df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
with Castra(template=df) as c:
c.extend(df)
df = c.to_dask()
df2 = df[['x']]
dsk = dd.optimize(df2.dask, df2._keys())
assert dsk == {(df2._name, 0): (Castra.load_partition, c, '0--2',
(list, ['x']))}
|
ThomasMiconi/htmresearch
|
refs/heads/master
|
projects/sequence_classification/run_encoder_only.py
|
11
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Run sequence classification experiment with simple encoder model
1. Encode each element with RDSE encoder
2. Calculate prediction using kNN based on average overlap distance
3. Search for the optimal encoder resolution
"""
import pickle
import time
import matplotlib.pyplot as plt
import multiprocessing
from util_functions import *
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
plt.ion()
def runEncoderOverDataset(encoder, dataset):
activeColumnsData = []
for i in range(dataset.shape[0]):
activeColumnsTrace = []
for element in dataset[i, :]:
encoderOutput = encoder.encode(element)
activeColumns = set(np.where(encoderOutput > 0)[0])
activeColumnsTrace.append(activeColumns)
activeColumnsData.append(activeColumnsTrace)
return activeColumnsData
def calculateEncoderModelWorker(taskQueue, resultQueue, *args):
while True:
nextTask = taskQueue.get()
print "Next task is : ", nextTask
if nextTask is None:
break
nBuckets = nextTask["nBuckets"]
accuracyColumnOnly = calculateEncoderModelAccuracy(nBuckets, *args)
resultQueue.put({nBuckets: accuracyColumnOnly})
print "Column Only model, Resolution: {} Accuracy: {}".format(
nBuckets, accuracyColumnOnly)
return
def calculateEncoderModelAccuracy(nBuckets, numCols, w, trainData, trainLabel):
maxValue = np.max(trainData)
minValue = np.min(trainData)
resolution = (maxValue - minValue) / nBuckets
encoder = RandomDistributedScalarEncoder(resolution, w=w, n=numCols)
activeColumnsTrain = runEncoderOverDataset(encoder, trainData)
distMatColumnTrain = calculateDistanceMatTrain(activeColumnsTrain)
meanAccuracy, outcomeColumn = calculateAccuracy(distMatColumnTrain,
trainLabel, trainLabel)
accuracyColumnOnly = np.mean(outcomeColumn)
return accuracyColumnOnly
def searchForOptimalEncoderResolution(nBucketList, trainData, trainLabel, numCols, w):
numCPU = multiprocessing.cpu_count()
numWorker = numCPU
# Establish communication queues
taskQueue = multiprocessing.JoinableQueue()
resultQueue = multiprocessing.Queue()
for nBuckets in nBucketList:
taskQueue.put({"nBuckets": nBuckets})
for _ in range(numWorker):
taskQueue.put(None)
jobs = []
for i in range(numWorker):
print "Start process ", i
p = multiprocessing.Process(target=calculateEncoderModelWorker,
args=(taskQueue, resultQueue, numCols, w, trainData, trainLabel))
jobs.append(p)
p.daemon = True
p.start()
while not taskQueue.empty():
time.sleep(0.1)
accuracyVsResolution = np.zeros(len(nBucketList))
while not resultQueue.empty():
exptResult = resultQueue.get()
nBuckets = exptResult.keys()[0]
accuracyVsResolution[nBucketList.index(nBuckets)] = exptResult[nBuckets]
return accuracyVsResolution
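# Illustrative call (a hedged sketch; trainData is a 2-D numpy array and
# trainLabel a 1-D array of class labels):
# accuracyVsResolution = searchForOptimalEncoderResolution(
# range(20, 200, 10), trainData, trainLabel, numCols=2048, w=41)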
if __name__ == "__main__":
# datasetName = "SyntheticData"
# dataSetList = listDataSets(datasetName)
datasetName = 'UCR_TS_Archive_2015'
dataSetList = listDataSets(datasetName)
# dataSetList = ["synthetic_control"]
for dataName in dataSetList:
trainData, trainLabel, testData, testLabel = loadDataset(dataName, datasetName)
numTest = len(testLabel)
numTrain = len(trainLabel)
sequenceLength = len(trainData[0])
classList = np.unique(trainLabel)
if max(numTrain, numTest) * sequenceLength > 600 * 600:
print "skip this large dataset for now"
continue
print
print "Processing {}".format(dataName)
print "Train Sample # {}, Test Sample # {}".format(numTrain, numTest)
print "Sequence Length {} Class # {}".format(sequenceLength, len(classList))
try:
searchResolution = pickle.load(
open('results/optimalEncoderResolution/{}'.format(dataName), 'r'))
continue
except:
print "Search encoder parameters for this dataset"
EuclideanDistanceMat = calculateEuclideanDistanceMat(testData, trainData)
outcomeEuclidean = []
for i in range(testData.shape[0]):
predictedClass = one_nearest_neighbor(trainData, trainLabel, testData[i,:])
correct = 1 if predictedClass == testLabel[i] else 0
outcomeEuclidean.append(correct)
# print "{} out of {} done outcome: {}".format(i, testData.shape[0], correct)
print
print "Euclidean model accuracy: {}".format(np.mean(outcomeEuclidean))
print
accuracyEuclideanDist = np.mean(outcomeEuclidean)
# # Use SDR overlap instead of Euclidean distance
print "Running Encoder model"
from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder
maxValue = np.max(trainData)
minValue = np.min(trainData)
numCols = 2048
w = 41
try:
searchResolution = pickle.load(
open('results/optimalEncoderResolution/{}'.format(dataName), 'r'))
optimalResolution = searchResolution['optimalResolution']
except:
nBucketList = range(20, 200, 10)
accuracyVsResolution = searchForOptimalEncoderResolution(
nBucketList, trainData, trainLabel, numCols, w)
optNumBucket = nBucketList[np.argmax(np.array(accuracyVsResolution))]
optimalResolution = (maxValue - minValue)/optNumBucket
searchResolution = {
'nBucketList': nBucketList,
'accuracyVsResolution': accuracyVsResolution,
'optimalResolution': optimalResolution
}
# save optimal resolution for future use
outputFile = open('results/optimalEncoderResolution/{}'.format(dataName), 'w')
pickle.dump(searchResolution, outputFile)
outputFile.close()
print "optimal bucket # {}".format((maxValue - minValue)/optimalResolution)
encoder = RandomDistributedScalarEncoder(optimalResolution, w=w, n=numCols)
print "encoding train data ..."
activeColumnsTrain = runEncoderOverDataset(encoder, trainData)
print "encoding test data ..."
activeColumnsTest = runEncoderOverDataset(encoder, testData)
print "calculate column distance matrix ..."
distMatColumnTest = calculateDistanceMat(activeColumnsTest, activeColumnsTrain)
meanAccuracy, outcomeColumn = calculateAccuracy(distMatColumnTest, trainLabel, testLabel)
accuracyColumnOnly = np.mean(outcomeColumn)
print
print "Column Only model, Accuracy: {}".format(accuracyColumnOnly)
expResults = {'accuracyEuclideanDist': accuracyEuclideanDist,
'accuracyColumnOnly': accuracyColumnOnly,
'EuclideanDistanceMat': EuclideanDistanceMat,
'distMatColumnTest': distMatColumnTest}
outputFile = open('results/modelPerformance/{}_columnOnly'.format(dataName), 'w')
pickle.dump(expResults, outputFile)
outputFile.close()
|
hxddh/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/ehow.py
|
195
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class EHowIE(InfoExtractor):
IE_NAME = 'eHow'
_VALID_URL = r'https?://(?:www\.)?ehow\.com/[^/_?]*_(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.ehow.com/video_12245069_hardwood-flooring-basics.html',
'md5': '9809b4e3f115ae2088440bcb4efbf371',
'info_dict': {
'id': '12245069',
'ext': 'flv',
'title': 'Hardwood Flooring Basics',
'description': 'Hardwood flooring may be time consuming, but its ultimately a pretty straightforward concept. Learn about hardwood flooring basics with help from a hardware flooring business owner in this free video...',
'uploader': 'Erick Nathan',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
r'(?:file|source)=(http[^\'"&]*)', webpage, 'video URL')
final_url = compat_urllib_parse_unquote(video_url)
uploader = self._html_search_meta('uploader', webpage)
title = self._og_search_title(webpage).replace(' | eHow', '')
return {
'id': video_id,
'url': final_url,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
'description': self._og_search_description(webpage),
'uploader': uploader,
}
|
espadrine/opera
|
refs/heads/master
|
chromium/src/third_party/pefile/setup.py
|
3
|
try:
from setuptools import setup
except ImportError, excp:
from distutils.core import setup
import pefile
import os
os.environ['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'
os.environ['COPYFILE_DISABLE'] = 'true'
setup(name = 'pefile',
version = pefile.__version__,
description = 'Python PE parsing module',
author = pefile.__author__,
author_email = pefile.__contact__,
url = 'http://code.google.com/p/pefile/',
download_url = 'http://pefile.googlecode.com/files/pefile-%s.tar.gz' % pefile.__version__,
platforms = ['any'],
classifiers = ['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'],
long_description = "\n".join(pefile.__doc__.split('\n')),
py_modules = ['pefile', 'peutils'] )
|
gabrielfalcao/sure
|
refs/heads/master
|
tests/test_custom_assertions.py
|
2
|
# -*- coding: utf-8 -*-
"""
Test custom assertions.
"""
from sure import expect, assertion, chain, chainproperty
from sure.magic import is_cpython
def test_custom_assertion():
"test extending sure with a custom assertion."
class Response(object):
def __init__(self, return_code):
self.return_code = return_code
@assertion
def return_code(self, return_code):
if self.negative:
assert return_code != self.obj.return_code, "Expected was a return code different from {0}.".format(return_code)
else:
assert return_code == self.obj.return_code, "Expected return code is: {0}\nGiven return code was: {1}".format(
return_code, self.obj.return_code)
return True
expect(Response(200)).should.have.return_code(200)
expect(Response(200)).shouldnt.have.return_code(201)
def test_custom_chain_method():
"test extending sure with a custom chain method."
class Response(object):
def __init__(self, headers, return_code):
self.headers = headers
self.return_code = return_code
@chain
def header(self, header_name):
expect(self.obj.headers).should.have.key(header_name)
return self.obj.headers[header_name]
# FIXME(TF): 'must' does not sound right in this method chain.
# it should rather be ...header("foo").which.equals("bar")
# however, which is an assertionproperty in AssertionBuilder
# and is not a monkey patched property.
if is_cpython:
Response({"foo": "bar", "bar": "foo"}, 200).should.have.header("foo").must.be.equal("bar")
else:
expect(expect(Response({"foo": "bar", "bar": "foo"}, 200)).should.have.header("foo")).must.be.equal("bar")
def test_custom_chain_property():
"test extending sure with a custom chain property."
class Response(object):
magic = 41
@chainproperty
def having(self):
return self
@chainproperty
def implement(self):
return self
@assertion
def attribute(self, name):
has_it = hasattr(self.obj, name)
if self.negative:
assert not has_it, "Expected was that object {0} does not have attribute {1}".format(
self.obj, name)
else:
assert has_it, "Expected was that object {0} has attribute {1}".format(
self.obj, name)
return True
expect(Response).having.attribute("magic")
expect(Response).doesnt.implement.attribute("nomagic")
|
Eyjafjallajokull/pyga
|
refs/heads/master
|
tests/test_selection_strategy/test_rank.py
|
1
|
from unittest import TestCase
from unittest.mock import MagicMock
from pyga import Candidate
from pyga import Fitness
from pyga import Population
from pyga import Random
from pyga import RankSelection
class RankSelectionStrategyTestCase(TestCase):
def setUp(self):
self.random = Random()
self.obj = RankSelection(self.random)
def create_candidate(self, fitness=None, is_natural=True):
candidate = Candidate()
candidate.fitness = Fitness(fitness, is_natural=is_natural)
return candidate
def test_select_result_type(self):
population = Population()
population.append(self.create_candidate(fitness=1))
results = self.obj.select(population, 1)
self.assertIsInstance(results, Population)
def test_select_result_size(self):
population = Population()
population.append(self.create_candidate(fitness=1))
population.append(self.create_candidate(fitness=2))
population.append(self.create_candidate(fitness=3))
population.append(self.create_candidate(fitness=4))
for selection_size in range(len(population)):
results = self.obj.select(population, selection_size+1)
self.assertEqual(selection_size+1, len(results))
def test_select_proper_items(self):
population = Population()
population.append(self.create_candidate(fitness=1))
population.append(self.create_candidate(fitness=2))
population.append(self.create_candidate(fitness=3))
population.append(self.create_candidate(fitness=4))
selection_size = 2
self.random.float = MagicMock(side_effect=[0.5, 0.9])
results = self.obj.select(population, selection_size)
self.assertEqual(results[0].fitness, 3)
self.assertEqual(results[1].fitness, 4)
def test_select_proper_items_natural_false(self):
population = Population()
population.append(self.create_candidate(fitness=-4, is_natural=False))
population.append(self.create_candidate(fitness=-3, is_natural=False))
population.append(self.create_candidate(fitness=-2, is_natural=False))
population.append(self.create_candidate(fitness=-1, is_natural=False))
selection_size = 2
self.random.float = MagicMock(side_effect=[0.5, 0.9])
results = self.obj.select(population, selection_size)
self.assertEqual(results[0].fitness, -2)
self.assertEqual(results[1].fitness, -1)
|
Intel-tensorflow/tensorflow
|
refs/heads/master
|
tensorflow/python/compiler/tensorrt/test/int32_test.py
|
5
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test conversion of graphs involving INT32 tensors and operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class ExcludeUnsupportedInt32Test(trt_test.TfTrtIntegrationTestBase):
"""Test exclusion of ops which are not supported in INT32 mode by TF-TRT"""
def _ConstOp(self, shape, dtype):
return constant_op.constant(np.random.randn(*shape), dtype=dtype)
def GraphFn(self, x):
dtype = x.dtype
b = self._ConstOp((4, 10), dtype)
x = math_ops.matmul(x, b)
b = self._ConstOp((10,), dtype)
x = nn.bias_add(x, b)
return array_ops.identity(x, name='output_0')
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.int32, [[100, 4]], [[100, 10]])
def setUp(self):
super(trt_test.TfTrtIntegrationTestBase, self).setUp() # pylint: disable=bad-super-call
# Disable layout optimizer, since it will convert BiasAdd with NHWC
# format to NCHW format under four dimensional input.
self.DisableNonTrtOptimizers()
def GetMaxBatchSize(self, run_params):
"""Returns the max_batch_size that the converter should use for tests."""
if run_params.dynamic_engine:
return None
return 100
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return []
class CalibrationInt32Support(trt_test.TfTrtIntegrationTestBase):
"""Test execution of calibration with int32 input"""
def GraphFn(self, inp):
# Can use any op that is converted to TRT with int32 inputs
inp_transposed = array_ops.transpose(inp, [0, 3, 2, 1], name='transpose_0')
return array_ops.identity(inp_transposed, name='output_0')
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.int32, [[3, 4, 5, 6]],
[[3, 6, 5, 4]])
def ShouldRunTest(self, run_params):
# Although the test passes with all configurations, only
# execute INT8 with use_calibration=True because
# that is the purpose of the test.
return trt_test.IsQuantizationWithCalibration(
run_params), 'test calibration and INT8'
def ExpectedEnginesToBuild(self, run_params):
return ['TRTEngineOp_0']
if __name__ == '__main__':
test.main()
|
betrisey/home-assistant
|
refs/heads/dev
|
tests/conftest.py
|
5
|
"""Setup some common test helper things."""
import functools
import logging
import pytest
import requests_mock as _requests_mock
from homeassistant import util
from homeassistant.util import location
from .common import async_test_home_assistant
from .test_util.aiohttp import mock_aiohttp_client
logging.basicConfig()
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
def test_real(func):
"""Force a function to require a keyword _test_real to be passed in."""
@functools.wraps(func)
def guard_func(*args, **kwargs):
real = kwargs.pop('_test_real', None)
if not real:
raise Exception('Forgot to mock or pass "_test_real=True" to %s',
func.__name__)
return func(*args, **kwargs)
return guard_func
# Guard a few functions that would make network connections
location.detect_location_info = test_real(location.detect_location_info)
location.elevation = test_real(location.elevation)
util.get_local_ip = lambda: '127.0.0.1'
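# Illustrative opt-in (a hedged sketch): a test that genuinely needs the
# network passes the guard keyword explicitly, e.g.
# info = location.detect_location_info(_test_real=True)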
@pytest.fixture
def hass(loop):
"""Fixture to provide a test instance of HASS."""
hass = loop.run_until_complete(async_test_home_assistant(loop))
yield hass
loop.run_until_complete(hass.async_stop())
@pytest.fixture
def requests_mock():
"""Fixture to provide a requests mocker."""
with _requests_mock.mock() as m:
yield m
@pytest.fixture
def aioclient_mock():
"""Fixture to mock aioclient calls."""
with mock_aiohttp_client() as mock_session:
yield mock_session
|
DLR-SC/RepoGuard
|
refs/heads/master
|
test/repoguard_test/checks/test_rejecttabs.py
|
2
|
#
# Copyright 2008 Adam Byrtek
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests the RejectTabs check.
"""
from configobj import ConfigObj
import mock
from repoguard.checks import rejecttabs
class TestRejectTabs(object):
def setup_method(self, _):
self._file_mock = mock.MagicMock()
self._transaction = mock.Mock()
self._transaction.get_files = mock.Mock(return_value={"filepath":"A"})
self._config = ConfigObj()
self._rejecttabs = rejecttabs.RejectTabs(self._transaction)
def test_leading_tab(self):
patcher = mock.patch("repoguard.checks.rejecttabs.open", create=True)
open_mock = patcher.start()
try:
self._init_file_mock(open_mock, 'if True:\n\tprint "Hello world"')
assert not self._rejecttabs.run(self._config).success
finally:
patcher.stop()
def test_leading_mixed_tab_space(self):
patcher = mock.patch("repoguard.checks.rejecttabs.open", create=True)
open_mock = patcher.start()
try:
self._init_file_mock(open_mock, 'if True:\n \tprint "Hello world"')
assert not self._rejecttabs.run(self._config).success
finally:
patcher.stop()
def test_inner_tab(self):
patcher = mock.patch("repoguard.checks.rejecttabs.open", create=True)
open_mock = patcher.start()
try:
self._init_file_mock(open_mock, 'if True:\n print "\tHello world"')
assert self._rejecttabs.run(self._config).success
finally:
patcher.stop()
def _init_file_mock(self, open_mock, file_content):
open_mock.return_value = self._file_mock
self._file_mock.__iter__ = lambda _: iter(file_content.splitlines())
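    # The tests above patch the module-global ``open`` with ``create=True`` so
    # the check iterates over in-memory lines instead of touching the file
    # system. The same pattern in isolation (a sketch; names are hypothetical):
    #
    #     with mock.patch("repoguard.checks.rejecttabs.open", create=True) as m:
    #         m.return_value.__iter__ = lambda _: iter(["\tindented line"])
    #         result = check.run(config)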
def test_skip_binary_files(self):
self._transaction.has_property = mock.Mock(return_value=True)
self._transaction.get_property = mock.Mock(return_value="application/octet-stream")
assert self._rejecttabs.run(self._config).success
|
aichingm/electron
|
refs/heads/master
|
tools/mac/copy-locales.py
|
46
|
#!/usr/bin/env python
# Copyright (c) 2013 GitHub, Inc.
# Use of this source code is governed by the MIT license that can be
# found in the LICENSE file.
import errno
import optparse
import os
import shutil
import sys
def main(argv):
parser = optparse.OptionParser()
usage = 'usage: %s [options ...] src dest locale_list'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-d', dest='dash_to_underscore', action="store_true",
default=False,
help='map "en-US" to "en" and "-" to "_" in locales')
(options, arglist) = parser.parse_args(argv)
if len(arglist) < 4:
print 'ERROR: need src, dest and list of locales'
return 1
src = arglist[1]
dest = arglist[2]
locales = arglist[3:]
for locale in locales:
# For Cocoa to find the locale at runtime, it needs to use '_' instead
# of '-' (http://crbug.com/20441). Also, 'en-US' should be represented
# simply as 'en' (http://crbug.com/19165, http://crbug.com/25578).
dirname = locale
if options.dash_to_underscore:
if locale == 'en-US':
dirname = 'en'
else:
dirname = locale.replace('-', '_')
dirname = os.path.join(dest, dirname + '.lproj')
safe_mkdir(dirname)
shutil.copy2(os.path.join(src, locale + '.pak'),
os.path.join(dirname, 'locale.pak'))
def safe_mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if __name__ == '__main__':
sys.exit(main(sys.argv))
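# Example invocation (hypothetical paths), mirroring the usage string above:
#
#   python copy-locales.py -d src/locales App.app/Contents/Resources en-US pt-BR
#
# With -d, 'en-US.pak' is copied to 'en.lproj/locale.pak' and 'pt-BR.pak' to
# 'pt_BR.lproj/locale.pak'.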
|
skoppisetty/idigbio-appliance
|
refs/heads/master
|
dataingestion/services/ingestion_manager.py
|
1
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Suresh Koppisetty <suresh.koppisetty@gmail.com>, University of
# Florida
#
# Extra layer of abstraction to ingestion manager - Currently using Celery Manager
#
# This software may be used and distributed according to the terms of the
# MIT license: http://www.opensource.org/licenses/mit-license.php
import logging, os, sys
# import celery manager - replace this with new manager
from dataingestion.services.manager.celery_manager import Celery_manager
from dataingestion.services import user_config
from dataingestion.services import model
from dataingestion.services.user_config import (get_user_config,
set_user_config, rm_user_config)
logger = logging.getLogger("iDigBioSvc.ingestion_manager")
class IngestServiceException(Exception):
def __init__(self, msg, reason=''):
Exception.__init__(self, msg)
self.reason = reason
class InputCSVException(Exception):
def __init__(self, msg, reason=''):
Exception.__init__(self, msg)
self.reason = reason
# manager has to handle setup and start_upload api
manager = Celery_manager()
def setup(worker_thread_count):
# Todo: Needs to handle this thread count
# It can be done in main.py while running celery
manager.setup(worker_thread_count)
def start_upload(values, task_id):
    if values is None:
logger.debug("Resume last batch.")
oldbatch = model.load_last_batch()
if oldbatch.finish_time and oldbatch.FailCount == 0:
logger.error("Last batch already finished, why resume?")
error = 'Last batch already finished, why resume?'
return error
# Assign local variables with values in DB.
values = {}
values[user_config.CSV_PATH] = oldbatch.CSVfilePath
values['RightsLicense'] = oldbatch.RightsLicense
else:
logger.debug("starting new task")
# Initial checks before the task is added to the queue.
path = values[user_config.CSV_PATH]
if not os.path.exists(path):
error = 'CSV file \"' + path + '\" does not exist.'
logger.error(error)
return error
# raise ValueError(error)
elif os.path.isdir(path):
error = 'The CSV path is a directory.'
logger.error(error)
return error
# raise ValueError(error)
logger.debug("All checks done")
try:
error = manager.start_upload(values, task_id)
return error
    except Exception:
logger.debug("Unexpected error:" + str(sys.exc_info()[0]))
return str(sys.exc_info()[0])
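# Example (a sketch; paths and ids are hypothetical): resuming the last batch
# versus starting a fresh upload.
#
#     setup(worker_thread_count=4)
#     start_upload(None, task_id="task-1")   # resume: reloads the last batch
#     start_upload({user_config.CSV_PATH: "/data/records.csv",
#                   "RightsLicense": "CC0"}, task_id="task-2")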
|
NikolaYolov/invenio_backup
|
refs/heads/master
|
modules/miscutil/lib/logicutils_tests.py
|
16
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for logic library."""
import unittest
from invenio.logicutils import expr, Expr, to_cnf, pl_true
from invenio.testutils import make_test_suite, run_test_suite
class exprExprOpsTest(unittest.TestCase):
"""Testing expr and Expr against one another."""
def test_trivial_expr(self):
"""logicutils - create trivial Expr with expr()"""
self.assertEqual(expr('a | b'), Expr('|', 'a', 'b'))
def test_deep_expr(self):
"""logicutils - create deep Expr with expr()"""
self.assertEqual(expr('a | b | c | d | e'),
Expr('|', Expr('|', Expr('|', Expr('|', 'a', 'b'), 'c'), 'd'), 'e'))
class toCNFTest(unittest.TestCase):
"""Testing conversion to conjunctive normal form"""
def test_singleton(self):
"""logicutils - singletons are already in CNF"""
self.assertEqual(to_cnf(expr('a')),
Expr('a'))
def test_complex_example_Norvig(self):
"""logicutils - (P&Q) | (~P & ~Q) in CNF"""
self.assertEqual(str(to_cnf('(P&Q) | (~P & ~Q)')),
str('((~P | P) & (~Q | P) & (~P | Q) & (~Q | Q))'))
def test_ORed_pair(self):
"""logicutils - ORed pair should be in CNF"""
self.assertEqual(to_cnf(expr('a | b')),
Expr('|', 'a', 'b'))
def test_ANDed_pair(self):
"""logicutils - ANDed pair should be in CNF"""
self.assertEqual(to_cnf(expr('a & b')),
Expr('&', 'a', 'b'))
class prop_logicTest(unittest.TestCase):
"""Testing basic propositional logic functionality"""
P = Expr('P')
def test_pl_true_P_true(self):
"""logicutils - True thing is evaluated as such"""
self.assertEqual(pl_true(self.P, {self.P: True}),
True)
def test_pl_true_P_false(self):
"""logicutils - False thing is evaluated as such"""
self.assertEqual(pl_true(self.P, {self.P: False}),
False)
TEST_SUITE = make_test_suite(exprExprOpsTest, toCNFTest, prop_logicTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
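# Worked example (not executed here): to_cnf distributes OR over AND, so
# to_cnf(expr('a | (b & c)')) is logically equivalent to ((a | b) & (a | c)).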
|
apple/swift-lldb
|
refs/heads/stable
|
packages/Python/lldbsuite/test/commands/expression/completion-in-lambda-and-unnnamed-class/TestCompletionInLambdaAndUnnamedClass.py
|
13
|
from lldbsuite.test import lldbinline
from lldbsuite.test import decorators
lldbinline.MakeInlineTest(__file__, globals(),)
|
CeltonMcGrath/TACTIC
|
refs/heads/master
|
src/tactic/ui/panel/static_table_layout_wdg.py
|
6
|
###########################################################
#
# Copyright (c) 2009, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ["StaticTableLayoutWdg"]
from pyasm.common import Common
from pyasm.search import Search, SearchKey
from pyasm.web import DivWdg, Table
from pyasm.widget import ThumbWdg, IconWdg
from table_layout_wdg import FastTableLayoutWdg
from tactic.ui.widget import SingleButtonWdg
class StaticTableLayoutWdg(FastTableLayoutWdg):
ARGS_KEYS = {
"mode": {
'description': "Determines whether to draw with widgets or just use the raw data",
'type': 'SelectWdg',
'values': 'widget|raw',
'order': 0,
'category': 'Required'
},
"search_type": {
            'description': "search type that this panel works with",
'type': 'TextWdg',
'order': 1,
'category': 'Required'
},
"view": {
'description': "view to be displayed",
'type': 'TextWdg',
'order': 2,
'category': 'Required'
},
"element_names": {
            'description': "Comma-delimited list of elements to view",
'type': 'TextWdg',
'order': 0,
'category': 'Optional'
},
"show_shelf": {
'description': "Determines whether or not to show the action shelf",
'type': 'SelectWdg',
'values': 'true|false',
'order': 1,
'category': 'Optional'
},
}
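    # Example kwargs (hypothetical values) matching the ARGS_KEYS schema above:
    #
    #     StaticTableLayoutWdg(mode='raw', search_type='sthpw/task',
    #                          view='table', show_shelf='false')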
def get_display(my):
#my.chunk_size = 10000
if my.kwargs.get("do_search") != "false":
my.handle_search()
my.mode = my.kwargs.get("mode")
if my.mode != 'raw':
my.mode = 'widget'
# extraneous variables inherited from TableLayoutWdg
my.edit_permission = False
top = DivWdg()
my.set_as_panel(top)
top.add_class("spt_sobject_top")
inner = DivWdg()
top.add(inner)
inner.add_color("background", "background")
inner.add_color("color", "color")
inner.add_class("spt_table")
inner.add_class("spt_layout")
inner.add_attr("spt_version", "2")
table = my.table
table.add_class("spt_table_table")
# set the sobjects to all the widgets then preprocess
if my.mode == 'widget':
for widget in my.widgets:
widget.handle_layout_behaviors(table)
widget.set_sobjects(my.sobjects)
widget.set_parent_wdg(my)
# preprocess the elements
widget.preprocess()
else:
for i, widget in enumerate(my.widgets):
#widget.handle_layout_behaviors(table)
widget.set_sobjects(my.sobjects)
#widget.set_parent_wdg(my)
# preprocess the elements
widget.preprocess()
my.process_groups()
my.order_sobjects()
my.remap_sobjects()
my.attributes = []
for i, widget in enumerate(my.widgets):
element_name = widget.get_name()
if element_name and element_name != "None":
attrs = my.config.get_element_attributes(element_name)
else:
attrs = {}
my.attributes.append(attrs)
is_refresh = my.kwargs.get("is_refresh")
if my.kwargs.get("show_shelf") not in ['false', False]:
action = my.get_action_wdg()
inner.add(action)
index = 0
table.add_attr("spt_view", my.kwargs.get("view") )
table.set_attr("spt_search_type", my.kwargs.get('search_type'))
table.set_id(my.table_id)
table.add_style("width: 100%")
inner.add(table)
table.add_color("color", "color")
# initialize the spt.table js
#my.handle_table_behaviors(table)
my.handle_headers(table)
border_color = table.get_color("table_border", default="border")
for row, sobject in enumerate(my.sobjects):
# put in a group row
if my.is_grouped:
my.handle_groups(table, row, sobject)
tr = table.add_row()
if row % 2:
background = tr.add_color("background", "background")
else:
background = tr.add_color("background", "background", -7)
tr.add_class("spt_table_row")
tr.add_attr("spt_search_key", sobject.get_search_key())
for i, widget in enumerate(my.widgets):
value_div = DivWdg()
value_div.add_style("padding: 3px")
td = table.add_cell(value_div)
td.add_style("vertical-align: top")
td.add_style("border: solid 1px %s" % border_color)
if my.mode == 'widget':
widget.set_current_index(row)
value_div.add(widget.get_buffer_display())
else:
element_name = widget.get_name()
value = sobject.get_value(element_name, no_exception=True)
value_div.add(value)
        top.add_class("spt_table_top")
        class_name = Common.get_full_class_name(my)
        top.add_attr("spt_class_name", class_name)
        table.add_class("spt_table_content")
inner.add_attr("spt_search_type", my.kwargs.get('search_type'))
inner.add_attr("spt_view", my.kwargs.get('view'))
# extra ?? Doesn't really work to keep the mode
inner.add_attr("spt_mode", my.mode)
top.add_attr("spt_mode", my.mode)
inner.add("<br clear='all'/>")
if my.kwargs.get("is_refresh") == 'true':
return inner
else:
return top
def handle_headers(my, table):
# this comes from refresh
widths = my.kwargs.get("column_widths")
# Add the headers
tr = table.add_row()
tr.add_class("spt_table_header_row")
for i, widget in enumerate(my.widgets):
widget_name = widget.get_name()
th = table.add_header()
th.add_style("text-align: left")
th.add_attr("spt_element_name", widget_name)
header_div = DivWdg()
th.add(header_div)
th.add_style("padding: 3px")
th.add_gradient("background", "background", -5, -10)
th.add_border()
if my.mode == 'widget':
value = widget.get_title()
else:
element = widget_name
value = Common.get_display_title(element)
header_div.add(value)
if widths and len(widths) > i:
th.add_style("width", widths[i])
width_set = True
width = widths[i]
else: # get width from definition
width = my.attributes[i].get("width")
if width:
th.add_style("width", width)
width_set = True
if width:
th.add_style("min-width", width)
else:
th.add_style("overflow","hidden")
widget.handle_th(th, i)
def handle_group(my, table, i, sobject, group_name, group_value):
tr, td = table.add_row_cell()
tr.add_color("background", "background3", 5)
tr.add_color("color", "color3")
if group_value == '__NONE__':
label = '---'
else:
label = Common.process_unicode_string(group_value)
td.add(label)
td.add_style("height: 25px")
td.add_style("padding-left: %spx" % (i*15+5))
td.add_style("border-style: solid")
border_color = td.get_color("border")
td.add_style("border-width: 0px 0px 0px 1px")
td.add_style("border-color: %s" % border_color)
td.add_style("font-weight: bold")
|
mrfalcone/pyext2
|
refs/heads/master
|
ext2/__init__.py
|
1
|
#!/usr/bin/env python
"""
Module for interfacing with an Ext2 filesystem image.
"""
__license__ = "BSD"
__copyright__ = "Copyright 2013, Michael R. Falcone"
from .error import *
from .fs import Ext2Filesystem
from .file import Ext2File
__all__ = ["Ext2File", "Ext2Filesystem", "FilesystemError", "InvalidFileTypeError",
"UnsupportedOperationError", "FileNotFoundError"]
|
theblacklion/pyglet
|
refs/heads/pyglet-1.2-maintenance
|
pyglet/text/document.py
|
34
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Formatted and unformatted document interfaces used by text layout.
Abstract representation
=======================
Styled text in pyglet is represented by one of the `AbstractDocument` classes,
which manage the state representation of text and style independently of how
it is loaded or rendered.
A document consists of the document text (a Unicode string) and a set of
named style ranges. For example, consider the following (artificial)
example::
    0    5    10   15   20
    The cat sat on the mat.
    +++++++        +++++++    "bold"
                ++++++         "italic"
If this example were to be rendered, "The cat" and "the mat" would be in bold,
and "on the" in italics. Note that the second "the" is both bold and italic.
The document styles recorded for this example would be ``"bold"`` over ranges
(0-7, 15-22) and ``"italic"`` over range (12-18). Overlapping styles are
permitted; unlike HTML and other structured markup, the ranges need not be
nested.
The document has no knowledge of the semantics of ``"bold"`` or ``"italic"``,
it stores only the style names. The pyglet layout classes give meaning to
these style names in the way they are rendered; but you are also free to
invent your own style names (which will be ignored by the layout classes).
This can be useful to tag areas of interest in a document, or maintain
references back to the source material.
As well as text, the document can contain arbitrary elements represented by
`InlineElement`. An inline element behaves like a single character in the
document, but can be rendered by the application.
Paragraph breaks
================
Paragraph breaks are marked with a "newline" character (U+000A). The Unicode
paragraph break (U+2029) can also be used.
Line breaks (U+2028) can be used to force a line break within a paragraph.
See Unicode recommendation UTR #13 for more information:
http://unicode.org/reports/tr13/tr13-5.html.
Document classes
================
Any class implementing `AbstractDocument` provides an interface to a
document model as described above. In theory a structured document such as
HTML or XML could export this model, though the classes provided by pyglet
implement only unstructured documents.
The `UnformattedDocument` class assumes any styles set are set over the entire
document. So, regardless of the range specified when setting a ``"bold"``
style attribute, for example, the entire document will receive that style.
The `FormattedDocument` class implements the document model directly, using
the `RunList` class to represent style runs efficiently.
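For example, the style ranges from the "cat sat on the mat" example above
could be built on a `FormattedDocument` like this (a sketch)::

    doc = FormattedDocument('The cat sat on the mat.')
    doc.set_style(0, 7, {'bold': True})
    doc.set_style(15, 22, {'bold': True})
    doc.set_style(12, 18, {'italic': True})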
Style attributes
================
The following character style attribute names are recognised by pyglet:
``font_name``
Font family name, as given to `pyglet.font.load`.
``font_size``
Font size, in points.
``bold``
Boolean.
``italic``
Boolean.
``underline``
4-tuple of ints in range (0, 255) giving RGBA underline color, or None
(default) for no underline.
``kerning``
Additional space to insert between glyphs, in points. Defaults to 0.
``baseline``
Offset of glyph baseline from line baseline, in points. Positive values
give a superscript, negative values give a subscript. Defaults to 0.
``color``
4-tuple of ints in range (0, 255) giving RGBA text color
``background_color``
4-tuple of ints in range (0, 255) giving RGBA text background color; or
``None`` for no background fill.
The following paragraph style attribute names are recognised by pyglet. Note
that paragraph styles are handled no differently from character styles by the
document: it is the application's responsibility to set the style over an
entire paragraph, otherwise results are undefined.
``align``
``left`` (default), ``center`` or ``right``.
``indent``
    Additional horizontal space to insert before the first glyph of the
    first line of a paragraph, in points.
``leading``
Additional space to insert between consecutive lines within a paragraph,
in points. Defaults to 0.
``line_spacing``
Distance between consecutive baselines in a paragraph, in points.
Defaults to ``None``, which automatically calculates the tightest line
spacing for each line based on the font ascent and descent.
``margin_left``
Left paragraph margin, in pixels.
``margin_right``
Right paragraph margin, in pixels.
``margin_top``
Margin above paragraph, in pixels.
``margin_bottom``
Margin below paragraph, in pixels. Adjacent margins do not collapse.
``tab_stops``
List of horizontal tab stops, in pixels, measured from the left edge of
the text layout. Defaults to the empty list. When the tab stops
are exhausted, they implicitly continue at 50 pixel intervals.
``wrap``
Boolean. If True (the default), text wraps within the width of the layout.
Other attributes can be used to store additional style information within the
document; it will be ignored by the built-in text classes.
All style attributes (including those not present in a document) default to
``None`` (including the so-called "boolean" styles listed above). The meaning
of a ``None`` style is style- and application-dependent.
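Paragraph styles are most conveniently applied with `set_paragraph_style`,
which snaps the character range to the enclosing paragraph(s) (a sketch)::

    doc.set_paragraph_style(0, len(doc.text), {'align': 'center',
                                               'margin_left': 10})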
:since: pyglet 1.1
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import re
import sys
from pyglet import event
from pyglet.text import runlist
_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc
#: The style attribute takes on multiple values in the document.
STYLE_INDETERMINATE = 'indeterminate'
class InlineElement(object):
'''Arbitrary inline element positioned within a formatted document.
Elements behave like a single glyph in the document. They are
measured by their horizontal advance, ascent above the baseline, and
descent below the baseline.
The pyglet layout classes reserve space in the layout for elements and
call the element's methods to ensure they are rendered at the
appropriate position.
    If the size of an element (any of the `advance`, `ascent`, or `descent`
    instance variables) is modified, it is the application's responsibility to
trigger a reflow of the appropriate area in the affected layouts. This
can be done by forcing a style change over the element's position.
:Ivariables:
`ascent` : int
Ascent of the element above the baseline, in pixels.
`descent` : int
Descent of the element below the baseline, in pixels.
Typically negative.
`advance` : int
Width of the element, in pixels.
'''
def __init__(self, ascent, descent, advance):
self.ascent = ascent
self.descent = descent
self.advance = advance
self._position = None
position = property(lambda self: self._position,
doc='''Position of the element within the
document. Read-only.
:type: int
''')
def place(self, layout, x, y):
'''Construct an instance of the element at the given coordinates.
Called when the element's position within a layout changes, either
due to the initial condition, changes in the document or changes in
the layout size.
It is the responsibility of the element to clip itself against
the layout boundaries, and position itself appropriately with respect
to the layout's position and viewport offset.
The `TextLayout.top_state` graphics state implements this transform
and clipping into window space.
:Parameters:
`layout` : `pyglet.text.layout.TextLayout`
The layout the element moved within.
`x` : int
Position of the left edge of the element, relative
to the left edge of the document, in pixels.
`y` : int
Position of the baseline, relative to the top edge of the
document, in pixels. Note that this is typically negative.
'''
raise NotImplementedError('abstract')
def remove(self, layout):
'''Remove this element from a layout.
The counterpart of `place`; called when the element is no longer
visible in the given layout.
:Parameters:
`layout` : `pyglet.text.layout.TextLayout`
The layout the element was removed from.
'''
raise NotImplementedError('abstract')
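# A minimal sketch (hypothetical) of a concrete element: a fixed-height box.
# A real implementation would build graphics in `place` and tear them down in
# `remove`.
#
#     class BoxElement(InlineElement):
#         def __init__(self, size):
#             super(BoxElement, self).__init__(ascent=size, descent=0,
#                                              advance=size)
#
#         def place(self, layout, x, y):
#             pass  # construct vertex lists at (x, y) in the layout's batch
#
#         def remove(self, layout):
#             pass  # delete whatever place() created for this layout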
class AbstractDocument(event.EventDispatcher):
'''Abstract document interface used by all `pyglet.text` classes.
This class can be overridden to interface pyglet with a third-party
document format. It may be easier to implement the document format in
terms of one of the supplied concrete classes `FormattedDocument` or
`UnformattedDocument`.
'''
_previous_paragraph_re = re.compile(u'\n[^\n\u2029]*$')
_next_paragraph_re = re.compile(u'[\n\u2029]')
def __init__(self, text=''):
super(AbstractDocument, self).__init__()
self._text = u''
self._elements = []
if text:
self.insert_text(0, text)
def _get_text(self):
return self._text
def _set_text(self, text):
if text == self._text:
return
self.delete_text(0, len(self._text))
self.insert_text(0, text)
text = property(_get_text, _set_text,
doc='''Document text.
For efficient incremental updates, use the `insert_text` and
`delete_text` methods instead of replacing this property.
:type: str
''')
def get_paragraph_start(self, pos):
'''Get the starting position of a paragraph.
:Parameters:
`pos` : int
Character position within paragraph.
:rtype: int
'''
# Tricky special case where the $ in pattern matches before the \n at
# the end of the string instead of the end of the string.
if (self._text[:pos + 1].endswith('\n') or
self._text[:pos + 1].endswith(u'\u2029')):
return pos
m = self._previous_paragraph_re.search(self._text, 0, pos + 1)
if not m:
return 0
return m.start() + 1
def get_paragraph_end(self, pos):
'''Get the end position of a paragraph.
:Parameters:
`pos` : int
Character position within paragraph.
:rtype: int
'''
m = self._next_paragraph_re.search(self._text, pos)
if not m:
return len(self._text)
return m.start() + 1
def get_style_runs(self, attribute):
'''Get a style iterator over the given style attribute.
:Parameters:
`attribute` : str
Name of style attribute to query.
:rtype: `AbstractRunIterator`
'''
raise NotImplementedError('abstract')
def get_style(self, attribute, position=0):
'''Get an attribute style at the given position.
:Parameters:
`attribute` : str
Name of style attribute to query.
`position` : int
Character position of document to query.
:return: The style set for the attribute at the given position.
'''
raise NotImplementedError('abstract')
def get_style_range(self, attribute, start, end):
'''Get an attribute style over the given range.
If the style varies over the range, `STYLE_INDETERMINATE` is returned.
:Parameters:
`attribute` : str
Name of style attribute to query.
`start` : int
Starting character position.
`end` : int
Ending character position (exclusive).
:return: The style set for the attribute over the given range, or
`STYLE_INDETERMINATE` if more than one value is set.
'''
iter = self.get_style_runs(attribute)
_, value_end, value = iter.ranges(start, end).next()
if value_end < end:
return STYLE_INDETERMINATE
else:
return value
def get_font_runs(self, dpi=None):
'''Get a style iterator over the `pyglet.font.Font` instances used in
the document.
The font instances are created on-demand by inspection of the
``font_name``, ``font_size``, ``bold`` and ``italic`` style
attributes.
:Parameters:
`dpi` : float
Optional resolution to construct fonts at. See
`pyglet.font.load`.
:rtype: `AbstractRunIterator`
'''
raise NotImplementedError('abstract')
def get_font(self, position, dpi=None):
'''Get the font instance used at the given position.
:see: `get_font_runs`
:Parameters:
`position` : int
Character position of document to query.
`dpi` : float
Optional resolution to construct fonts at. See
`pyglet.font.load`.
:rtype: `pyglet.font.Font`
:return: The font at the given position.
'''
raise NotImplementedError('abstract')
def insert_text(self, start, text, attributes=None):
'''Insert text into the document.
:Parameters:
`start` : int
Character insertion point within document.
`text` : str
Text to insert.
`attributes` : dict
Optional dictionary giving named style attributes of the
inserted text.
'''
self._insert_text(start, text, attributes)
self.dispatch_event('on_insert_text', start, text)
def _insert_text(self, start, text, attributes):
self._text = u''.join((self._text[:start], text, self._text[start:]))
len_text = len(text)
for element in self._elements:
if element._position >= start:
element._position += len_text
def delete_text(self, start, end):
'''Delete text from the document.
:Parameters:
`start` : int
Starting character position to delete from.
`end` : int
Ending character position to delete to (exclusive).
'''
self._delete_text(start, end)
self.dispatch_event('on_delete_text', start, end)
def _delete_text(self, start, end):
for element in list(self._elements):
if start <= element._position < end:
self._elements.remove(element)
elif element._position >= end: # fix bug 538
element._position -= (end - start)
self._text = self._text[:start] + self._text[end:]
def insert_element(self, position, element, attributes=None):
        '''Insert an element into the document.
See the `InlineElement` class documentation for details of
usage.
:Parameters:
`position` : int
Character insertion point within document.
`element` : `InlineElement`
Element to insert.
`attributes` : dict
Optional dictionary giving named style attributes of the
inserted text.
'''
assert element._position is None, \
'Element is already in a document.'
self.insert_text(position, '\0', attributes)
element._position = position
self._elements.append(element)
self._elements.sort(key=lambda d:d.position)
def get_element(self, position):
'''Get the element at a specified position.
:Parameters:
`position` : int
Position in the document of the element.
:rtype: `InlineElement`
'''
for element in self._elements:
if element._position == position:
return element
raise RuntimeError('No element at position %d' % position)
def set_style(self, start, end, attributes):
'''Set text style of some or all of the document.
:Parameters:
`start` : int
Starting character position.
`end` : int
Ending character position (exclusive).
`attributes` : dict
Dictionary giving named style attributes of the text.
'''
self._set_style(start, end, attributes)
self.dispatch_event('on_style_text', start, end, attributes)
def _set_style(self, start, end, attributes):
raise NotImplementedError('abstract')
def set_paragraph_style(self, start, end, attributes):
'''Set the style for a range of paragraphs.
This is a convenience method for `set_style` that aligns the
character range to the enclosing paragraph(s).
:Parameters:
`start` : int
Starting character position.
`end` : int
Ending character position (exclusive).
`attributes` : dict
Dictionary giving named style attributes of the paragraphs.
'''
start = self.get_paragraph_start(start)
end = self.get_paragraph_end(end)
self._set_style(start, end, attributes)
self.dispatch_event('on_style_text', start, end, attributes)
if _is_epydoc:
def on_insert_text(self, start, text):
'''Text was inserted into the document.
:Parameters:
`start` : int
Character insertion point within document.
`text` : str
The text that was inserted.
:event:
'''
def on_delete_text(self, start, end):
'''Text was deleted from the document.
:Parameters:
`start` : int
Starting character position of deleted text.
`end` : int
Ending character position of deleted text (exclusive).
:event:
'''
def on_style_text(self, start, end, attributes):
'''Text character style was modified.
:Parameters:
`start` : int
Starting character position of modified text.
`end` : int
Ending character position of modified text (exclusive).
`attributes` : dict
Dictionary giving updated named style attributes of the
text.
:event:
'''
AbstractDocument.register_event_type('on_insert_text')
AbstractDocument.register_event_type('on_delete_text')
AbstractDocument.register_event_type('on_style_text')
class UnformattedDocument(AbstractDocument):
'''A document having uniform style over all text.
    Changes to the style of text within the document affect the entire
document. For convenience, the ``position`` parameters of the style
methods may therefore be omitted.
'''
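    # Example (a sketch): the range arguments are ignored, so any set_style
    # call restyles the entire document.
    #
    #     doc = UnformattedDocument('Hello')
    #     doc.set_style(0, 1, {'bold': True})   # the whole document goes bold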
def __init__(self, text=''):
super(UnformattedDocument, self).__init__(text)
self.styles = {}
def get_style_runs(self, attribute):
value = self.styles.get(attribute)
return runlist.ConstRunIterator(len(self.text), value)
def get_style(self, attribute, position=None):
return self.styles.get(attribute)
def set_style(self, start, end, attributes):
return super(UnformattedDocument, self).set_style(
0, len(self.text), attributes)
def _set_style(self, start, end, attributes):
self.styles.update(attributes)
def set_paragraph_style(self, start, end, attributes):
return super(UnformattedDocument, self).set_paragraph_style(
0, len(self.text), attributes)
def get_font_runs(self, dpi=None):
ft = self.get_font(dpi=dpi)
return runlist.ConstRunIterator(len(self.text), ft)
def get_font(self, position=None, dpi=None):
from pyglet import font
font_name = self.styles.get('font_name')
font_size = self.styles.get('font_size')
bold = self.styles.get('bold', False)
italic = self.styles.get('italic', False)
return font.load(font_name, font_size,
bold=bool(bold), italic=bool(italic), dpi=dpi)
def get_element_runs(self):
return runlist.ConstRunIterator(len(self._text), None)
class FormattedDocument(AbstractDocument):
'''Simple implementation of a document that maintains text formatting.
Changes to text style are applied according to the description in
`AbstractDocument`. All styles default to ``None``.
'''
def __init__(self, text=''):
self._style_runs = {}
super(FormattedDocument, self).__init__(text)
def get_style_runs(self, attribute):
try:
return self._style_runs[attribute].get_run_iterator()
except KeyError:
return _no_style_range_iterator
def get_style(self, attribute, position=0):
try:
return self._style_runs[attribute][position]
except KeyError:
return None
def _set_style(self, start, end, attributes):
for attribute, value in attributes.items():
try:
runs = self._style_runs[attribute]
except KeyError:
runs = self._style_runs[attribute] = runlist.RunList(0, None)
runs.insert(0, len(self._text))
runs.set_run(start, end, value)
def get_font_runs(self, dpi=None):
return _FontStyleRunsRangeIterator(
self.get_style_runs('font_name'),
self.get_style_runs('font_size'),
self.get_style_runs('bold'),
self.get_style_runs('italic'),
dpi)
def get_font(self, position, dpi=None):
iter = self.get_font_runs(dpi)
return iter[position]
def get_element_runs(self):
return _ElementIterator(self._elements, len(self._text))
def _insert_text(self, start, text, attributes):
super(FormattedDocument, self)._insert_text(start, text, attributes)
len_text = len(text)
for runs in self._style_runs.values():
runs.insert(start, len_text)
if attributes is not None:
for attribute, value in attributes.items():
try:
runs = self._style_runs[attribute]
except KeyError:
runs = self._style_runs[attribute] = \
runlist.RunList(0, None)
runs.insert(0, len(self.text))
runs.set_run(start, start + len_text, value)
def _delete_text(self, start, end):
super(FormattedDocument, self)._delete_text(start, end)
for runs in self._style_runs.values():
runs.delete(start, end)
def _iter_elements(elements, length):
last = 0
for element in elements:
p = element.position
yield last, p, None
yield p, p + 1, element
last = p + 1
yield last, length, None
class _ElementIterator(runlist.RunIterator):
def __init__(self, elements, length):
self._run_list_iter = _iter_elements(elements, length)
self.start, self.end, self.value = self.next()
class _FontStyleRunsRangeIterator(object):
# XXX subclass runlist
def __init__(self, font_names, font_sizes, bolds, italics, dpi):
self.zip_iter = runlist.ZipRunIterator(
(font_names, font_sizes, bolds, italics))
self.dpi = dpi
def ranges(self, start, end):
from pyglet import font
for start, end, styles in self.zip_iter.ranges(start, end):
font_name, font_size, bold, italic = styles
ft = font.load(font_name, font_size,
bold=bool(bold), italic=bool(italic),
dpi=self.dpi)
yield start, end, ft
def __getitem__(self, index):
from pyglet import font
font_name, font_size, bold, italic = self.zip_iter[index]
return font.load(font_name, font_size,
bold=bool(bold), italic=bool(italic),
dpi=self.dpi)
class _NoStyleRangeIterator(object):
# XXX subclass runlist
def ranges(self, start, end):
yield start, end, None
def __getitem__(self, index):
return None
_no_style_range_iterator = _NoStyleRangeIterator()
|
rbarlow/pulp_puppet
|
refs/heads/master
|
pulp_puppet_tools/setup.py
|
1
|
from setuptools import setup, find_packages
setup(
name='pulp_puppet_tools',
version='2.8.0b4',
license='GPLv2+',
packages=find_packages(exclude=['test', 'test.*']),
author='Pulp Team',
author_email='pulp-list@redhat.com',
entry_points={
'console_scripts': [
'pulp-puppet-module-builder = pulp_puppet.tools.puppet_module_builder:main',
]
}
)
|
binwiederhier/plugin.video.vox-now.de
|
refs/heads/master
|
resources/lib/kodion/impl/mock/mock_context_ui.py
|
2
|
__author__ = 'bromix'
from ..abstract_context_ui import AbstractContextUI
from ...logging import *
class MockContextUI(AbstractContextUI):
def __init__(self):
AbstractContextUI.__init__(self)
self._view_mode = None
pass
def set_view_mode(self, view_mode):
self._view_mode = view_mode
pass
def get_view_mode(self):
return self._view_mode
def get_skin_id(self):
return 'skin.kodion.dummy'
def on_keyboard_input(self, title, default='', hidden=False):
print '[' + title + ']'
print "Returning 'Hello World'"
# var = raw_input("Please enter something: ")
var = u'Hello World'
if var:
return True, var
return False, ''
def show_notification(self, message, header='', image_uri='', time_milliseconds=5000):
log('=======NOTIFICATION=======')
log('Message : %s' % message)
log('header : %s' % header)
log('image_uri: %s' % image_uri)
log('Time : %d' % time_milliseconds)
log('==========================')
pass
def open_settings(self):
log("called 'open_settings'")
pass
def refresh_container(self):
log("called 'refresh_container'")
pass
pass
|
sdague/home-assistant
|
refs/heads/dev
|
tests/components/buienradar/test_sensor.py
|
19
|
"""The tests for the Buienradar sensor platform."""
from homeassistant.components import sensor
from homeassistant.setup import async_setup_component
CONDITIONS = ["stationname", "temperature"]
BASE_CONFIG = {
"sensor": [
{
"platform": "buienradar",
"name": "volkel",
"latitude": 51.65,
"longitude": 5.7,
"monitored_conditions": CONDITIONS,
}
]
}
async def test_smoke_test_setup_component(hass):
"""Smoke test for successfully set-up with default config."""
assert await async_setup_component(hass, sensor.DOMAIN, BASE_CONFIG)
await hass.async_block_till_done()
for cond in CONDITIONS:
state = hass.states.get(f"sensor.volkel_{cond}")
assert state.state == "unknown"
|
ultmaster/eoj3
|
refs/heads/master
|
account/migrations/0011_auto_20170928_1327.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-09-28 13:27
from __future__ import unicode_literals
import account.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0010_auto_20170914_1920'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, unique=True, validators=[account.models.UsernameValidator()], verbose_name='username'),
),
]
|
wesparish/heat-templates
|
refs/heads/master
|
tests/software_config/config-tool-fake.py
|
8
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
A fake config tool for unit testing the software-config hooks.
JSON containing the current environment variables and command line arguments
is written to the file specified by the path in environment variable
TEST_STATE_PATH.
Environment variable TEST_RESPONSE defines JSON specifying what files to write
out, and what to print to stdout and stderr.
'''
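# Example TEST_RESPONSE payload (hypothetical values):
#
#   {"files": {"/tmp/fake.conf": "key=value\n"},
#    "stdout": "ok", "stderr": "", "returncode": 0}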
import json
import os
import sys
def main(argv=sys.argv):
with open(os.environ.get('TEST_STATE_PATH'), 'w') as f:
json.dump({'env': dict(os.environ), 'args': argv}, f)
if 'TEST_RESPONSE' not in os.environ:
return
response = json.loads(os.environ.get('TEST_RESPONSE'))
    for k, v in response.get('files', {}).iteritems():
        with open(k, 'w') as f:
            f.write(v)
sys.stdout.write(response.get('stdout', ''))
sys.stderr.write(response.get('stderr', ''))
return response.get('returncode', 0)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
wwj718/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/tests/test_credit_requirements.py
|
9
|
"""
Tests for credit requirement display on the progress page.
"""
import datetime
import ddt
from mock import patch
from pytz import UTC
from django.conf import settings
from django.core.urlresolvers import reverse
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from util.date_utils import get_time_display, DEFAULT_SHORT_DATE_FORMAT
from course_modes.models import CourseMode
from openedx.core.djangoapps.credit import api as credit_api
from openedx.core.djangoapps.credit.models import CreditCourse
@patch.dict(settings.FEATURES, {"ENABLE_CREDIT_ELIGIBILITY": True})
@ddt.ddt
class ProgressPageCreditRequirementsTest(ModuleStoreTestCase):
"""
Tests for credit requirement display on the progress page.
"""
USERNAME = "bob"
PASSWORD = "test"
USER_FULL_NAME = "Bob"
MIN_GRADE_REQ_DISPLAY = "Final Grade Credit Requirement"
VERIFICATION_REQ_DISPLAY = "Midterm Exam Credit Requirement"
def setUp(self):
super(ProgressPageCreditRequirementsTest, self).setUp()
# Create a course and configure it as a credit course
self.course = CourseFactory.create()
CreditCourse.objects.create(course_key=self.course.id, enabled=True)
# Configure credit requirements (passing grade and in-course reverification)
credit_api.set_credit_requirements(
self.course.id,
[
{
"namespace": "grade",
"name": "grade",
"display_name": self.MIN_GRADE_REQ_DISPLAY,
"criteria": {
"min_grade": 0.8
}
},
{
"namespace": "reverification",
"name": "midterm",
"display_name": self.VERIFICATION_REQ_DISPLAY,
"criteria": {}
}
]
)
# Create a user and log in
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
self.user.profile.name = self.USER_FULL_NAME
self.user.profile.save()
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result, msg="Could not log in")
# Enroll the user in the course as "verified"
self.enrollment = CourseEnrollmentFactory(
user=self.user,
course_id=self.course.id,
mode="verified"
)
def test_credit_requirements_maybe_eligible(self):
# The user hasn't satisfied any of the credit requirements yet, but she
# also hasn't failed any.
response = self._get_progress_page()
# Expect that the requirements are displayed
self.assertContains(response, self.MIN_GRADE_REQ_DISPLAY)
self.assertContains(response, self.VERIFICATION_REQ_DISPLAY)
self.assertContains(response, "Upcoming")
self.assertContains(
response,
"{}, you have not yet met the requirements for credit".format(self.USER_FULL_NAME)
)
def test_credit_requirements_eligible(self):
# Mark the user as eligible for all requirements
credit_api.set_credit_requirement_status(
self.user.username, self.course.id,
"grade", "grade",
status="satisfied",
reason={"final_grade": 0.95}
)
credit_api.set_credit_requirement_status(
self.user.username, self.course.id,
"reverification", "midterm",
status="satisfied", reason={}
)
# Check the progress page display
response = self._get_progress_page()
self.assertContains(response, self.MIN_GRADE_REQ_DISPLAY)
self.assertContains(response, self.VERIFICATION_REQ_DISPLAY)
self.assertContains(
response,
"{}, you have met the requirements for credit in this course.".format(self.USER_FULL_NAME)
)
self.assertContains(response, "Completed {date}".format(date=self._now_formatted_date()))
self.assertContains(response, "95%")
def test_credit_requirements_not_eligible(self):
        # Mark the user as having failed the verification requirement
credit_api.set_credit_requirement_status(
self.user.username, self.course.id,
"reverification", "midterm",
status="failed", reason={}
)
# Check the progress page display
response = self._get_progress_page()
self.assertContains(response, self.MIN_GRADE_REQ_DISPLAY)
self.assertContains(response, self.VERIFICATION_REQ_DISPLAY)
self.assertContains(
response,
"{}, you are no longer eligible for credit in this course.".format(self.USER_FULL_NAME)
)
self.assertContains(response, "Verification Failed")
@ddt.data(
(CourseMode.VERIFIED, True),
(CourseMode.CREDIT_MODE, True),
(CourseMode.HONOR, False),
(CourseMode.AUDIT, False),
(CourseMode.PROFESSIONAL, False),
(CourseMode.NO_ID_PROFESSIONAL_MODE, False)
)
@ddt.unpack
    def test_credit_requirements_on_progress_page(self, enrollment_mode, is_requirement_displayed):
        """Test that the progress table is displayed only to verified and credit students."""
self.enrollment.mode = enrollment_mode
self.enrollment.save() # pylint: disable=no-member
response = self._get_progress_page()
# Verify the requirements are shown only if the user is in a credit-eligible mode.
classes = ('credit-eligibility', 'eligibility-heading')
method = self.assertContains if is_requirement_displayed else self.assertNotContains
for _class in classes:
method(response, _class)
def _get_progress_page(self):
"""Load the progress page for the course the user is enrolled in. """
url = reverse("progress", kwargs={"course_id": unicode(self.course.id)})
return self.client.get(url)
def _now_formatted_date(self):
"""Retrieve the formatted current date. """
return get_time_display(
datetime.datetime.now(UTC),
DEFAULT_SHORT_DATE_FORMAT,
settings.TIME_ZONE
)
|
coursera/courseraresearchexports
|
refs/heads/master
|
courseraresearchexports/constants/__init__.py
|
1
|
__all__ = [
"api_constants",
"db_constants",
"container_constants"
]
from . import * # noqa
|
liangazhou/django-rdp
|
refs/heads/master
|
packages/Django-1.8.6/tests/many_to_many/tests.py
|
2
|
from __future__ import unicode_literals
from django.db import transaction
from django.test import TestCase
from django.utils import six
from .models import Article, InheritedArticleA, InheritedArticleB, Publication
class ManyToManyTests(TestCase):
def setUp(self):
# Create a couple of Publications.
self.p1 = Publication.objects.create(id=None, title='The Python Journal')
self.p2 = Publication.objects.create(id=None, title='Science News')
self.p3 = Publication.objects.create(id=None, title='Science Weekly')
self.p4 = Publication.objects.create(title='Highlights for Children')
self.a1 = Article.objects.create(id=None, headline='Django lets you build Web apps easily')
self.a1.publications.add(self.p1)
self.a2 = Article.objects.create(id=None, headline='NASA uses Python')
self.a2.publications.add(self.p1, self.p2, self.p3, self.p4)
self.a3 = Article.objects.create(headline='NASA finds intelligent life on Earth')
self.a3.publications.add(self.p2)
self.a4 = Article.objects.create(headline='Oxygen-free diet works wonders')
self.a4.publications.add(self.p2)
def test_add(self):
# Create an Article.
        a5 = Article(id=None, headline='Django lets you create Web apps easily')
# You can't associate it with a Publication until it's been saved.
self.assertRaises(ValueError, getattr, a5, 'publications')
# Save it!
a5.save()
# Associate the Article with a Publication.
a5.publications.add(self.p1)
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: The Python Journal>'])
# Create another Article, and set it to appear in both Publications.
a6 = Article(id=None, headline='ESA uses Python')
a6.save()
a6.publications.add(self.p1, self.p2)
a6.publications.add(self.p3)
# Adding a second time is OK
a6.publications.add(self.p3)
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Adding an object of the wrong type raises TypeError
with six.assertRaisesRegex(self, TypeError, "'Publication' instance expected, got <Article.*"):
with transaction.atomic():
a6.publications.add(a5)
# Add a Publication directly via publications.add by using keyword arguments.
a6.publications.create(title='Highlights for Adults')
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Adults>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_reverse_add(self):
# Adding via the 'other' end of an m2m
a5 = Article(headline='NASA finds intelligent life on Mars')
a5.save()
self.p2.article_set.add(a5)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: Science News>'])
# Adding via the other end using keywords
self.p2.article_set.create(headline='Carbon-free diet works wonders')
self.assertQuerysetEqual(
self.p2.article_set.all(),
[
'<Article: Carbon-free diet works wonders>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
a6 = self.p2.article_set.all()[3]
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_related_sets(self):
# Article objects have access to their related Publication objects.
self.assertQuerysetEqual(self.a1.publications.all(),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Publication objects have access to their related Article objects.
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p1.article_set.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(Publication.objects.get(id=self.p4.id).article_set.all(),
['<Article: NASA uses Python>'])
def test_selects(self):
# We can perform kwarg queries across m2m relationships
self.assertQuerysetEqual(
Article.objects.filter(publications__id__exact=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__pk=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science"),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science").distinct(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# The count() function respects distinct() as well.
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").count(), 4)
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").distinct().count(), 3)
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id, self.p2.id]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id, self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1, self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# Excluding a related item works as you would expect, too (although the SQL
# involved is a little complex).
self.assertQuerysetEqual(Article.objects.exclude(publications=self.p2),
['<Article: Django lets you build Web apps easily>'])
def test_reverse_selects(self):
# Reverse m2m queries are supported (i.e., starting at the table that
# doesn't have a ManyToManyField).
self.assertQuerysetEqual(Publication.objects.filter(id__exact=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(pk=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__headline__startswith="NASA"),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Publication.objects.filter(article__id__exact=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article__pk=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id, self.a2.id]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id, self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1, self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_delete(self):
# If we delete a Publication, its Articles won't be able to access it.
self.p1.delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
])
self.assertQuerysetEqual(self.a1.publications.all(), [])
# If we delete an Article, its Publications won't be able to access it.
self.a2.delete()
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
def test_bulk_delete(self):
# Bulk delete some Publications - references to deleted publications should go
Publication.objects.filter(title__startswith='Science').delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: The Python Journal>',
])
# Bulk delete some articles - references to deleted objects should go
q = Article.objects.filter(headline__startswith='Django')
self.assertQuerysetEqual(q, ['<Article: Django lets you build Web apps easily>'])
q.delete()
# After the delete, the QuerySet cache needs to be cleared,
# and the referenced objects should be gone
self.assertQuerysetEqual(q, [])
self.assertQuerysetEqual(self.p1.article_set.all(),
['<Article: NASA uses Python>'])
def test_remove(self):
# Removing publication from an article:
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.a4.publications.remove(self.p2)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(self.a4.publications.all(), [])
# And from the other end
self.p2.article_set.remove(self.a3)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(self.a3.publications.all(), [])
def test_assign(self):
# Relation sets can be assigned. Assignment clears any existing set members
self.p2.article_set = [self.a4, self.a3]
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications = [self.p3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
        # An alternative to calling clear() is to assign the empty set
self.p2.article_set = []
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.a4.publications = []
self.assertQuerysetEqual(self.a4.publications.all(), [])
def test_assign_ids(self):
# Relation sets can also be set using primary key values
self.p2.article_set = [self.a4.id, self.a3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications = [self.p3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
def test_forward_assign_with_queryset(self):
# Ensure that querysets used in m2m assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
# ManyRelatedObjectsDescriptor.__set__. Refs #19816.
self.a1.publications = [self.p1, self.p2]
qs = self.a1.publications.filter(title='The Python Journal')
self.a1.publications = qs
self.assertEqual(1, self.a1.publications.count())
self.assertEqual(1, qs.count())
def test_reverse_assign_with_queryset(self):
# Ensure that querysets used in M2M assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
# ReverseManyRelatedObjectsDescriptor.__set__. Refs #19816.
self.p1.article_set = [self.a1, self.a2]
qs = self.p1.article_set.filter(headline='Django lets you build Web apps easily')
self.p1.article_set = qs
self.assertEqual(1, self.p1.article_set.count())
self.assertEqual(1, qs.count())
def test_clear(self):
# Relation sets can be cleared:
self.p2.article_set.clear()
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.assertQuerysetEqual(self.a4.publications.all(), [])
# And you can clear from the other end
self.p2.article_set.add(self.a3, self.a4)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
[
'<Publication: Science News>',
])
self.a4.publications.clear()
self.assertQuerysetEqual(self.a4.publications.all(), [])
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
def test_inherited_models_selects(self):
"""
#24156 - Objects from child models where the parent's m2m field uses
related_name='+' should be retrieved correctly.
"""
a = InheritedArticleA.objects.create()
b = InheritedArticleB.objects.create()
a.publications.add(self.p1, self.p2)
self.assertQuerysetEqual(a.publications.all(),
[
'<Publication: Science News>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(b.publications.all(), [])
b.publications.add(self.p3)
self.assertQuerysetEqual(a.publications.all(),
[
'<Publication: Science News>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(b.publications.all(),
[
'<Publication: Science Weekly>',
])
|
tiagofernandez/dailyevents-cloud
|
refs/heads/master
|
acceptance/dailyevents/__init__.py
|
12133432
| |
CSC-ORG/Dynamic-Dashboard-2015
|
refs/heads/master
|
engine/lib/python2.7/site-packages/django/contrib/gis/db/backends/spatialite/__init__.py
|
12133432
| |
yunyu2019/blog
|
refs/heads/master
|
python/python_challenge/5.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# note: serialization and deserialization with pickle
import pickle
import pprint
import urllib2
urls='http://www.pythonchallenge.com/pc/def/banner.p'
fp=urllib2.urlopen(urls)
cont=pickle.load(fp)
"""
# Method 1
def makestring(line):
s=''
for char,num in line:
s+=char*num
return s
for line in cont:
s=makestring(line)
print s
"""
# Method 2: the same logic as a one-liner
print '\n'.join([''.join([p[0] * p[1] for p in row]) for row in cont])
|
moijes12/oh-mainline
|
refs/heads/master
|
vendor/packages/oauthlib/oauthlib/oauth1/rfc5849/endpoints/authorization.py
|
87
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.authorization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for signing and checking OAuth 1.0 RFC 5849 requests.
"""
from __future__ import absolute_import, unicode_literals
from oauthlib.common import Request, add_params_to_uri
from .base import BaseEndpoint
from .. import errors
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
class AuthorizationEndpoint(BaseEndpoint):
"""An endpoint responsible for letting authenticated users authorize access
to their protected resources to a client.
Typical use would be to have two views, one for displaying the authorization
form and one to process said form on submission.
The first view will want to utilize ``get_realms_and_credentials`` to fetch
requested realms and useful client credentials, such as name and
description, to be used when creating the authorization form.
During form processing you can use ``create_authorization_response`` to
validate the request, create a verifier as well as prepare the final
redirection URI used to send the user back to the client.
See :doc:`/oauth1/validator` for details on which validator methods to implement
for this endpoint.
"""
def create_verifier(self, request, credentials):
"""Create and save a new request token.
:param request: An oauthlib.common.Request object.
:param credentials: A dict of extra token credentials.
:returns: The verifier as a dict.
"""
verifier = {
'oauth_token': request.resource_owner_key,
'oauth_verifier': self.token_generator(),
}
verifier.update(credentials)
self.request_validator.save_verifier(
request.resource_owner_key, verifier, request)
return verifier
def create_authorization_response(self, uri, http_method='GET', body=None,
headers=None, realms=None, credentials=None):
"""Create an authorization response, with a new request token if valid.
:param uri: The full URI of the token request.
        :param http_method: A valid HTTP verb, e.g. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param credentials: A list of credentials to include in the verifier.
:returns: A tuple of 3 elements.
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
If the callback URI tied to the current token is "oob", a response with
a 200 status code will be returned. In this case, it may be desirable to
modify the response to better display the verifier to the client.
An example of an authorization request::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AuthorizationEndpoint
>>> endpoint = AuthorizationEndpoint(your_validator)
>>> h, b, s = endpoint.create_authorization_response(
... 'https://your.provider/authorize?oauth_token=...',
... credentials={
... 'extra': 'argument',
... })
>>> h
{'Location': 'https://the.client/callback?oauth_verifier=...&extra=argument'}
>>> b
None
>>> s
302
An example of a request with an "oob" callback::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AuthorizationEndpoint
>>> endpoint = AuthorizationEndpoint(your_validator)
>>> h, b, s = endpoint.create_authorization_response(
... 'https://your.provider/authorize?foo=bar',
... credentials={
... 'extra': 'argument',
... })
>>> h
{'Content-Type': 'application/x-www-form-urlencoded'}
>>> b
'oauth_verifier=...&extra=argument'
>>> s
200
"""
request = self._create_request(uri, http_method=http_method, body=body,
headers=headers)
if not request.resource_owner_key:
raise errors.InvalidRequestError(
'Missing mandatory parameter oauth_token.')
if not self.request_validator.verify_request_token(
request.resource_owner_key, request):
raise errors.InvalidClientError()
request.realms = realms
if (request.realms and not self.request_validator.verify_realms(
request.resource_owner_key, request.realms, request)):
raise errors.InvalidRequestError(
description=('User granted access to realms outside of '
'what the client may request.'))
verifier = self.create_verifier(request, credentials or {})
redirect_uri = self.request_validator.get_redirect_uri(
request.resource_owner_key, request)
if redirect_uri == 'oob':
response_headers = {
'Content-Type': 'application/x-www-form-urlencoded'}
response_body = urlencode(verifier)
return response_headers, response_body, 200
else:
populated_redirect = add_params_to_uri(
redirect_uri, verifier.items())
return {'Location': populated_redirect}, None, 302
def get_realms_and_credentials(self, uri, http_method='GET', body=None,
headers=None):
"""Fetch realms and credentials for the presented request token.
:param uri: The full URI of the token request.
        :param http_method: A valid HTTP verb, e.g. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:returns: A tuple of 2 elements.
1. A list of request realms.
2. A dict of credentials which may be useful in creating the
authorization form.
"""
request = self._create_request(uri, http_method=http_method, body=body,
headers=headers)
if not self.request_validator.verify_request_token(
request.resource_owner_key, request):
raise errors.InvalidClientError()
realms = self.request_validator.get_realms(
request.resource_owner_key, request)
return realms, {'resource_owner_key': request.resource_owner_key}
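# --- Illustrative sketch (not part of the original module) ------------------
# The AuthorizationEndpoint docstring above describes a two-view flow. A
# minimal sketch of that flow, assuming `your_validator` is a request
# validator you supply and `render_form`/`respond` are hypothetical helpers
# from your web framework; only the two endpoint methods are real APIs:
#
#     endpoint = AuthorizationEndpoint(your_validator)
#
#     def authorize_form_view(uri):
#         # First view: fetch realms and client credentials for the form.
#         realms, credentials = endpoint.get_realms_and_credentials(uri)
#         return render_form(realms, credentials)
#
#     def authorize_submit_view(uri, granted_realms):
#         # Second view: validate, create a verifier, and build the response
#         # (a 302 redirect, or a 200 body for "oob" callbacks).
#         headers, body, status = endpoint.create_authorization_response(
#             uri, http_method='POST', realms=granted_realms)
#         return respond(headers, body, status)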
|
yokose-ks/edx-platform
|
refs/heads/gacco3/master
|
lms/djangoapps/instructor/tests/test_legacy_anon_csv.py
|
5
|
"""
Unit tests for instructor dashboard
Based on (and depends on) unit tests for courseware.
Notes for running by hand:
./manage.py lms --settings test test lms/djangoapps/instructor
"""
from django.test.utils import override_settings
# Need access to internal func to put users in the right group
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from student.roles import CourseStaffRole
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.django import modulestore, clear_existing_modulestores
from mock import patch
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorDashboardAnonCSV(ModuleStoreTestCase, LoginEnrollmentTestCase):
'''
Check for download of csv
'''
# Note -- I copied this setUp from a similar test
def setUp(self):
clear_existing_modulestores()
self.toy = modulestore().get_course("edX/toy/2012_Fall")
# Create two accounts
self.student = 'view@test.com'
self.instructor = 'view2@test.com'
self.password = 'foo'
self.create_account('u1', self.student, self.password)
self.create_account('u2', self.instructor, self.password)
self.activate_user(self.student)
self.activate_user(self.instructor)
CourseStaffRole(self.toy.location).add_users(User.objects.get(email=self.instructor))
self.logout()
self.login(self.instructor, self.password)
self.enroll(self.toy)
def test_download_anon_csv(self):
course = self.toy
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id})
with patch('instructor.views.legacy.unique_id_for_user') as mock_unique:
mock_unique.return_value = 42
response = self.client.post(url, {'action': 'Download CSV of all student anonymized IDs'})
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertEqual(body, '"User ID","Anonymized user ID"\n"2","42"\n')
|
gold3bear/swift
|
refs/heads/master
|
swift/common/container_sync_realms.py
|
24
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import hashlib
import hmac
import os
import time
from six.moves import configparser
from swift import gettext_ as _
from swift.common.utils import get_valid_utf8_str
class ContainerSyncRealms(object):
"""
Loads and parses the container-sync-realms.conf, occasionally
checking the file's mtime to see if it needs to be reloaded.
"""
def __init__(self, conf_path, logger):
self.conf_path = conf_path
self.logger = logger
self.next_mtime_check = 0
self.mtime_check_interval = 300
self.conf_path_mtime = 0
self.data = {}
self.reload()
def reload(self):
"""Forces a reload of the conf file."""
self.next_mtime_check = 0
self.conf_path_mtime = 0
self._reload()
def _reload(self):
now = time.time()
if now >= self.next_mtime_check:
self.next_mtime_check = now + self.mtime_check_interval
try:
mtime = os.path.getmtime(self.conf_path)
except OSError as err:
if err.errno == errno.ENOENT:
log_func = self.logger.debug
else:
log_func = self.logger.error
log_func(_('Could not load %r: %s'), self.conf_path, err)
else:
if mtime != self.conf_path_mtime:
self.conf_path_mtime = mtime
try:
conf = configparser.SafeConfigParser()
conf.read(self.conf_path)
except configparser.ParsingError as err:
self.logger.error(
_('Could not load %r: %s'), self.conf_path, err)
else:
try:
self.mtime_check_interval = conf.getint(
'DEFAULT', 'mtime_check_interval')
self.next_mtime_check = \
now + self.mtime_check_interval
except configparser.NoOptionError:
self.mtime_check_interval = 300
self.next_mtime_check = \
now + self.mtime_check_interval
except (configparser.ParsingError, ValueError) as err:
self.logger.error(
_('Error in %r with mtime_check_interval: %s'),
self.conf_path, err)
realms = {}
for section in conf.sections():
realm = {}
clusters = {}
for option, value in conf.items(section):
if option in ('key', 'key2'):
realm[option] = value
elif option.startswith('cluster_'):
clusters[option[8:].upper()] = value
realm['clusters'] = clusters
realms[section.upper()] = realm
self.data = realms
def realms(self):
"""Returns a list of realms."""
self._reload()
return self.data.keys()
def key(self, realm):
"""Returns the key for the realm."""
self._reload()
result = self.data.get(realm.upper())
if result:
result = result.get('key')
return result
def key2(self, realm):
"""Returns the key2 for the realm."""
self._reload()
result = self.data.get(realm.upper())
if result:
result = result.get('key2')
return result
def clusters(self, realm):
"""Returns a list of clusters for the realm."""
self._reload()
result = self.data.get(realm.upper())
if result:
result = result.get('clusters')
if result:
result = result.keys()
return result or []
def endpoint(self, realm, cluster):
"""Returns the endpoint for the cluster in the realm."""
self._reload()
result = None
realm_data = self.data.get(realm.upper())
if realm_data:
cluster_data = realm_data.get('clusters')
if cluster_data:
result = cluster_data.get(cluster.upper())
return result
def get_sig(self, request_method, path, x_timestamp, nonce, realm_key,
user_key):
"""
Returns the hexdigest string of the HMAC-SHA1 (RFC 2104) for
the information given.
:param request_method: HTTP method of the request.
:param path: The path to the resource.
:param x_timestamp: The X-Timestamp header value for the request.
:param nonce: A unique value for the request.
:param realm_key: Shared secret at the cluster operator level.
:param user_key: Shared secret at the user's container level.
:returns: hexdigest str of the HMAC-SHA1 for the request.
"""
nonce = get_valid_utf8_str(nonce)
realm_key = get_valid_utf8_str(realm_key)
user_key = get_valid_utf8_str(user_key)
return hmac.new(
realm_key,
'%s\n%s\n%s\n%s\n%s' % (
request_method, path, x_timestamp, nonce, user_key),
hashlib.sha1).hexdigest()
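# --- Illustrative sketch (not part of the original module) ------------------
# Verifying an incoming container-sync request against the signature produced
# by get_sig(). The realm name, request fields, and `logger` are hypothetical;
# ContainerSyncRealms, key() and get_sig() are the APIs defined above:
#
#     realms = ContainerSyncRealms(
#         '/etc/swift/container-sync-realms.conf', logger)
#     expected = realms.get_sig(
#         'PUT', '/v1/AUTH_a/c/o', x_timestamp, nonce,
#         realms.key('US'), user_key)
#     if expected != sig_from_request:
#         raise ValueError('signature mismatch')  # reject as unauthorized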
|
patrickfournier/woodbox
|
refs/heads/master
|
access_control/record.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from abc import ABCMeta, abstractmethod
from sqlalchemy import and_, or_, true, false, text
from ..models.user_model import WBUserModel, WBRoleModel
from ..models.record_acl_model import RecordACLModel
class RecordAccessControl(object):
"""Base record access control class.
This is an abstract class. Use one of the derivative, or derivate
your own class.
"""
__metaclass__ = ABCMeta
def __init__(self, *args, **kwargs):
pass
@staticmethod
def _alter_query(query, alter):
for j in alter['outerjoin']:
query = query.outerjoin(j['table'], j['on'])
return query.filter(alter['filter'])
def alter_query(self, op, query, user, item_type, model_class):
alter = self.get_alteration(op, user, item_type, model_class)
return self._alter_query(query, alter)
@abstractmethod
def get_alteration(self, op, user, item_type, model_class):
return {'outerjoin': [], 'filter': true()}
class And(RecordAccessControl):
"""Alter a query by and-ing all access control conditions passed in the constructor."""
    def __init__(self, *args, **kwargs):
        self.operands = args
        super(And, self).__init__(*args, **kwargs)
def get_alteration(self, op, user, item_type, model_class):
outerjoins = []
filters = []
for ac in self.operands:
alter = ac.get_alteration(op, user, item_type, model_class)
outerjoins += alter['outerjoin']
filters.append(alter['filter'])
return {'outerjoin': outerjoins, 'filter': and_(*filters)}
class Or(RecordAccessControl):
"""Alter a query by or-ing all access control conditions passed in the constructor."""
    def __init__(self, *args, **kwargs):
        self.operands = args
        super(Or, self).__init__(*args, **kwargs)
def get_alteration(self, op, user, item_type, model_class):
outerjoins = []
filters = []
for ac in self.operands:
alter = ac.get_alteration(op, user, item_type, model_class)
outerjoins += alter['outerjoin']
filters.append(alter['filter'])
return {'outerjoin': outerjoins, 'filter': or_(*filters)}
class OpSwitch(RecordAccessControl):
def __init__(self, read_ac=None, update_ac=None, delete_ac=None, *args, **kwargs):
self.read_ac = read_ac
self.update_ac = update_ac
self.delete_ac = delete_ac
        super(OpSwitch, self).__init__(*args, **kwargs)
def get_alteration(self, op, user, item_type, model_class):
if op == 'read' and self.read_ac is not None:
return self.read_ac.get_alteration(op, user, item_type, model_class)
elif op == 'update' and self.update_ac is not None:
return self.update_ac.get_alteration(op, user, item_type, model_class)
elif op == 'delete' and self.delete_ac is not None:
return self.delete_ac.get_alteration(op, user, item_type, model_class)
else:
return super(OpSwitch, self).get_alteration(op, user, item_type, model_class)
class IsOwner(RecordAccessControl):
"""Alter a query to only return records owned by `user`."""
def __init__(self, owner_id_column="owner_id", *args, **kwargs):
self.owner_id_column = owner_id_column
super(IsOwner, self).__init__(*args, **kwargs)
def get_alteration(self, op, user, item_type, model_class):
if user is None:
return {'outerjoin': [], 'filter': false()}
else:
c = getattr(model_class, self.owner_id_column)
return {'outerjoin': [], 'filter': c == user}
class IsUser1(RecordAccessControl):
"""Alter a query to return all records if `user` is 1.
This gives access to all records to user 1.
"""
def get_alteration(self, op, user, item_type, model_class):
if user == 1:
return {'outerjoin': [], 'filter': true()}
else:
return {'outerjoin': [], 'filter': false()}
class HasRole(RecordAccessControl):
"""Alter a query to return all records if `user` has one of the roles in `self.roles`."""
def __init__(self, roles, *args, **kwargs):
assert hasattr(roles, '__iter__')
self.roles = set(roles)
super(HasRole, self).__init__(*args, **kwargs)
def get_alteration(self, op, user, item_type, model_class):
if user is None:
roles = {WBRoleModel.anonymous_role_name}
else:
user = WBUserModel.query.get(user)
roles = {r.rolename for r in user.roles}
if roles & self.roles:
return {'outerjoin': [], 'filter': true()}
else:
return {'outerjoin': [], 'filter': false()}
class InRecordACL(RecordAccessControl):
"""Alter a query to return records having a RecordACLModel entry that matches the specified parameters."""
def get_alteration(self, op, user, item_type, model_class):
if user is None:
anonymous_role_id = WBRoleModel.get_anonymous_role_id()
user_roles = {anonymous_role_id}
else:
user = WBUserModel.query.get(user)
user_roles = {r.id for r in user.roles}
return {
'outerjoin': [{
'table': RecordACLModel,
'on': RecordACLModel.record_id == model_class.id
}],
'filter' : and_(RecordACLModel.user_role_id.in_(user_roles),
RecordACLModel.record_type == item_type,
RecordACLModel.permission == op)
}
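# --- Illustrative sketch (not part of the original module) ------------------
# The access controls above compose. For example, to make records readable
# when the requesting user either owns them or holds the 'admin' role
# (`session`, `DocumentModel`, and `user_id` are hypothetical; the
# access-control classes are the ones defined above):
#
#     ac = OpSwitch(read_ac=Or(IsOwner(), HasRole(['admin'])))
#     query = session.query(DocumentModel)
#     query = ac.alter_query('read', query, user_id, 'document', DocumentModel)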
|
wagnerand/amo-validator
|
refs/heads/master
|
tests/test_contextgenerator.py
|
2
|
from validator.contextgenerator import ContextGenerator
def test_load_data():
"""Test that data is loaded properly into the CG."""
d = """abc
def
ghi"""
c = ContextGenerator(d)
print c.data
assert len(c.data) == 3
# Through inductive reasoning, we can assert that every other line
# is imported properly
assert c.data[0].strip() == 'abc'
assert c.data[1].strip() == 'def'
def test_get_context():
"""Test that contexts are generated properly."""
d = open('tests/resources/contextgenerator/data.txt').read()
c = ContextGenerator(d)
print c.data
c_start = c.get_context(line=1, column=0)
c_end = c.get_context(line=11, column=0)
print c_start
print c_end
# Contexts are always length 3
assert len(c_start) == 3
assert c_start[0] is None
assert len(c_end) == 3
assert c_end[2] is None
assert c_start[1] == '0123456789'
assert c_end[0] == '9012345678'
assert c_end[1] == ''
c_mid = c.get_context(line=5)
assert len(c_mid) == 3
assert c_mid[0] == '3456789012'
assert c_mid[2] == '5678901234'
print c_mid
def test_get_context_trimming():
"""
Test that contexts are generated properly when lines are >140 characters.
"""
d = open('tests/resources/contextgenerator/longdata.txt').read()
c = ContextGenerator(d)
print c.data
trimmed = c.get_context(line=2, column=89)
proper_lengths = (140, 148, 140)
print trimmed
print [len(x) for x in trimmed]
for i in range(3):
assert len(trimmed[i]) == proper_lengths[i]
def test_get_context_trimming_inverse():
"""
Tests that surrounding lines are trimmed properly; the error line is
ignored if it is less than 140 characters.
"""
d = open('tests/resources/contextgenerator/longdata.txt').read()
c = ContextGenerator(d)
print c.data
trimmed = c.get_context(line=6, column=0)
print trimmed
assert trimmed[1] == 'This line should be entirely visible.'
assert trimmed[0][0] != 'X'
assert trimmed[2][-1] != 'X'
def test_get_line():
"""Test that the context generator returns the proper line."""
d = open('tests/resources/contextgenerator/data.txt').read()
c = ContextGenerator(d)
print c.data
print c.get_line(30)
assert c.get_line(30) == 3
print c.get_line(11)
assert c.get_line(11) == 2
print c.get_line(10000)
assert c.get_line(10000) == 11
def test_leading_whitespace():
"""Test that leading whitespace is trimmed properly."""
def run(data, expectation, line=2):
# Strip blank lines.
data = '\n'.join(filter(None, data.split('\n')))
# Get the context and assert its equality.
c = ContextGenerator(data)
assert c.get_context(line) == expectation
run(' One space\n'
' Two spaces\n'
' Three spaces',
('One space', ' Two spaces', ' Three spaces'))
run('\n \n ',
('', '', ''))
run(' Two\n'
' One\n'
' Three',
(' Two', 'One', ' Three'))
run('None\n'
' One\n'
' One',
('None', ' One', ' One'))
|
gskachkov/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Source/ThirdParty/gtest/test/gtest_xml_output_unittest.py
|
306
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import errno
import os
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"
SUPPORTS_STACK_TRACES = False
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = "\nStack trace:\n*"
else:
STACK_TRACE_TEMPLATE = ""
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="15" failures="4" disabled="2" errors="0" time="*" name="AllTests">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*" name="AllTests">
</testsuites>"""
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput("gtest_no_test_unittest",
EXPECTED_EMPTY_XML, 0)
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
"gtest_no_test_unittest")
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, "%s=xml" % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + "out.xml")
if os.path.isfile(xml_path):
os.remove(xml_path)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
command = [gtest_prog_path,
"%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path),
"--shut_down_xml"]
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
"%s was killed by signal %d" % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
"the expected exit code %s."
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def _TestXmlOutput(self, gtest_prog_name, expected_xml, expected_exit_code):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + "out.xml")
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = [gtest_prog_path, "%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path)]
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
"%s was killed by signal %d" % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
"the expected exit code %s."
% (command, p.exit_code, expected_exit_code))
expected = minidom.parseString(expected_xml)
actual = minidom.parse(xml_path)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual .unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
|
huguesv/PTVS
|
refs/heads/master
|
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py
|
374
|
import sys
try:
# Our match_hostname function is the same as 3.5's, so we only want to
# import the match_hostname function if it's at least that good.
if sys.version_info < (3, 5):
raise ImportError("Fallback to vendored code")
from ssl import CertificateError, match_hostname
except ImportError:
try:
# Backport of the function from a pypi module
from backports.ssl_match_hostname import CertificateError, match_hostname
except ImportError:
# Our vendored copy
from ._implementation import CertificateError, match_hostname
# Not needed, but documenting what we provide.
__all__ = ('CertificateError', 'match_hostname')
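# --- Illustrative sketch (not part of the original module) ------------------
# Typical use of the exported pair after a TLS handshake; `sock` is a
# hypothetical ssl-wrapped socket and `hostname` the name that was dialed:
#
#     cert = sock.getpeercert()
#     try:
#         match_hostname(cert, hostname)
#     except CertificateError:
#         sock.close()
#         raise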
|
rahushen/ansible
|
refs/heads/devel
|
lib/ansible/modules/notification/twilio.py
|
47
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Matt Makai <matthew.makai@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
version_added: "1.6"
module: twilio
short_description: Sends a text message to a mobile phone through Twilio.
description:
- Sends a text message to a phone number through the Twilio messaging API.
notes:
  - This module is non-idempotent because it sends a text message through the
external API. It is idempotent only in the case that the module fails.
- Like the other notification modules, this one requires an external
dependency to work. In this case, you'll need a Twilio account with
a purchased or verified phone number to send the text message.
options:
account_sid:
description:
      user's Twilio account SID found on the account page
required: true
auth_token:
description: user's Twilio authentication token
required: true
msg:
description:
the body of the text message
required: true
to_number:
description:
one or more phone numbers to send the text message to,
format +15551112222
required: true
from_number:
description:
the Twilio number to send the text message from, format +15551112222
required: true
media_url:
description:
a URL with a picture, video or sound clip to send with an MMS
(multimedia message) instead of a plain SMS
required: false
author: "Matt Makai (@makaimc)"
'''
EXAMPLES = '''
# send an SMS about the build status to (555) 303 5681
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
msg: All servers with webserver role are now configured.
account_sid: ACXXXXXXXXXXXXXXXXX
auth_token: ACXXXXXXXXXXXXXXXXX
from_number: +15552014545
to_number: +15553035681
delegate_to: localhost
# send an SMS to multiple phone numbers about the deployment
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
msg: This server configuration is now complete.
account_sid: ACXXXXXXXXXXXXXXXXX
auth_token: ACXXXXXXXXXXXXXXXXX
from_number: +15553258899
to_number:
- +15551113232
- +12025551235
- +19735559010
delegate_to: localhost
# send an MMS to a single recipient with an update on the deployment
# and an image of the results
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
msg: Deployment complete!
account_sid: ACXXXXXXXXXXXXXXXXX
auth_token: ACXXXXXXXXXXXXXXXXX
from_number: +15552014545
to_number: +15553035681
media_url: https://demo.twilio.com/logo.png
delegate_to: localhost
'''
# =======================================
# twilio module support methods
#
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url
def post_twilio_api(module, account_sid, auth_token, msg, from_number,
to_number, media_url=None):
URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
% (account_sid,)
AGENT = "Ansible"
data = {'From': from_number, 'To': to_number, 'Body': msg}
if media_url:
data['MediaUrl'] = media_url
encoded_data = urlencode(data)
headers = {'User-Agent': AGENT,
'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'application/json',
}
# Hack module params to have the Basic auth params that fetch_url expects
module.params['url_username'] = account_sid.replace('\n', '')
module.params['url_password'] = auth_token.replace('\n', '')
return fetch_url(module, URI, data=encoded_data, headers=headers)
# =======================================
# Main
#
def main():
module = AnsibleModule(
argument_spec=dict(
account_sid=dict(required=True),
auth_token=dict(required=True, no_log=True),
msg=dict(required=True),
from_number=dict(required=True),
to_number=dict(required=True),
media_url=dict(default=None, required=False),
),
supports_check_mode=True
)
account_sid = module.params['account_sid']
auth_token = module.params['auth_token']
msg = module.params['msg']
from_number = module.params['from_number']
to_number = module.params['to_number']
media_url = module.params['media_url']
if not isinstance(to_number, list):
to_number = [to_number]
for number in to_number:
r, info = post_twilio_api(module, account_sid, auth_token, msg,
from_number, number, media_url)
if info['status'] not in [200, 201]:
body_message = "unknown error"
if 'body' in info:
body = json.loads(info['body'])
body_message = body['message']
module.fail_json(msg="unable to send message to %s: %s" % (number, body_message))
module.exit_json(msg=msg, changed=False)
if __name__ == '__main__':
main()
|
amlight/ofp_sniffer
|
refs/heads/master
|
libs/tcpiplib/process_data.py
|
1
|
"""
This module has functions to help processing the data from
PacketIn and PacketOut.
"""
from pyof.foundation.basic_types import BinaryData
from libs.tcpiplib.packet import Ethernet, VLAN, IP, TCP, LLDP, ARP, OessFvd
def dissect_data(data, start=0):
"""
This function aims to dissect PacketIn and PacketOut data
It assumes it is
Ethernet [qinq] [vlan] (BDDP|LLDP|ARP|IP) [TCP|UDP]
Args:
data: BinaryData
start: offset
Returns:
payload: array with all classes
"""
if len(data.value) == 0:
# No Data
return 0
packet = data.value
payload = []
# Ethernet
eth = Ethernet()
eth.parse(packet[start:start + 14], 1)
payload.append(eth)
# VLAN or not - ETYPE 0x8100 or 33024
start += 14
if eth.protocol in [34984]: # 0x88a8
# Frame has QinQ
vlan = VLAN()
vlan.parse(packet[start:start + 4])
payload.append(vlan)
etype = vlan.protocol
start += 4
else:
etype = eth.protocol
# if there is no content, return
if len(packet[start:]) == 0:
return payload
if etype in [33024] or eth.protocol in [33024]:
# Frame has VLAN
vlan = VLAN()
vlan.parse(packet[start:start + 4])
payload.append(vlan)
etype = vlan.protocol
start += 4
else:
if not etype:
etype = eth.protocol
# if there is no content, return
if len(packet[start:]) == 0:
return payload
# OESS FVD
if etype in [34998]:
fvd = OessFvd()
try:
fvd.parse(packet[start:])
except Exception as error:
print(error)
payload.append(fvd)
return payload
# LLDP - ETYPE 0x88CC or 35020 or
    # BDDP - ETYPE 0x8942 or 35138
if etype in [35020, 35138]:
lldp = LLDP()
try:
lldp.parse(packet[start:])
except:
pass
if not isinstance(lldp, LLDP):
lldp.c_id = 0
else:
payload.append(lldp)
return payload
# IP - ETYPE 0x800 or 2048
if etype in [2048]:
ip_addr = IP()
ip_addr.parse(packet, start)
payload.append(ip_addr)
        if ip_addr.protocol == 6:
tcp = TCP()
tcp.parse(packet, start + ip_addr.length)
payload.append(tcp)
return payload
# ARP - ETYPE 0x806 or 2054
if etype in [2054]:
arp = ARP()
arp.parse(packet[start:])
payload.append(arp)
return payload
return payload
def is_protocol(data, lldp=False, oess=False, arp=False):
"""
Check if Data is protocol provided
Args:
data: PacketOut/PacketIn/OESS data
lldp: check for lldp
oess: check for oess
arp: check for arp
Returns:
protocol class if True
False if it is not
"""
protocol = []
return_protocol = False
if lldp:
protocol.append(35020) # LLDP
protocol.append(35138) # BDDP
elif oess:
protocol.append(34998) # Private
elif arp:
protocol.append(2054) # ARP 0x806
else:
return_protocol = True
if isinstance(data, BinaryData):
data = dissect_data(data)
try:
eth = data.pop(0)
next_protocol = eth.protocol
if next_protocol in [33024, 34984]:
vlan = data.pop(0)
if return_protocol:
return vlan.protocol
next_protocol = vlan.protocol
if next_protocol in protocol:
return True
return False
except Exception as error:
print(error)
return False
def get_protocol(data, lldp=False, oess=False, arp=False):
"""
Get protocol from data
Args:
data: PacketOut/PacketIn/OESS data
lldp: check for lldp
oess: check for oess
arp: check for arp
Returns:
protocol class if True
False if it is not
"""
protocol = []
if lldp:
protocol.append(35020) # LLDP
protocol.append(35138) # BDDP
elif oess:
protocol.append(34998) # Private
elif arp:
protocol.append(2054) # ARP 0x806
else:
return False
if isinstance(data, BinaryData):
data = dissect_data(data)
if isinstance(data, int):
return False
try:
eth = data.pop(0)
next_protocol = eth.protocol
if next_protocol in [33024]:
vlan = data.pop(0)
next_protocol = vlan.protocol
if next_protocol in protocol:
return data.pop(0)
return False
except Exception as error:
print(error)
return False
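# --- Illustrative sketch (not part of the original module) ------------------
# Typical use of the helpers above when inspecting a PacketIn: check whether
# the payload is LLDP, then fetch the parsed class. `packet_in` is a
# hypothetical message object carrying a BinaryData `data` attribute:
#
#     if is_protocol(packet_in.data, lldp=True):
#         lldp = get_protocol(packet_in.data, lldp=True)
#         if lldp is not False:
#             print(lldp.c_id)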
|
ajdavis/tornado
|
refs/heads/master
|
tornado/test/websocket_test.py
|
5
|
from __future__ import absolute_import, division, print_function
import functools
import sys
import traceback
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.iostream import StreamClosedError
from tornado.log import gen_log, app_log
from tornado.template import DictLoader
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.test.util import unittest, skipBefore35, exec_test
from tornado.web import Application, RequestHandler
try:
import tornado.websocket # noqa
from tornado.util import _websocket_mask_python
except ImportError:
# The unittest module presents misleading errors on ImportError
# (it acts as if websocket_test could not be found, hiding the underlying
# error). If we get an ImportError here (which could happen due to
# TORNADO_EXTENSION=1), print some extra information before failing.
traceback.print_exc()
raise
from tornado.websocket import WebSocketHandler, websocket_connect, WebSocketError
try:
from tornado import speedups
except ImportError:
speedups = None
class TestWebSocketHandler(WebSocketHandler):
"""Base class for testing handlers that exposes the on_close event.
This allows for deterministic cleanup of the associated socket.
"""
def initialize(self, close_future, compression_options=None):
self.close_future = close_future
self.compression_options = compression_options
def get_compression_options(self):
return self.compression_options
def on_close(self):
self.close_future.set_result((self.close_code, self.close_reason))
class EchoHandler(TestWebSocketHandler):
def on_message(self, message):
try:
self.write_message(message, isinstance(message, bytes))
except StreamClosedError:
pass
class ErrorInOnMessageHandler(TestWebSocketHandler):
def on_message(self, message):
1 / 0
class HeaderHandler(TestWebSocketHandler):
def open(self):
methods_to_test = [
functools.partial(self.write, 'This should not work'),
functools.partial(self.redirect, 'http://localhost/elsewhere'),
functools.partial(self.set_header, 'X-Test', ''),
functools.partial(self.set_cookie, 'Chocolate', 'Chip'),
functools.partial(self.set_status, 503),
self.flush,
self.finish,
]
for method in methods_to_test:
try:
# In a websocket context, many RequestHandler methods
# raise RuntimeErrors.
method()
raise Exception("did not get expected exception")
except RuntimeError:
pass
self.write_message(self.request.headers.get('X-Test', ''))
class HeaderEchoHandler(TestWebSocketHandler):
def set_default_headers(self):
self.set_header("X-Extra-Response-Header", "Extra-Response-Value")
def prepare(self):
for k, v in self.request.headers.get_all():
if k.lower().startswith('x-test'):
self.set_header(k, v)
class NonWebSocketHandler(RequestHandler):
def get(self):
self.write('ok')
class CloseReasonHandler(TestWebSocketHandler):
def open(self):
self.on_close_called = False
self.close(1001, "goodbye")
class AsyncPrepareHandler(TestWebSocketHandler):
@gen.coroutine
def prepare(self):
yield gen.moment
def on_message(self, message):
self.write_message(message)
class PathArgsHandler(TestWebSocketHandler):
def open(self, arg):
self.write_message(arg)
class CoroutineOnMessageHandler(TestWebSocketHandler):
def initialize(self, close_future, compression_options=None):
super(CoroutineOnMessageHandler, self).initialize(close_future,
compression_options)
self.sleeping = 0
@gen.coroutine
def on_message(self, message):
if self.sleeping > 0:
self.write_message('another coroutine is already sleeping')
self.sleeping += 1
yield gen.sleep(0.01)
self.sleeping -= 1
self.write_message(message)
class RenderMessageHandler(TestWebSocketHandler):
def on_message(self, message):
self.write_message(self.render_string('message.html', message=message))
class WebSocketBaseTestCase(AsyncHTTPTestCase):
@gen.coroutine
def ws_connect(self, path, **kwargs):
ws = yield websocket_connect(
'ws://127.0.0.1:%d%s' % (self.get_http_port(), path),
**kwargs)
raise gen.Return(ws)
@gen.coroutine
def close(self, ws):
"""Close a websocket connection and wait for the server side.
If we don't wait here, there are sometimes leak warnings in the
tests.
"""
ws.close()
yield self.close_future
class WebSocketTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future()
return Application([
('/echo', EchoHandler, dict(close_future=self.close_future)),
('/non_ws', NonWebSocketHandler),
('/header', HeaderHandler, dict(close_future=self.close_future)),
('/header_echo', HeaderEchoHandler,
dict(close_future=self.close_future)),
('/close_reason', CloseReasonHandler,
dict(close_future=self.close_future)),
('/error_in_on_message', ErrorInOnMessageHandler,
dict(close_future=self.close_future)),
('/async_prepare', AsyncPrepareHandler,
dict(close_future=self.close_future)),
('/path_args/(.*)', PathArgsHandler,
dict(close_future=self.close_future)),
('/coroutine', CoroutineOnMessageHandler,
dict(close_future=self.close_future)),
('/render', RenderMessageHandler,
dict(close_future=self.close_future)),
], template_loader=DictLoader({
'message.html': '<b>{{ message }}</b>',
}))
def tearDown(self):
super(WebSocketTest, self).tearDown()
RequestHandler._template_loaders.clear()
def test_http_request(self):
# WS server, HTTP client.
response = self.fetch('/echo')
self.assertEqual(response.code, 400)
def test_bad_websocket_version(self):
response = self.fetch('/echo',
headers={'Connection': 'Upgrade',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Version': '12'})
self.assertEqual(response.code, 426)
@gen_test
def test_websocket_gen(self):
ws = yield self.ws_connect('/echo')
yield ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
def test_websocket_callbacks(self):
websocket_connect(
'ws://127.0.0.1:%d/echo' % self.get_http_port(),
callback=self.stop)
ws = self.wait().result()
ws.write_message('hello')
ws.read_message(self.stop)
response = self.wait().result()
self.assertEqual(response, 'hello')
self.close_future.add_done_callback(lambda f: self.stop())
ws.close()
self.wait()
@gen_test
def test_binary_message(self):
ws = yield self.ws_connect('/echo')
ws.write_message(b'hello \xe9', binary=True)
response = yield ws.read_message()
self.assertEqual(response, b'hello \xe9')
yield self.close(ws)
@gen_test
def test_unicode_message(self):
ws = yield self.ws_connect('/echo')
ws.write_message(u'hello \u00e9')
response = yield ws.read_message()
self.assertEqual(response, u'hello \u00e9')
yield self.close(ws)
@gen_test
def test_render_message(self):
ws = yield self.ws_connect('/render')
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, '<b>hello</b>')
yield self.close(ws)
@gen_test
def test_error_in_on_message(self):
ws = yield self.ws_connect('/error_in_on_message')
ws.write_message('hello')
with ExpectLog(app_log, "Uncaught exception"):
response = yield ws.read_message()
self.assertIs(response, None)
yield self.close(ws)
@gen_test
def test_websocket_http_fail(self):
with self.assertRaises(HTTPError) as cm:
yield self.ws_connect('/notfound')
self.assertEqual(cm.exception.code, 404)
@gen_test
def test_websocket_http_success(self):
with self.assertRaises(WebSocketError):
yield self.ws_connect('/non_ws')
@gen_test
def test_websocket_network_fail(self):
sock, port = bind_unused_port()
sock.close()
with self.assertRaises(IOError):
with ExpectLog(gen_log, ".*"):
yield websocket_connect(
'ws://127.0.0.1:%d/' % port,
connect_timeout=3600)
@gen_test
def test_websocket_close_buffered_data(self):
ws = yield websocket_connect(
'ws://127.0.0.1:%d/echo' % self.get_http_port())
ws.write_message('hello')
ws.write_message('world')
# Close the underlying stream.
ws.stream.close()
yield self.close_future
@gen_test
def test_websocket_headers(self):
# Ensure that arbitrary headers can be passed through websocket_connect.
ws = yield websocket_connect(
HTTPRequest('ws://127.0.0.1:%d/header' % self.get_http_port(),
headers={'X-Test': 'hello'}))
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_websocket_header_echo(self):
# Ensure that headers can be returned in the response.
# Specifically, that arbitrary headers passed through websocket_connect
# can be returned.
ws = yield websocket_connect(
HTTPRequest('ws://127.0.0.1:%d/header_echo' % self.get_http_port(),
headers={'X-Test-Hello': 'hello'}))
self.assertEqual(ws.headers.get('X-Test-Hello'), 'hello')
self.assertEqual(ws.headers.get('X-Extra-Response-Header'), 'Extra-Response-Value')
yield self.close(ws)
@gen_test
def test_server_close_reason(self):
ws = yield self.ws_connect('/close_reason')
msg = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(msg, None)
self.assertEqual(ws.close_code, 1001)
self.assertEqual(ws.close_reason, "goodbye")
# The on_close callback is called no matter which side closed.
code, reason = yield self.close_future
# The client echoed the close code it received to the server,
# so the server's close code (returned via close_future) is
# the same.
self.assertEqual(code, 1001)
@gen_test
def test_client_close_reason(self):
ws = yield self.ws_connect('/echo')
ws.close(1001, 'goodbye')
code, reason = yield self.close_future
self.assertEqual(code, 1001)
self.assertEqual(reason, 'goodbye')
@gen_test
def test_write_after_close(self):
ws = yield self.ws_connect('/close_reason')
msg = yield ws.read_message()
self.assertIs(msg, None)
with self.assertRaises(StreamClosedError):
ws.write_message('hello')
@gen_test
def test_async_prepare(self):
# Previously, an async prepare method triggered a bug that would
# result in a timeout on test shutdown (and a memory leak).
ws = yield self.ws_connect('/async_prepare')
ws.write_message('hello')
res = yield ws.read_message()
self.assertEqual(res, 'hello')
@gen_test
def test_path_args(self):
ws = yield self.ws_connect('/path_args/hello')
res = yield ws.read_message()
self.assertEqual(res, 'hello')
@gen_test
def test_coroutine(self):
ws = yield self.ws_connect('/coroutine')
# Send both messages immediately, coroutine must process one at a time.
yield ws.write_message('hello1')
yield ws.write_message('hello2')
res = yield ws.read_message()
self.assertEqual(res, 'hello1')
res = yield ws.read_message()
self.assertEqual(res, 'hello2')
@gen_test
def test_check_origin_valid_no_path(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': 'http://127.0.0.1:%d' % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers))
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_check_origin_valid_with_path(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': 'http://127.0.0.1:%d/something' % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers))
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_check_origin_invalid_partial_url(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': '127.0.0.1:%d' % port}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
# Host is 127.0.0.1, which should not be accessible from some other
# domain
headers = {'Origin': 'http://somewhereelse.com'}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid_subdomains(self):
port = self.get_http_port()
url = 'ws://localhost:%d/echo' % port
# Subdomains should be disallowed by default. If we could pass a
# resolver to websocket_connect we could test sibling domains as well.
headers = {'Origin': 'http://subtenant.localhost'}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
if sys.version_info >= (3, 5):
NativeCoroutineOnMessageHandler = exec_test(globals(), locals(), """
class NativeCoroutineOnMessageHandler(TestWebSocketHandler):
def initialize(self, close_future, compression_options=None):
super().initialize(close_future, compression_options)
self.sleeping = 0
async def on_message(self, message):
if self.sleeping > 0:
self.write_message('another coroutine is already sleeping')
self.sleeping += 1
await gen.sleep(0.01)
self.sleeping -= 1
self.write_message(message)""")['NativeCoroutineOnMessageHandler']
class WebSocketNativeCoroutineTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future()
return Application([
('/native', NativeCoroutineOnMessageHandler,
dict(close_future=self.close_future))])
@skipBefore35
@gen_test
def test_native_coroutine(self):
ws = yield self.ws_connect('/native')
# Send both messages immediately, coroutine must process one at a time.
yield ws.write_message('hello1')
yield ws.write_message('hello2')
res = yield ws.read_message()
self.assertEqual(res, 'hello1')
res = yield ws.read_message()
self.assertEqual(res, 'hello2')
class CompressionTestMixin(object):
MESSAGE = 'Hello world. Testing 123 123'
def get_app(self):
self.close_future = Future()
return Application([
('/echo', EchoHandler, dict(
close_future=self.close_future,
compression_options=self.get_server_compression_options())),
])
def get_server_compression_options(self):
return None
def get_client_compression_options(self):
return None
@gen_test
def test_message_sizes(self):
ws = yield self.ws_connect(
'/echo',
compression_options=self.get_client_compression_options())
# Send the same message three times so we can measure the
# effect of the context_takeover options.
for i in range(3):
ws.write_message(self.MESSAGE)
response = yield ws.read_message()
self.assertEqual(response, self.MESSAGE)
self.assertEqual(ws.protocol._message_bytes_out, len(self.MESSAGE) * 3)
self.assertEqual(ws.protocol._message_bytes_in, len(self.MESSAGE) * 3)
self.verify_wire_bytes(ws.protocol._wire_bytes_in,
ws.protocol._wire_bytes_out)
yield self.close(ws)
class UncompressedTestMixin(CompressionTestMixin):
"""Specialization of CompressionTestMixin when we expect no compression."""
def verify_wire_bytes(self, bytes_in, bytes_out):
        # Each frame carries a 2-byte header; client-to-server frames
        # (bytes out) additionally carry a 4-byte mask key, hence the
        # per-message overhead of 6 bytes out versus 2 bytes in.
        self.assertEqual(bytes_out, 3 * (len(self.MESSAGE) + 6))
        self.assertEqual(bytes_in, 3 * (len(self.MESSAGE) + 2))
class NoCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
pass
# If only one side tries to compress, the extension is not negotiated.
class ServerOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
class ClientOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_client_compression_options(self):
return {}
class DefaultCompressionTest(CompressionTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
def get_client_compression_options(self):
return {}
def verify_wire_bytes(self, bytes_in, bytes_out):
self.assertLess(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertLess(bytes_in, 3 * (len(self.MESSAGE) + 2))
        # Bytes out additionally includes the 4-byte mask key per message
        # (3 messages * 4 bytes = 12).
        self.assertEqual(bytes_out, bytes_in + 12)
class MaskFunctionMixin(object):
# Subclasses should define self.mask(mask, data)
def test_mask(self):
self.assertEqual(self.mask(b'abcd', b''), b'')
self.assertEqual(self.mask(b'abcd', b'b'), b'\x03')
self.assertEqual(self.mask(b'abcd', b'54321'), b'TVPVP')
self.assertEqual(self.mask(b'ZXCV', b'98765432'), b'c`t`olpd')
# Include test cases with \x00 bytes (to ensure that the C
# extension isn't depending on null-terminated strings) and
# bytes with the high bit set (to smoke out signedness issues).
self.assertEqual(self.mask(b'\x00\x01\x02\x03',
b'\xff\xfb\xfd\xfc\xfe\xfa'),
b'\xff\xfa\xff\xff\xfe\xfb')
self.assertEqual(self.mask(b'\xff\xfb\xfd\xfc',
b'\x00\x01\x02\x03\x04\x05'),
b'\xff\xfa\xff\xff\xfb\xfe')
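# For reference, the transform under test is RFC 6455 (section 5.3) payload
# masking: a byte-wise XOR of the data with the 4-byte key, repeating the
# key. A minimal sketch (illustrative only, Python 3 semantics; not used by
# the tests):
#
#   def mask_reference(mask, data):
#       return bytes(b ^ mask[i % 4] for i, b in enumerate(data))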
class PythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return _websocket_mask_python(mask, data)
@unittest.skipIf(speedups is None, "tornado.speedups module not present")
class CythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return speedups.websocket_mask(mask, data)
class ServerPeriodicPingTest(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_pong(self, data):
self.write_message("got pong")
self.close_future = Future()
return Application([
('/', PingHandler, dict(close_future=self.close_future)),
], websocket_ping_interval=0.01)
@gen_test
def test_server_ping(self):
ws = yield self.ws_connect('/')
for i in range(3):
response = yield ws.read_message()
self.assertEqual(response, "got pong")
yield self.close(ws)
# TODO: test that the connection gets closed if ping responses stop.
class ClientPeriodicPingTest(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_ping(self, data):
self.write_message("got ping")
self.close_future = Future()
return Application([
('/', PingHandler, dict(close_future=self.close_future)),
])
@gen_test
def test_client_ping(self):
ws = yield self.ws_connect('/', ping_interval=0.01)
for i in range(3):
response = yield ws.read_message()
self.assertEqual(response, "got ping")
yield self.close(ws)
# TODO: test that the connection gets closed if ping responses stop.
class MaxMessageSizeTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future()
return Application([
('/', EchoHandler, dict(close_future=self.close_future)),
], websocket_max_message_size=1024)
@gen_test
def test_large_message(self):
ws = yield self.ws_connect('/')
# Write a message that is allowed.
msg = 'a' * 1024
ws.write_message(msg)
resp = yield ws.read_message()
self.assertEqual(resp, msg)
# Write a message that is too large.
ws.write_message(msg + 'b')
resp = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(resp, None)
self.assertEqual(ws.close_code, 1009)
self.assertEqual(ws.close_reason, "message too big")
# TODO: Needs tests of messages split over multiple
# continuation frames.
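    # A rough sketch of such a test (assumption: it would drive Tornado's
    # private _write_frame API directly, since the public client API never
    # fragments outgoing messages; opcodes 0x1/0x0 are text/continuation
    # per RFC 6455):
    #
    #   @gen_test
    #   def test_fragmented_message(self):
    #       ws = yield self.ws_connect('/')
    #       ws.protocol._write_frame(False, 0x1, b'hello ')  # FIN=0
    #       ws.protocol._write_frame(True, 0x0, b'world')    # FIN=1
    #       resp = yield ws.read_message()
    #       self.assertEqual(resp, 'hello world')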
|
VMTrooper/squadtracker
|
refs/heads/master
|
destiny/views.py
|
1
|
from django.shortcuts import redirect, render
from django.http import HttpResponse
from django.template.loader import render_to_string
from destiny.models import Item, List
# Create your views here.
def home_page(request):
return render(request, 'home.html')
def view_squad(request, list_id):
list_ = List.objects.get(id=list_id)
    return render(request, 'list.html', {'squad': list_})
def new_squad(request):
list_ = List.objects.create()
Item.objects.create(text=request.POST['item_text'], list=list_)
    return redirect('/squads/%d/' % (list_.id,))
def add_item(request, list_id):
list_ = List.objects.get(id=list_id)
Item.objects.create(text=request.POST['item_text'], list=list_)
return redirect('/squads/%d/' % (list_.id,))
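# A minimal urlconf sketch consistent with the redirects above (assumption:
# patterns and names are illustrative; the real mapping lives in urls.py):
#
#   from django.conf.urls import url
#   from destiny import views
#
#   urlpatterns = [
#       url(r'^$', views.home_page),
#       url(r'^squads/new$', views.new_squad),
#       url(r'^squads/(\d+)/$', views.view_squad),
#       url(r'^squads/(\d+)/add_item$', views.add_item),
#   ]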
|
kuiwei/kuiwei
|
refs/heads/master
|
lms/djangoapps/licenses/tests.py
|
30
|
"""Tests for License package"""
import logging
import json
from uuid import uuid4
from random import shuffle
from tempfile import NamedTemporaryFile
import factory
from factory.django import DjangoModelFactory
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from django.core.management import call_command
from django.core.urlresolvers import reverse
from nose.tools import assert_true # pylint: disable=E0611
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from licenses.models import CourseSoftware, UserLicense
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
COURSE_1 = 'edX/toy/2012_Fall'
SOFTWARE_1 = 'matlab'
SOFTWARE_2 = 'stata'
SERIAL_1 = '123456abcde'
log = logging.getLogger(__name__)
class CourseSoftwareFactory(DjangoModelFactory):
'''Factory for generating CourseSoftware objects in database'''
FACTORY_FOR = CourseSoftware
name = SOFTWARE_1
full_name = SOFTWARE_1
url = SOFTWARE_1
course_id = COURSE_1
class UserLicenseFactory(DjangoModelFactory):
'''
Factory for generating UserLicense objects in database
By default, the user assigned is null, indicating that the
serial number has not yet been assigned.
'''
FACTORY_FOR = UserLicense
user = None
software = factory.SubFactory(CourseSoftwareFactory)
serial = SERIAL_1
class LicenseTestCase(TestCase):
'''Tests for licenses.views'''
def setUp(self):
'''creates a user and logs in'''
# self.setup_viewtest_user()
self.user = UserFactory(username='test',
email='test@edx.org', password='test_password')
self.client = Client()
assert_true(self.client.login(username='test', password='test_password'))
self.software = CourseSoftwareFactory()
def test_get_license(self):
UserLicenseFactory(user=self.user, software=self.software)
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'false'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
self.assertEqual(200, response.status_code)
json_returned = json.loads(response.content)
self.assertFalse('error' in json_returned)
self.assertTrue('serial' in json_returned)
        self.assertEqual(json_returned['serial'], SERIAL_1)
def test_get_nonexistent_license(self):
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'false'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
self.assertEqual(200, response.status_code)
json_returned = json.loads(response.content)
self.assertFalse('serial' in json_returned)
self.assertTrue('error' in json_returned)
def test_create_nonexistent_license(self):
'''Should not assign a license to an unlicensed user when none are available'''
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'true'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
self.assertEqual(200, response.status_code)
json_returned = json.loads(response.content)
self.assertFalse('serial' in json_returned)
self.assertTrue('error' in json_returned)
def test_create_license(self):
'''Should assign a license to an unlicensed user if one is unassigned'''
# create an unassigned license
UserLicenseFactory(software=self.software)
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'true'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
self.assertEqual(200, response.status_code)
json_returned = json.loads(response.content)
self.assertFalse('error' in json_returned)
self.assertTrue('serial' in json_returned)
        self.assertEqual(json_returned['serial'], SERIAL_1)
def test_get_license_from_wrong_course(self):
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'false'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format('some/other/course'))
self.assertEqual(404, response.status_code)
def test_get_license_from_non_ajax(self):
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'false'},
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
self.assertEqual(404, response.status_code)
def test_get_license_without_software(self):
response = self.client.post(reverse('user_software_license'),
{'generate': 'false'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
self.assertEqual(404, response.status_code)
def test_get_license_without_login(self):
self.client.logout()
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'false'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
# if we're not logged in, we should be referred to the login page
self.assertEqual(302, response.status_code)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class CommandTest(ModuleStoreTestCase):
'''Test management command for importing serial numbers'''
def setUp(self):
course = CourseFactory.create()
self.course_id = course.id
def test_import_serial_numbers(self):
size = 20
log.debug('Adding one set of serials for {0}'.format(SOFTWARE_1))
with generate_serials_file(size) as temp_file:
args = [self.course_id.to_deprecated_string(), SOFTWARE_1, temp_file.name]
call_command('import_serial_numbers', *args)
log.debug('Adding one set of serials for {0}'.format(SOFTWARE_2))
with generate_serials_file(size) as temp_file:
args = [self.course_id.to_deprecated_string(), SOFTWARE_2, temp_file.name]
call_command('import_serial_numbers', *args)
log.debug('There should be only 2 course-software entries')
software_count = CourseSoftware.objects.all().count()
self.assertEqual(2, software_count)
log.debug('We added two sets of {0} serials'.format(size))
licenses_count = UserLicense.objects.all().count()
self.assertEqual(2 * size, licenses_count)
log.debug('Adding more serial numbers to {0}'.format(SOFTWARE_1))
with generate_serials_file(size) as temp_file:
args = [self.course_id.to_deprecated_string(), SOFTWARE_1, temp_file.name]
call_command('import_serial_numbers', *args)
log.debug('There should be still only 2 course-software entries')
software_count = CourseSoftware.objects.all().count()
self.assertEqual(2, software_count)
        log.debug('Now we should have 3 sets of {0} serials'.format(size))
licenses_count = UserLicense.objects.all().count()
self.assertEqual(3 * size, licenses_count)
software = CourseSoftware.objects.get(pk=1)
lics = UserLicense.objects.filter(software=software)[:size]
known_serials = list(l.serial for l in lics)
known_serials.extend(generate_serials(10))
shuffle(known_serials)
log.debug('Adding some new and old serials to {0}'.format(SOFTWARE_1))
with NamedTemporaryFile() as tmpfile:
tmpfile.write('\n'.join(known_serials))
tmpfile.flush()
args = [self.course_id.to_deprecated_string(), SOFTWARE_1, tmpfile.name]
call_command('import_serial_numbers', *args)
log.debug('Check if we added only the new ones')
licenses_count = UserLicense.objects.filter(software=software).count()
self.assertEqual((2 * size) + 10, licenses_count)
def generate_serials(size=20):
'''generate a list of serial numbers'''
return [str(uuid4()) for _ in range(size)]
def generate_serials_file(size=20):
'''output list of generated serial numbers to a temp file'''
serials = generate_serials(size)
temp_file = NamedTemporaryFile()
temp_file.write('\n'.join(serials))
temp_file.flush()
return temp_file
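# Usage sketch (illustrative): the NamedTemporaryFile returned above also
# works as a context manager, which is how CommandTest consumes it:
#
#   with generate_serials_file(5) as f:
#       serials = open(f.name).read().splitlines()  # five fresh serials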
|
jsteemann/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/pythonwin/pywin/debugger/configui.py
|
17
|
from dbgcon import *
from pywin.mfc import dialog
class DebuggerOptionsPropPage(dialog.PropertyPage):
def __init__(self):
dialog.PropertyPage.__init__(self, win32ui.IDD_PP_DEBUGGER)
def OnInitDialog(self):
options = self.options = LoadDebuggerOptions()
self.AddDDX(win32ui.IDC_CHECK1, OPT_HIDE)
self[OPT_STOP_EXCEPTIONS] = options[OPT_STOP_EXCEPTIONS]
self.AddDDX(win32ui.IDC_CHECK2, OPT_STOP_EXCEPTIONS)
self[OPT_HIDE] = options[OPT_HIDE]
return dialog.PropertyPage.OnInitDialog(self)
def OnOK(self):
self.UpdateData()
dirty = 0
for key, val in self.items():
if self.options.has_key(key):
if self.options[key] != val:
self.options[key] = val
dirty = 1
if dirty:
SaveDebuggerOptions(self.options)
# If there is a debugger open, then set its options.
import pywin.debugger
if pywin.debugger.currentDebugger is not None:
pywin.debugger.currentDebugger.options = self.options
return 1
|
cervinko/calibre-web
|
refs/heads/master
|
cps/web.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mimetypes
import logging
from logging.handlers import RotatingFileHandler
import sys
import textwrap
mimetypes.add_type('application/xhtml+xml','.xhtml')
from flask import Flask, render_template, session, request, Response, redirect, url_for, send_from_directory, make_response, g, flash, abort
import db, config, ub, helper
import os
import errno
from sqlalchemy.sql.expression import func
from sqlalchemy.sql.expression import false
from sqlalchemy.exc import IntegrityError
from math import ceil
from flask.ext.login import LoginManager, login_user, logout_user, login_required, current_user, AnonymousUserMixin
from flask.ext.principal import Principal, Identity, AnonymousIdentity, identity_changed
import requests, zipfile
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
import base64
from sqlalchemy.sql import *
import json
import datetime
from uuid import uuid4
from shutil import copyfile
class ReverseProxied(object):
'''Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
Code courtesy of: http://flask.pocoo.org/snippets/35/
In nginx:
location /myprefix {
proxy_pass http://127.0.0.1:8083;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ.get('PATH_INFO', '')
if path_info and path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
server = environ.get('HTTP_X_FORWARDED_SERVER', '')
if server:
environ['HTTP_HOST'] = server
return self.app(environ, start_response)
app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
formatter = logging.Formatter(
"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
file_handler = RotatingFileHandler(os.path.join(config.LOG_DIR, "calibre-web.log"), maxBytes=10000, backupCount=1)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
app.logger.info('Starting Calibre Web...')
logging.getLogger("book_formats").addHandler(file_handler)
logging.getLogger("book_formats").setLevel(logging.INFO)
Principal(app)
class Anonymous(AnonymousUserMixin):
def __init__(self):
self.nickname = 'Guest'
self.role = -1
def role_admin(self):
return False
def role_download(self):
return False
def role_upload(self):
return False
def role_edit(self):
return False
lm = LoginManager(app)
lm.init_app(app)
lm.login_view = 'login'
lm.anonymous_user = Anonymous
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
@lm.user_loader
def load_user(id):
return ub.session.query(ub.User).filter(ub.User.id == int(id)).first()
@lm.header_loader
def load_user_from_header(header_val):
if header_val.startswith('Basic '):
header_val = header_val.replace('Basic ', '', 1)
        try:
            header_val = base64.b64decode(header_val)
            basic_username, basic_password = header_val.split(':', 1)
        except (TypeError, ValueError):
            # Malformed header: bail out here instead of hitting a
            # NameError on the undefined credentials below.
            return None
user = ub.session.query(ub.User).filter(ub.User.nickname == basic_username).first()
if user and check_password_hash(user.password, basic_password):
return user
return
def check_auth(username, password):
user = ub.session.query(ub.User).filter(ub.User.nickname == username).first()
if user and check_password_hash(user.password, password):
return True
else:
return False
def authenticate():
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_basic_auth_if_no_ano(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if config.ANON_BROWSE != 1:
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
#simple pagination for the feed
class Pagination(object):
def __init__(self, page, per_page, total_count):
self.page = page
self.per_page = per_page
self.total_count = total_count
@property
def pages(self):
return int(ceil(self.total_count / float(self.per_page)))
@property
def has_prev(self):
return self.page > 1
@property
def has_next(self):
return self.page < self.pages
def iter_pages(self, left_edge=2, left_current=2,
right_current=5, right_edge=2):
last = 0
for num in xrange(1, self.pages + 1):
if num <= left_edge or \
(num > self.page - left_current - 1 and \
num < self.page + right_current) or \
num > self.pages - right_edge:
if last + 1 != num:
yield None
yield num
last = num
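# Illustration (not used by the app): with the default window sizes,
# Pagination(page=6, per_page=10, total_count=100).iter_pages() yields
# [1, 2, None, 4, 5, 6, 7, 8, 9, 10]; None marks a gap for the template
# to render as an ellipsis.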
##pagination links in jinja
def url_for_other_page(page):
args = request.view_args.copy()
args['page'] = page
return url_for(request.endpoint, **args)
app.jinja_env.globals['url_for_other_page'] = url_for_other_page
def login_required_if_no_ano(func):
if config.ANON_BROWSE == 1:
return func
return login_required(func)
## custom jinja filters
@app.template_filter('shortentitle')
def shortentitle_filter(s):
if len(s) > 60:
s = s.split(':', 1)[0]
if len(s) > 60:
s = textwrap.wrap(s, 60, break_long_words=False)[0]+' [...]'
return s
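# Example behaviour (illustrative): a long "Title: Subtitle" string is cut
# at the first colon, e.g. shortentitle_filter('Main Title: ' + 'x' * 60)
# returns 'Main Title'; a long colon-free title is word-wrapped and the
# first chunk of at most 60 chars is returned with ' [...]' appended.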
def admin_required(f):
"""
Checks if current_user.role == 1
"""
@wraps(f)
def inner(*args, **kwargs):
if current_user.role_admin():
return f(*args, **kwargs)
abort(403)
return inner
def download_required(f):
@wraps(f)
def inner(*args, **kwargs):
if current_user.role_download() or current_user.role_admin():
return f(*args, **kwargs)
abort(403)
return inner
def upload_required(f):
@wraps(f)
def inner(*args, **kwargs):
if current_user.role_upload() or current_user.role_admin():
return f(*args, **kwargs)
abort(403)
return inner
def edit_required(f):
@wraps(f)
def inner(*args, **kwargs):
if current_user.role_edit() or current_user.role_admin():
return f(*args, **kwargs)
abort(403)
return inner
@app.before_request
def before_request():
g.user = current_user
g.public_shelfes = ub.session.query(ub.Shelf).filter(ub.Shelf.is_public == 1).all()
g.allow_registration = config.PUBLIC_REG
g.allow_upload = config.UPLOADING
@app.route("/feed")
@requires_basic_auth_if_no_ano
def feed_index():
xml = render_template('index.xml')
    response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/feed/osd")
@requires_basic_auth_if_no_ano
def feed_osd():
xml = render_template('osd.xml')
    response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/feed/search", methods=["GET"])
@requires_basic_auth_if_no_ano
def feed_search():
term = request.args.get("query")
if term:
random = db.session.query(db.Books).order_by(func.random()).limit(config.RANDOM_BOOKS)
entries = db.session.query(db.Books).filter(db.or_(db.Books.tags.any(db.Tags.name.like("%"+term+"%")),db.Books.authors.any(db.Authors.name.like("%"+term+"%")),db.Books.title.like("%"+term+"%"))).all()
xml = render_template('feed.xml', searchterm=term, entries=entries)
else:
xml = render_template('feed.xml', searchterm="")
    response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/feed/new")
@requires_basic_auth_if_no_ano
def feed_new():
off = request.args.get("start_index")
if off:
entries = db.session.query(db.Books).order_by(db.Books.last_modified.desc()).offset(off).limit(config.NEWEST_BOOKS)
else:
entries = db.session.query(db.Books).order_by(db.Books.last_modified.desc()).limit(config.NEWEST_BOOKS)
off = 0
xml = render_template('feed.xml', entries=entries, next_url="/feed/new?start_index=%d" % (int(config.NEWEST_BOOKS) + int(off)))
    response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/feed/discover")
@requires_basic_auth_if_no_ano
def feed_discover():
off = request.args.get("start_index")
if off:
entries = db.session.query(db.Books).order_by(func.random()).offset(off).limit(config.NEWEST_BOOKS)
else:
entries = db.session.query(db.Books).order_by(func.random()).limit(config.NEWEST_BOOKS)
off = 0
xml = render_template('feed.xml', entries=entries, next_url="/feed/discover?start_index=%d" % (int(config.NEWEST_BOOKS) + int(off)))
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/feed/hot")
@requires_basic_auth_if_no_ano
def feed_hot():
off = request.args.get("start_index")
if off:
entries = db.session.query(db.Books).filter(db.Books.ratings.any(db.Ratings.rating > 9)).offset(off).limit(config.NEWEST_BOOKS)
else:
entries = db.session.query(db.Books).filter(db.Books.ratings.any(db.Ratings.rating > 9)).limit(config.NEWEST_BOOKS)
off = 0
xml = render_template('feed.xml', entries=entries, next_url="/feed/hot?start_index=%d" % (int(config.NEWEST_BOOKS) + int(off)))
    response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/feed/download/<int:book_id>/<format>")
@requires_basic_auth_if_no_ano
@download_required
def get_opds_download_link(book_id, format):
format = format.split(".")[0]
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
data = db.session.query(db.Data).filter(db.Data.book == book.id).filter(db.Data.format == format.upper()).first()
helper.update_download(book_id, int(current_user.id))
author = helper.get_normalized_author(book.author_sort)
file_name = book.title
if len(author) > 0:
        file_name = author + '-' + file_name
file_name = helper.get_valid_filename(file_name)
    response = make_response(send_from_directory(os.path.join(config.DB_ROOT, book.path), data.name + "." + format))
response.headers["Content-Disposition"] = "attachment; filename=%s.%s" % (data.name, format)
return response
@app.route("/get_authors_json", methods = ['GET', 'POST'])
@login_required_if_no_ano
def get_authors_json():
if request.method == "GET":
query = request.args.get('q')
entries = db.session.execute("select name from authors where name like '%" + query + "%'")
json_dumps = json.dumps([dict(r) for r in entries])
return json_dumps
@app.route("/get_tags_json", methods = ['GET', 'POST'])
@login_required_if_no_ano
def get_tags_json():
if request.method == "GET":
query = request.args.get('q')
entries = db.session.execute("select name from tags where name like '%" + query + "%'")
json_dumps = json.dumps([dict(r) for r in entries])
return json_dumps
@app.route("/get_series_json", methods = ['GET', 'POST'])
@login_required_if_no_ano
def get_series_json():
if request.method == "GET":
query = request.args.get('q')
entries = db.session.execute("select name from series where name like '%" + query + "%'")
json_dumps = json.dumps([dict(r) for r in entries])
return json_dumps
@app.route("/get_matching_tags", methods = ['GET', 'POST'])
@login_required_if_no_ano
def get_matching_tags():
tag_dict = {'tags': []}
if request.method == "GET":
q = db.session.query(db.Books)
author_input = request.args.get('author_name')
title_input = request.args.get('book_title')
include_tag_inputs = request.args.getlist('include_tag')
exclude_tag_inputs = request.args.getlist('exclude_tag')
q = q.filter(db.Books.authors.any(db.Authors.name.like("%" + author_input + "%")), db.Books.title.like("%"+title_input+"%"))
if len(include_tag_inputs) > 0:
for tag in include_tag_inputs:
q = q.filter(db.Books.tags.any(db.Tags.id == tag))
if len(exclude_tag_inputs) > 0:
for tag in exclude_tag_inputs:
q = q.filter(not_(db.Books.tags.any(db.Tags.id == tag)))
for book in q:
for tag in book.tags:
if tag.id not in tag_dict['tags']:
tag_dict['tags'].append(tag.id)
json_dumps = json.dumps(tag_dict)
return json_dumps
@app.route("/", defaults={'page': 1})
@app.route('/page/<int:page>')
@login_required_if_no_ano
def index(page):
random = db.session.query(db.Books).order_by(func.random()).limit(config.RANDOM_BOOKS)
if page == 1:
entries = db.session.query(db.Books).order_by(db.Books.last_modified.desc()).limit(config.NEWEST_BOOKS)
else:
off = int(int(config.NEWEST_BOOKS) * (page - 1))
entries = db.session.query(db.Books).order_by(db.Books.last_modified.desc()).offset(off).limit(config.NEWEST_BOOKS)
pagination = Pagination(page, config.NEWEST_BOOKS, len(db.session.query(db.Books).all()))
return render_template('index.html', random=random, entries=entries, pagination=pagination, title="Latest Books")
@app.route("/hot", defaults={'page': 1})
@app.route('/hot/page/<int:page>')
@login_required_if_no_ano
def hot_books(page):
random = db.session.query(db.Books).filter(false())
off = int(int(6) * (page - 1))
all_books = ub.session.query(ub.Downloads, ub.func.count(ub.Downloads.book_id)).order_by(ub.func.count(ub.Downloads.book_id).desc()).group_by(ub.Downloads.book_id)
hot_books = all_books.offset(off).limit(config.NEWEST_BOOKS)
entries = list()
for book in hot_books:
entries.append(db.session.query(db.Books).filter(db.Books.id == book.Downloads.book_id).first())
numBooks = len(all_books.all())
pages = int(ceil(numBooks / float(config.NEWEST_BOOKS)))
if pages > 1:
pagination = Pagination(page, config.NEWEST_BOOKS, len(all_books.all()))
return render_template('index.html', random=random, entries=entries, pagination=pagination, title="Hot Books (most downloaded)")
else:
return render_template('index.html', random=random, entries=entries, title="Hot Books (most downloaded)")
@app.route("/stats")
@login_required
def stats():
counter = len(db.session.query(db.Books).all())
return render_template('stats.html', counter=counter, title="Statistics")
@app.route("/discover", defaults={'page': 1})
@app.route('/discover/page/<int:page>')
@login_required_if_no_ano
def discover(page):
if page == 1:
entries = db.session.query(db.Books).order_by(func.randomblob(2)).limit(config.NEWEST_BOOKS)
else:
off = int(int(config.NEWEST_BOOKS) * (page - 1))
entries = db.session.query(db.Books).order_by(func.randomblob(2)).offset(off).limit(config.NEWEST_BOOKS)
pagination = Pagination(page, config.NEWEST_BOOKS, len(db.session.query(db.Books).all()))
return render_template('discover.html', entries=entries, pagination=pagination, title="Random Books")
@app.route("/book/<int:id>")
@login_required_if_no_ano
def show_book(id):
entries = db.session.query(db.Books).filter(db.Books.id == id).first()
cc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
book_in_shelfs = []
shelfs = ub.session.query(ub.BookShelf).filter(ub.BookShelf.book_id == id).all()
for entry in shelfs:
book_in_shelfs.append(entry.shelf)
return render_template('detail.html', entry=entries, cc=cc, title=entries.title, books_shelfs=book_in_shelfs)
@app.route("/category")
@login_required_if_no_ano
def category_list():
entries = db.session.query(db.Tags).order_by(db.Tags.name).all()
return render_template('categories.html', entries=entries, title="Category list")
@app.route("/category/<name>")
@login_required_if_no_ano
def category(name):
random = db.session.query(db.Books).filter(false())
if name != "all":
entries = db.session.query(db.Books).filter(db.Books.tags.any(db.Tags.name.like("%" +name + "%" ))).order_by(db.Books.last_modified.desc()).all()
else:
entries = db.session.query(db.Books).all()
return render_template('index.html', random=random, entries=entries, title="Category: %s" % name)
@app.route("/series/<name>")
@login_required_if_no_ano
def series(name):
random = db.session.query(db.Books).filter(false())
entries = db.session.query(db.Books).filter(db.Books.series.any(db.Series.name.like("%" +name + "%" ))).order_by(db.Books.series_index).all()
return render_template('index.html', random=random, entries=entries, title="Series: %s" % name)
@app.route("/admin/")
@login_required
def admin():
#return "Admin ONLY!"
abort(403)
@app.route("/search", methods=["GET"])
@login_required_if_no_ano
def search():
term = request.args.get("query")
if term:
random = db.session.query(db.Books).order_by(func.random()).limit(config.RANDOM_BOOKS)
entries = db.session.query(db.Books).filter(db.or_(db.Books.tags.any(db.Tags.name.like("%"+term+"%")),db.Books.series.any(db.Series.name.like("%"+term+"%")),db.Books.authors.any(db.Authors.name.like("%"+term+"%")),db.Books.title.like("%"+term+"%"))).all()
return render_template('search.html', searchterm=term, entries=entries)
else:
return render_template('search.html', searchterm="")
@app.route("/advanced_search", methods=["GET"])
@login_required_if_no_ano
def advanced_search():
if request.method == 'GET':
q = db.session.query(db.Books)
include_tag_inputs = request.args.getlist('include_tag')
exclude_tag_inputs = request.args.getlist('exclude_tag')
author_name = request.args.get("author_name")
book_title = request.args.get("book_title")
if include_tag_inputs or exclude_tag_inputs or author_name or book_title:
searchterm = []
searchterm.extend((author_name, book_title))
tag_names = db.session.query(db.Tags).filter(db.Tags.id.in_(include_tag_inputs)).all()
searchterm.extend(tag.name for tag in tag_names)
searchterm = " + ".join(filter(None, searchterm))
q = q.filter(db.Books.authors.any(db.Authors.name.like("%" + author_name + "%")), db.Books.title.like("%"+book_title+"%"))
random = db.session.query(db.Books).order_by(func.random()).limit(config.RANDOM_BOOKS)
for tag in include_tag_inputs:
q = q.filter(db.Books.tags.any(db.Tags.id == tag))
for tag in exclude_tag_inputs:
q = q.filter(not_(db.Books.tags.any(db.Tags.id == tag)))
q = q.all()
return render_template('search.html', searchterm=searchterm, entries=q)
tags = db.session.query(db.Tags).order_by(db.Tags.name).all()
return render_template('search_form.html', tags=tags)
@app.route("/author")
@login_required_if_no_ano
def author_list():
entries = db.session.query(db.Authors).order_by(db.Authors.sort).all()
return render_template('authors.html', entries=entries, title="Author list")
@app.route("/author/<name>")
@login_required_if_no_ano
def author(name):
random = db.session.query(db.Books).filter(false())
entries = db.session.query(db.Books).filter(db.Books.authors.any(db.Authors.name.like("%" + name + "%"))).all()
return render_template('index.html', random=random, entries=entries, title="Author: %s" % name)
@app.route("/cover/<path:cover_path>")
@login_required_if_no_ano
def get_cover(cover_path):
return send_from_directory(os.path.join(config.DB_ROOT, cover_path), "cover.jpg")
@app.route("/read/<int:book_id>")
@login_required
def read_book(book_id):
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
book_dir = os.path.join(config.MAIN_DIR, "cps","static", str(book_id))
if not os.path.exists(book_dir):
os.mkdir(book_dir)
for data in book.data:
if data.format.lower() == "epub":
epub_file = os.path.join(config.DB_ROOT, book.path, data.name) + ".epub"
if not os.path.isfile(epub_file):
raise ValueError('Error opening eBook. File does not exist: ', epub_file)
zfile = zipfile.ZipFile(epub_file)
for name in zfile.namelist():
(dirName, fileName) = os.path.split(name)
newDir = os.path.join(book_dir, dirName)
if not os.path.exists(newDir):
try:
os.makedirs(newDir)
except OSError as exception:
if exception.errno == errno.EEXIST:
pass
else:
raise
if fileName:
fd = open(os.path.join(newDir, fileName), "wb")
fd.write(zfile.read(name))
fd.close()
zfile.close()
break
return render_template('read.html', bookid=book_id, title="Read a Book")
@app.route("/download/<int:book_id>/<format>")
@login_required
@download_required
def get_download_link(book_id, format):
format = format.split(".")[0]
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
data = db.session.query(db.Data).filter(db.Data.book == book.id).filter(db.Data.format == format.upper()).first()
helper.update_download(book_id, int(current_user.id))
author = helper.get_normalized_author(book.author_sort)
file_name = book.title
if len(author) > 0:
        file_name = author + '-' + file_name
file_name = helper.get_valid_filename(file_name)
    response = make_response(send_from_directory(os.path.join(config.DB_ROOT, book.path), data.name + "." + format))
response.headers["Content-Disposition"] = \
"attachment; " \
"filename={utf_filename}.{suffix};" \
"filename*=UTF-8''{utf_filename}.{suffix}".format(
utf_filename=file_name.encode('utf-8'),
suffix=format
)
return response
@app.route('/register', methods=['GET', 'POST'])
def register():
error = None
if not config.PUBLIC_REG:
abort(404)
if current_user is not None and current_user.is_authenticated():
return redirect(url_for('index', _external=True))
if request.method == "POST":
to_save = request.form.to_dict()
if not to_save["nickname"] or not to_save["email"] or not to_save["password"]:
flash("Please fill out all fields!", category="error")
return render_template('register.html', title="register")
existing_user = ub.session.query(ub.User).filter(ub.User.nickname == to_save["nickname"]).first()
existing_email = ub.session.query(ub.User).filter(ub.User.email == to_save["email"]).first()
if not existing_user and not existing_email:
content = ub.User()
content.password = generate_password_hash(to_save["password"])
content.nickname = to_save["nickname"]
content.email = to_save["email"]
content.role = 0
try:
ub.session.add(content)
ub.session.commit()
            except Exception:
ub.session.rollback()
flash("An unknown error occured. Please try again later.", category="error")
return render_template('register.html', title="register")
flash("Your account has been created. Please login.", category="success")
return redirect(url_for('login', _external=True))
else:
flash("This username or email address is already in use.", category="error")
return render_template('register.html', title="register")
return render_template('register.html', title="register")
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if current_user is not None and current_user.is_authenticated():
return redirect(url_for('index', _external=True))
if request.method == "POST":
form = request.form.to_dict()
user = ub.session.query(ub.User).filter(ub.User.nickname == form['username']).first()
if user and check_password_hash(user.password, form['password']):
login_user(user, remember = True)
flash("you are now logged in as: '%s'" % user.nickname, category="success")
return redirect(request.args.get("next") or url_for("index", _external=True))
else:
flash("Wrong Username or Password", category="error")
return render_template('login.html', title="login")
@app.route('/logout')
@login_required
def logout():
if current_user is not None and current_user.is_authenticated():
logout_user()
return redirect(request.args.get("next") or url_for("index", _external=True))
@app.route('/send/<int:book_id>')
@login_required
@download_required
def send_to_kindle(book_id):
settings = ub.get_mail_settings()
if settings.get("mail_server", "mail.example.com") == "mail.example.com":
flash("Please configure the SMTP mail settings first...", category="error")
elif current_user.kindle_mail:
result = helper.send_mail(book_id, current_user.kindle_mail)
if result is None:
flash("Book successfully send to %s" % current_user.kindle_mail, category="success")
helper.update_download(book_id, int(current_user.id))
else:
flash("There was an error sending this book: %s" % result, category="error")
else:
flash("Please configure your kindle email address first...", category="error")
return redirect(request.environ["HTTP_REFERER"])
@app.route("/shelf/add/<int:shelf_id>/<int:book_id>")
@login_required
def add_to_shelf(shelf_id, book_id):
shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first()
if not shelf.is_public and not shelf.user_id == int(current_user.id):
flash("Sorry you are not allowed to add a book to the the shelf: %s" % shelf.name)
return redirect(url_for('index', _external=True))
ins = ub.BookShelf(shelf=shelf.id, book_id=book_id)
ub.session.add(ins)
ub.session.commit()
flash("Book has been added to shelf: %s" % shelf.name, category="success")
#return redirect(url_for('show_book', id=book_id))
return redirect(request.environ["HTTP_REFERER"])
@app.route("/shelf/remove/<int:shelf_id>/<int:book_id>")
@login_required
def remove_from_shelf(shelf_id, book_id):
shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first()
if not shelf.is_public and not shelf.user_id == int(current_user.id):
flash("Sorry you are not allowed to remove a book from this shelf: %s" % shelf.name)
return redirect(url_for('index', _external=True))
book_shelf = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id, ub.BookShelf.book_id == book_id).first()
#rem = ub.BookShelf(shelf=shelf.id, book_id=book_id)
ub.session.delete(book_shelf)
ub.session.commit()
flash("Book has been removed from shelf: %s" % shelf.name, category="success")
return redirect(request.environ["HTTP_REFERER"])
@app.route("/shelf/create", methods=["GET", "POST"])
@login_required
def create_shelf():
shelf = ub.Shelf()
if request.method == "POST":
to_save = request.form.to_dict()
if "is_public" in to_save:
shelf.is_public = 1
shelf.name = to_save["title"]
shelf.user_id = int(current_user.id)
existing_shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.name == shelf.name).first()
if existing_shelf:
flash("A shelf with the name '%s' already exists." % to_save["title"], category="error")
else:
try:
ub.session.add(shelf)
ub.session.commit()
flash("Shelf %s created" % to_save["title"], category="success")
            except Exception:
flash("There was an error", category="error")
return render_template('shelf_edit.html', title="create a shelf")
else:
return render_template('shelf_edit.html', title="create a shelf")
@app.route("/shelf/<int:shelf_id>")
@login_required
def show_shelf(shelf_id):
shelf = ub.session.query(ub.Shelf).filter(ub.or_(ub.and_(ub.Shelf.user_id == int(current_user.id), ub.Shelf.id == shelf_id), ub.and_(ub.Shelf.is_public == 1, ub.Shelf.id == shelf_id))).first()
result = list()
if shelf:
books_in_shelf = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id).all()
for book in books_in_shelf:
cur_book = db.session.query(db.Books).filter(db.Books.id == book.book_id).first()
result.append(cur_book)
return render_template('shelf.html', entries=result, title="Shelf: '%s'" % shelf.name)
@app.route("/me", methods = ["GET", "POST"])
@login_required
def profile():
content = ub.session.query(ub.User).filter(ub.User.id == int(current_user.id)).first()
downloads = list()
for book in content.downloads:
downloads.append(db.session.query(db.Books).filter(db.Books.id == book.book_id).first())
if request.method == "POST":
to_save = request.form.to_dict()
if current_user.role_passwd() or current_user.role_admin():
if to_save["password"]:
content.password = generate_password_hash(to_save["password"])
if "kindle_mail" in to_save and to_save["kindle_mail"] != content.kindle_mail:
content.kindle_mail = to_save["kindle_mail"]
if to_save["email"] and to_save["email"] != content.email:
content.email = to_save["email"]
try:
ub.session.commit()
except IntegrityError:
ub.session.rollback()
flash("Found an existing account for this email address.", category="error")
return render_template("user_edit.html", content=content, downloads=downloads, title="%s's profile" % current_user.nickname)
flash("Profile updated", category="success")
return render_template("user_edit.html", profile=1, content=content, downloads=downloads, title="%s's profile" % current_user.nickname)
@app.route("/admin/user")
@login_required
@admin_required
def user_list():
content = ub.session.query(ub.User).all()
settings = ub.session.query(ub.Settings).first()
return render_template("user_list.html", content=content, email=settings, title="User list")
@app.route("/admin/user/new", methods = ["GET", "POST"])
@login_required
@admin_required
def new_user():
content = ub.User()
if request.method == "POST":
to_save = request.form.to_dict()
if not to_save["nickname"] or not to_save["email"] or not to_save["password"]:
flash("Please fill out all fields!", category="error")
return render_template("user_edit.html", new_user=1, content=content, title="Add new user")
content.password = generate_password_hash(to_save["password"])
content.nickname = to_save["nickname"]
content.email = to_save["email"]
content.role = 0
if "admin_role" in to_save:
content.role = content.role + ub.ROLE_ADMIN
if "download_role" in to_save:
content.role = content.role + ub.ROLE_DOWNLOAD
if "upload_role" in to_save:
content.role = content.role + ub.ROLE_UPLOAD
if "edit_role" in to_save:
content.role = content.role + ub.ROLE_EDIT
if "passwd_role" in to_save:
content.role = content.role + ub.ROLE_PASSWD
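        # (Assumption: the ub.ROLE_* constants are distinct powers of two,
        # so these additions build a bitmask that the role_*() helpers
        # check with bitwise tests.)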
try:
ub.session.add(content)
ub.session.commit()
flash("User '%s' created" % content.nickname, category="success")
return redirect(url_for('user_list', _external=True))
except IntegrityError:
ub.session.rollback()
flash("Found an existing account for this email address or nickname.", category="error")
return render_template("user_edit.html", new_user=1, content=content, title="Add new user")
@app.route("/admin/user/mailsettings", methods = ["GET", "POST"])
@login_required
@admin_required
def edit_mailsettings():
content = ub.session.query(ub.Settings).first()
if request.method == "POST":
to_save = request.form.to_dict()
content.mail_server = to_save["mail_server"]
content.mail_port = int(to_save["mail_port"])
content.mail_login = to_save["mail_login"]
content.mail_password = to_save["mail_password"]
content.mail_from = to_save["mail_from"]
if "mail_use_ssl" in to_save:
content.mail_use_ssl = 1
else:
content.mail_use_ssl = 0
try:
ub.session.commit()
flash("Mail settings updated", category="success")
    except Exception as e:
        flash(str(e), category="error")
return render_template("email_edit.html", content=content, title="Edit mail settings")
@app.route("/admin/user/<int:user_id>", methods = ["GET", "POST"])
@login_required
@admin_required
def edit_user(user_id):
content = ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
downloads = list()
for book in content.downloads:
downloads.append(db.session.query(db.Books).filter(db.Books.id == book.book_id).first())
if request.method == "POST":
to_save = request.form.to_dict()
if "delete" in to_save:
ub.session.delete(content)
flash("User '%s' deleted" % content.nickname, category="success")
return redirect(url_for('user_list', _external=True))
else:
if to_save["password"]:
content.password = generate_password_hash(to_save["password"])
if "admin_role" in to_save and not content.role_admin():
content.role = content.role + ub.ROLE_ADMIN
elif not "admin_role" in to_save and content.role_admin():
content.role = content.role - ub.ROLE_ADMIN
if "download_role" in to_save and not content.role_download():
content.role = content.role + ub.ROLE_DOWNLOAD
elif not "download_role" in to_save and content.role_download():
content.role = content.role - ub.ROLE_DOWNLOAD
if "upload_role" in to_save and not content.role_upload():
content.role = content.role + ub.ROLE_UPLOAD
elif not "upload_role" in to_save and content.role_upload():
content.role = content.role - ub.ROLE_UPLOAD
if "edit_role" in to_save and not content.role_edit():
content.role = content.role + ub.ROLE_EDIT
elif not "edit_role" in to_save and content.role_edit():
content.role = content.role - ub.ROLE_EDIT
if "passwd_role" in to_save and not content.role_passwd():
content.role = content.role + ub.ROLE_PASSWD
elif not "passwd_role" in to_save and content.role_passwd():
content.role = content.role - ub.ROLE_PASSWD
if to_save["email"] and to_save["email"] != content.email:
content.email = to_save["email"]
if "kindle_mail" in to_save and to_save["kindle_mail"] != content.kindle_mail:
content.kindle_mail = to_save["kindle_mail"]
try:
ub.session.commit()
flash("User '%s' updated" % content.nickname, category="success")
except IntegrityError:
ub.session.rollback()
flash("An unknown error occured.", category="error")
return render_template("user_edit.html", new_user=0, content=content, downloads=downloads, title="Edit User %s" % content.nickname)
@app.route("/admin/book/<int:book_id>", methods=['GET', 'POST'])
@login_required
@edit_required
def edit_book(book_id):
## create the function for sorting...
db.session.connection().connection.connection.create_function("title_sort",1,db.title_sort)
cc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
author_names = []
for author in book.authors:
author_names.append(author.name)
if request.method == 'POST':
edited_books_id = set()
to_save = request.form.to_dict()
if book.title != to_save["book_title"]:
book.title = to_save["book_title"]
edited_books_id.add(book.id)
input_authors = to_save["author_name"].split('&')
input_authors = map(lambda it: it.strip(), input_authors)
# we have all author names now
author0_before_edit = book.authors[0].name
# 1. search for authors to remove
del_authors = []
for c_author in book.authors:
found = False
for inp_author in input_authors:
if inp_author == c_author.name:
found = True
                    break
# if the author was not found in the new list, add him to remove list
if not found:
del_authors.append(c_author)
# 2. search for authors that need to be added
add_authors = []
for inp_author in input_authors:
found = False
for c_author in book.authors:
if inp_author == c_author.name:
found = True
                    break
if not found:
add_authors.append(inp_author)
# if there are authors to remove, we remove them now
if len(del_authors) > 0:
for del_author in del_authors:
book.authors.remove(del_author)
authors_books_count = db.session.query(db.Books).filter(db.Books.authors.any(db.Authors.id.is_(del_author.id))).count()
if authors_books_count == 0:
db.session.query(db.Authors).filter(db.Authors.id == del_author.id).delete()
# if there are authors to add, we add them now!
if len(add_authors) > 0:
for add_author in add_authors:
# check if an author with that name exists
t_author = db.session.query(db.Authors).filter(db.Authors.name == add_author).first()
# if no author is found add it
                if t_author is None:
new_author = db.Authors(add_author, add_author, "")
db.session.add(new_author)
t_author = db.session.query(db.Authors).filter(db.Authors.name == add_author).first()
# add author to book
book.authors.append(t_author)
if author0_before_edit != book.authors[0].name:
edited_books_id.add(book.id)
if to_save["cover_url"] and os.path.splitext(to_save["cover_url"])[1].lower() == ".jpg":
img = requests.get(to_save["cover_url"])
f = open(os.path.join(config.DB_ROOT, book.path, "cover.jpg"), "wb")
f.write(img.content)
f.close()
if book.series_index != to_save["series_index"]:
book.series_index = to_save["series_index"]
if len(book.comments):
book.comments[0].text = to_save["description"]
else:
book.comments.append(db.Comments(text=to_save["description"], book=book.id))
input_tags = to_save["tags"].split(',')
input_tags = map(lambda it: it.strip(), input_tags)
input_tags = [x for x in input_tags if x != '']
# we have all author names now
# 1. search for tags to remove
del_tags = []
for c_tag in book.tags:
found = False
for inp_tag in input_tags:
if inp_tag == c_tag.name:
found = True
                    break
# if the tag was not found in the new list, add him to remove list
if not found:
del_tags.append(c_tag)
# 2. search for tags that need to be added
add_tags = []
for inp_tag in input_tags:
found = False
for c_tag in book.tags:
if inp_tag == c_tag.name:
found = True
                    break
if not found:
add_tags.append(inp_tag)
# if there are tags to remove, we remove them now
if len(del_tags) > 0:
for del_tag in del_tags:
book.tags.remove(del_tag)
if len(del_tag.books) == 0:
db.session.delete(del_tag)
# if there are tags to add, we add them now!
if len(add_tags) > 0:
for add_tag in add_tags:
# check if a tag with that name exists
new_tag = db.session.query(db.Tags).filter(db.Tags.name == add_tag).first()
# if no tag is found add it
                if new_tag is None:
new_tag = db.Tags(add_tag)
db.session.add(new_tag)
new_tag = db.session.query(db.Tags).filter(db.Tags.name == add_tag).first()
# add tag to book
book.tags.append(new_tag)
if to_save["series"].strip():
is_series = db.session.query(db.Series).filter(db.Series.name.like('%' + to_save["series"].strip() + '%')).first()
if is_series:
book.series.append(is_series)
else:
new_series = db.Series(name=to_save["series"].strip(), sort=to_save["series"].strip())
book.series.append(new_series)
if to_save["rating"].strip():
old_rating = False
if len(book.ratings) > 0:
old_rating = book.ratings[0].rating
ratingx2 = int(float(to_save["rating"]) *2)
if ratingx2 != old_rating:
is_rating = db.session.query(db.Ratings).filter(db.Ratings.rating == ratingx2).first()
if is_rating:
book.ratings.append(is_rating)
else:
new_rating = db.Ratings(rating=ratingx2)
book.ratings.append(new_rating)
if old_rating:
book.ratings.remove(book.ratings[0])
else:
if len(book.ratings) > 0:
book.ratings.remove(book.ratings[0])
for c in cc:
cc_string = "custom_column_" + str(c.id)
if not c.is_multiple:
if len(getattr(book, cc_string)) > 0:
cc_db_value = getattr(book, cc_string)[0].value
else:
cc_db_value = None
if to_save[cc_string].strip():
if c.datatype == 'rating':
                        to_save[cc_string] = str(int(float(to_save[cc_string]) * 2))
if to_save[cc_string].strip() != cc_db_value:
                        if cc_db_value is not None:
#remove old cc_val
del_cc = getattr(book, cc_string)[0]
getattr(book, cc_string).remove(del_cc)
if len(del_cc.books) == 0:
db.session.delete(del_cc)
cc_class = db.cc_classes[c.id]
new_cc = db.session.query(cc_class).filter(cc_class.value == to_save[cc_string].strip()).first()
# if no cc val is found add it
                        if new_cc is None:
new_cc = cc_class(value=to_save[cc_string].strip())
db.session.add(new_cc)
new_cc = db.session.query(cc_class).filter(cc_class.value == to_save[cc_string].strip()).first()
# add cc value to book
getattr(book, cc_string).append(new_cc)
else:
                        if cc_db_value is not None:
#remove old cc_val
del_cc = getattr(book, cc_string)[0]
getattr(book, cc_string).remove(del_cc)
if len(del_cc.books) == 0:
db.session.delete(del_cc)
else:
input_tags = to_save[cc_string].split(',')
input_tags = map(lambda it: it.strip(), input_tags)
input_tags = [x for x in input_tags if x != '']
# we have all author names now
# 1. search for tags to remove
del_tags = []
for c_tag in getattr(book, cc_string):
found = False
for inp_tag in input_tags:
if inp_tag == c_tag.value:
found = True
                            break
# if the tag was not found in the new list, add him to remove list
if not found:
del_tags.append(c_tag)
# 2. search for tags that need to be added
add_tags = []
for inp_tag in input_tags:
found = False
for c_tag in getattr(book, cc_string):
if inp_tag == c_tag.value:
found = True
                            break
if not found:
add_tags.append(inp_tag)
# if there are tags to remove, we remove them now
if len(del_tags) > 0:
for del_tag in del_tags:
getattr(book, cc_string).remove(del_tag)
if len(del_tag.books) == 0:
db.session.delete(del_tag)
# if there are tags to add, we add them now!
if len(add_tags) > 0:
for add_tag in add_tags:
# check if a tag with that name exists
new_tag = db.session.query(db.cc_classes[c.id]).filter(db.cc_classes[c.id].value == add_tag).first()
# if no tag is found add it
                        if new_tag is None:
new_tag = db.cc_classes[c.id](value=add_tag)
db.session.add(new_tag)
new_tag = db.session.query(db.cc_classes[c.id]).filter(db.cc_classes[c.id].value == add_tag).first()
# add tag to book
getattr(book, cc_string).append(new_tag)
db.session.commit()
author_names = []
for author in book.authors:
author_names.append(author.name)
for b in edited_books_id:
helper.update_dir_stucture(b)
if "detail_view" in to_save:
return redirect(url_for('show_book', id=book.id, _external=True))
else:
return render_template('edit_book.html', book=book, authors=author_names, cc=cc)
else:
return render_template('edit_book.html', book=book, authors=author_names, cc=cc)
import uploader
from shutil import move
@app.route("/upload", methods = ["GET", "POST"])
@login_required
@upload_required
def upload():
if not config.UPLOADING:
abort(404)
## create the function for sorting...
db.session.connection().connection.connection.create_function("title_sort",1,db.title_sort)
db.session.connection().connection.connection.create_function('uuid4', 0, lambda : str(uuid4()))
if request.method == 'POST' and 'btn-upload' in request.files:
file = request.files['btn-upload']
meta = uploader.upload(file)
title = meta.title
author = meta.author
title_dir = helper.get_valid_filename(title.decode('utf-8'), False)
author_dir = helper.get_valid_filename(author.decode('utf-8'), False)
data_name = title_dir
        filepath = os.path.join(config.DB_ROOT, author_dir, title_dir)
        saved_filename = os.path.join(filepath, data_name + meta.extension)
if not os.path.exists(filepath):
try:
os.makedirs(filepath)
except OSError:
flash("Failed to create path %s (Permission denied)." % filepath, category="error")
return redirect(url_for('index', _external=True))
try:
move(meta.file_path, saved_filename)
except OSError:
flash("Failed to store file %s (Permission denied)." % saved_filename, category="error")
return redirect(url_for('index', _external=True))
file_size = os.path.getsize(saved_filename)
if meta.cover is None:
has_cover = 0
basedir = os.path.dirname(__file__)
copyfile(os.path.join(basedir, "static/generic_cover.jpg"), os.path.join(filepath, "cover.jpg"))
else:
has_cover = 1
move(meta.cover, os.path.join(filepath, "cover.jpg"))
is_author = db.session.query(db.Authors).filter(db.Authors.name == author).first()
if is_author:
db_author = is_author
else:
db_author = db.Authors(author, "", "")
db.session.add(db_author)
path = os.path.join(author_dir, title_dir)
        db_book = db.Books(title, "", "", datetime.datetime.now(), datetime.datetime(101, 1, 1), 1, datetime.datetime.now(), path, has_cover, db_author, [])
db_book.authors.append(db_author)
db_data = db.Data(db_book, meta.extension.upper()[1:], file_size, data_name)
db_book.data.append(db_data)
db.session.add(db_book)
db.session.commit()
author_names = []
for author in db_book.authors:
author_names.append(author.name)
cc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
if current_user.role_edit() or current_user.role_admin():
return render_template('edit_book.html', book=db_book, authors=author_names, cc=cc)
book_in_shelfs = []
return render_template('detail.html', entry=db_book, cc=cc, title=db_book.title, books_shelfs=book_in_shelfs)
|
alangwansui/mtl_ordercenter
|
refs/heads/master
|
openerp/addons/001_report/__init__.py
|
1
|
#!/usr/bin/python
# -*-coding:utf-8 -*-
import sale_report
|
BhallaLab/moose-core
|
refs/heads/master
|
tests/core/params.py
|
4
|
# -*- coding: utf-8 -*-
"""params.py
Parameters used in this model
These parameters are from paper Miller et. al. "The stability of CaMKII
switch"
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2015, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
run_time = 30
N_CaMK = 10
N_PP1 = 100
num_switch = 1
voxel_length = 125e-9
num_voxels = 1
diff_consts = { 'x' : 1e-13, 'y' : 1e-13, 'pp1' : 1e-13 }
conc_i1_free = 0.1e-3
act_CaN = 1.0
act_PKA = 1.0
# Michaelis constant of protein phosphatase.
# Values from 0.4 uM to 2.0 uM have been used; Miller uses 0.2 uM. The switch
# shows bistability over these ranges. We take the largest K_M, i.e. the
# slowest first step in dephosphorylation.
K_M = 10e-3
k_2 = 10.0
# Hill coefficient for Ca++ activation of CaMKII
K_H1 = 0.7e-3
K_H2 = 0.3e-3
k_1 = 1.5
k_3 = 100e3
k_4 = 0.001
K_I = 1e-6
rate_loosex = 0.1
rate_loosey = 0.1
rate_gainx = 1
rate_gainy = 1
turnover_rate = 1/(30*3600.0)
v_1 = 1.268e-5
v_2 = 4.36e-3
phosphatase_inhibit = 280.0
vi = phosphatase_inhibit
## Calcium input expression.
ca_basal = 80e-6
ca_expr = "(fmod(t,4)<2)?{0}:({0}*(1+0.5*rand(-1)))".format(ca_basal)
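# A minimal sketch (not from the paper) of what ca_expr encodes, runnable
# outside MOOSE. Treating rand(-1) as a uniform draw from [0, 1) is an
# assumption about the expression parser, not something verified here.
if __name__ == '__main__':
    import random
    def ca_input(t, basal=ca_basal):
        # Calcium sits at the basal level for 2 s out of every 4 s and is
        # scaled by (1 + 0.5*u), u ~ U[0, 1), during the other 2 s.
        if t % 4 < 2:
            return basal
        return basal * (1 + 0.5 * random.random())
    print([ca_input(t) for t in range(8)])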
|
auready/django
|
refs/heads/master
|
tests/reverse_lookup/tests.py
|
31
|
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Choice, Poll, User
class ReverseLookupTests(TestCase):
def setUp(self):
john = User.objects.create(name="John Doe")
jim = User.objects.create(name="Jim Bo")
first_poll = Poll.objects.create(
question="What's the first question?",
creator=john
)
second_poll = Poll.objects.create(
question="What's the second question?",
creator=jim
)
Choice.objects.create(
poll=first_poll,
related_poll=second_poll,
name="This is the answer."
)
def test_reverse_by_field(self):
u1 = User.objects.get(
poll__question__exact="What's the first question?"
)
self.assertEqual(u1.name, "John Doe")
u2 = User.objects.get(
poll__question__exact="What's the second question?"
)
self.assertEqual(u2.name, "Jim Bo")
def test_reverse_by_related_name(self):
p1 = Poll.objects.get(poll_choice__name__exact="This is the answer.")
self.assertEqual(p1.question, "What's the first question?")
p2 = Poll.objects.get(
related_choice__name__exact="This is the answer.")
self.assertEqual(p2.question, "What's the second question?")
def test_reverse_field_name_disallowed(self):
"""
        If a related_name is given, you can't use the field name instead.
"""
with self.assertRaises(FieldError):
Poll.objects.get(choice__name__exact="This is the answer")
|
PGHS-CP1A-2015/python_koans_kjhansen
|
refs/heads/master
|
python3/koans/about_list_assignments.py
|
14
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutArrayAssignments in the Ruby Koans
#
from runner.koan import *
class AboutListAssignments(Koan):
def test_non_parallel_assignment(self):
names = ["John", "Smith"]
self.assertEqual(__, names)
def test_parallel_assignments(self):
first_name, last_name = ["John", "Smith"]
self.assertEqual(__, first_name)
self.assertEqual(__, last_name)
def test_parallel_assignments_with_extra_values(self):
title, *first_names, last_name = ["Sir", "Ricky", "Bobby", "Worthington"]
self.assertEqual(__, title)
self.assertEqual(__, first_names)
self.assertEqual(__, last_name)
def test_parallel_assignments_with_sublists(self):
first_name, last_name = [["Willie", "Rae"], "Johnson"]
self.assertEqual(__, first_name)
self.assertEqual(__, last_name)
def test_swapping_with_parallel_assignment(self):
first_name = "Roy"
last_name = "Rob"
first_name, last_name = last_name, first_name
self.assertEqual(__, first_name)
self.assertEqual(__, last_name)
|
tnemis/staging-server
|
refs/heads/master
|
students/migrations/0002_initial.py
|
5
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Child_detail'
db.create_table(u'students_child_detail', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('aadhaar_eid_number', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('aadhaar_uid_number', self.gf('django.db.models.fields.BigIntegerField')(null=True, blank=True)),
('photograph', self.gf('imagekit.models.fields.ProcessedImageField')(max_length=100, null=True, blank=True)),
('gender', self.gf('django.db.models.fields.CharField')(max_length=15)),
('dob', self.gf('django.db.models.fields.DateField')()),
('community', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Community'])),
('religion', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Religion'])),
('mothertounge', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Language'])),
('phone_number', self.gf('django.db.models.fields.BigIntegerField')(null=True, blank=True)),
('child_differently_abled', self.gf('django.db.models.fields.CharField')(max_length=3)),
('differently_abled', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Differently_abled'], null=True, blank=True)),
('child_disadvantaged_group', self.gf('django.db.models.fields.CharField')(max_length=3)),
('disadvantaged_group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Disadvantaged_group'], null=True, blank=True)),
('subcaste', self.gf('smart_selects.db_fields.ChainedForeignKey')(to=orm['baseapp.Sub_Castes'], null=True, blank=True)),
('nationality', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Nationality'])),
('house_address', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('native_district', self.gf('django.db.models.fields.CharField')(max_length=50)),
('pin_code', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('blood_group', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('mother_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('mother_occupation', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('mother_income', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True)),
('father_name', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('father_occupation', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('father_income', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True)),
('class_studying', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Class_Studying'])),
('group_code', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Group_code'], null=True, blank=True)),
('attendance_status', self.gf('django.db.models.fields.CharField')(max_length=30)),
('sport_participation', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
('education_medium', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Education_medium'])),
('district', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.District'])),
('block', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Block'])),
('zone', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Zone'])),
('habitation', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Habitation'])),
('unique_id_no', self.gf('django.db.models.fields.BigIntegerField')(unique=True, null=True, blank=True)),
('school', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.School'])),
('staff_id', self.gf('django.db.models.fields.CharField')(max_length=30)),
('bank', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Bank'], null=True, blank=True)),
('bank_account_no', self.gf('django.db.models.fields.BigIntegerField')(null=True, blank=True)),
('govt_schemes_status', self.gf('django.db.models.fields.CharField')(max_length=5)),
('academic_year', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Academic_Year'])),
('transfer_flag', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True)),
('transfer_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('modification_flag', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True)),
('verification_flag', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True, blank=True)),
('created_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'students', ['Child_detail'])
# Adding M2M table for field schemes on 'Child_detail'
m2m_table_name = db.shorten_name(u'students_child_detail_schemes')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('child_detail', models.ForeignKey(orm[u'students.child_detail'], null=False)),
('schemes', models.ForeignKey(orm[u'baseapp.schemes'], null=False))
))
db.create_unique(m2m_table_name, ['child_detail_id', 'schemes_id'])
# Adding model 'Child_family_detail'
db.create_table(u'students_child_family_detail', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('child_key', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['students.Child_detail'])),
('block', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.Block'])),
('member_name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('member_relationship', self.gf('django.db.models.fields.CharField')(max_length=20)),
('member_age', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('member_qualification', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('member_employed', self.gf('django.db.models.fields.CharField')(max_length=5)),
('member_income', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('staff_id', self.gf('django.db.models.fields.CharField')(max_length=30)),
('created_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'students', ['Child_family_detail'])
# Adding model 'Child_Transfer_History'
db.create_table(u'students_child_transfer_history', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('child_key', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['students.Child_detail'])),
('old_school', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['baseapp.School'])),
('tc_issue_date', self.gf('django.db.models.fields.DateField')()),
('created_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'students', ['Child_Transfer_History'])
def backwards(self, orm):
# Deleting model 'Child_detail'
db.delete_table(u'students_child_detail')
# Removing M2M table for field schemes on 'Child_detail'
db.delete_table(db.shorten_name(u'students_child_detail_schemes'))
# Deleting model 'Child_family_detail'
db.delete_table(u'students_child_family_detail')
# Deleting model 'Child_Transfer_History'
db.delete_table(u'students_child_transfer_history')
models = {
u'baseapp.academic_year': {
'Meta': {'object_name': 'Academic_Year'},
'academic_year': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'baseapp.bank': {
'Meta': {'object_name': 'Bank'},
'bank': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'baseapp.block': {
'Meta': {'object_name': 'Block'},
'block_code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'block_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'district': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.District']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'baseapp.class_studying': {
'Meta': {'object_name': 'Class_Studying'},
'class_studying': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'baseapp.community': {
'Meta': {'object_name': 'Community'},
'community_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'community_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'baseapp.differently_abled': {
'Meta': {'object_name': 'Differently_abled'},
'da_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'da_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'baseapp.disadvantaged_group': {
'Meta': {'object_name': 'Disadvantaged_group'},
'dis_group_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'baseapp.district': {
'Meta': {'object_name': 'District'},
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'district_code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'district_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'baseapp.education_medium': {
'Meta': {'object_name': 'Education_medium'},
'education_medium': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'baseapp.group_code': {
'Meta': {'object_name': 'Group_code'},
'group_code': ('django.db.models.fields.PositiveIntegerField', [], {}),
'group_description': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
'group_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'baseapp.habitation': {
'Meta': {'object_name': 'Habitation'},
'block': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Block']"}),
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'zone': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['baseapp.Zone']"})
},
u'baseapp.language': {
'Meta': {'object_name': 'Language'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'language_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'baseapp.management': {
'Meta': {'object_name': 'Management'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'management': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'baseapp.nationality': {
'Meta': {'object_name': 'Nationality'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nationality': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'baseapp.religion': {
'Meta': {'object_name': 'Religion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'religion_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'baseapp.schemes': {
'Meta': {'object_name': 'Schemes'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scheme_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'scheme_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'baseapp.school': {
'Meta': {'object_name': 'School'},
'block': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['baseapp.Block']"}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'district': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.District']"}),
'habitation': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['baseapp.Habitation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'management': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Management']"}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'school_code': ('django.db.models.fields.BigIntegerField', [], {}),
'school_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'student_id_count': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'baseapp.sub_castes': {
'Meta': {'object_name': 'Sub_Castes'},
'caste_code': ('django.db.models.fields.PositiveIntegerField', [], {}),
'caste_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Community']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'baseapp.zone': {
'Meta': {'object_name': 'Zone'},
'block': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Block']"}),
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'zone_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Zone_type']"})
},
u'baseapp.zone_type': {
'Meta': {'object_name': 'Zone_type'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'zone_type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
u'students.child_detail': {
'Meta': {'object_name': 'Child_detail'},
'aadhaar_eid_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'aadhaar_uid_number': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'academic_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Academic_Year']"}),
'attendance_status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'bank': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Bank']", 'null': 'True', 'blank': 'True'}),
'bank_account_no': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'block': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Block']"}),
'blood_group': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'child_differently_abled': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'child_disadvantaged_group': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'class_studying': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Class_Studying']"}),
'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Community']"}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'differently_abled': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Differently_abled']", 'null': 'True', 'blank': 'True'}),
'disadvantaged_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Disadvantaged_group']", 'null': 'True', 'blank': 'True'}),
'district': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.District']"}),
'dob': ('django.db.models.fields.DateField', [], {}),
'education_medium': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Education_medium']"}),
'father_income': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'father_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'father_occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'govt_schemes_status': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'group_code': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Group_code']", 'null': 'True', 'blank': 'True'}),
'habitation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Habitation']"}),
'house_address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modification_flag': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mother_income': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'mother_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'mother_occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'mothertounge': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Language']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'nationality': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Nationality']"}),
'native_district': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'phone_number': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'photograph': ('imagekit.models.fields.ProcessedImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pin_code': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'religion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Religion']"}),
'schemes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['baseapp.Schemes']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.School']"}),
'sport_participation': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'staff_id': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'subcaste': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['baseapp.Sub_Castes']", 'null': 'True', 'blank': 'True'}),
'transfer_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'transfer_flag': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'unique_id_no': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'verification_flag': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'zone': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Zone']"})
},
u'students.child_family_detail': {
'Meta': {'object_name': 'Child_family_detail'},
'block': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.Block']"}),
'child_key': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['students.Child_detail']"}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member_age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'member_employed': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'member_income': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'member_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'member_qualification': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'member_relationship': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'staff_id': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'students.child_transfer_history': {
'Meta': {'object_name': 'Child_Transfer_History'},
'child_key': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['students.Child_detail']"}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'old_school': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['baseapp.School']"}),
'tc_issue_date': ('django.db.models.fields.DateField', [], {})
}
}
complete_apps = ['students']
|
laszlocsomor/tensorflow
|
refs/heads/master
|
tensorflow/contrib/kfac/python/ops/estimator.py
|
2
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the high-level Fisher estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import itertools
import math
import numpy as np
from tensorflow.contrib.kfac.python.ops import utils
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.util import nest
class _DeviceContextGenerator(object):
"""Class for generating device contexts in a round-robin fashion."""
def __init__(self, devices):
"""Creates a _DeviceContextGenerator object.
Example usage:
```python
    dcg = _DeviceContextGenerator(['/gpu:0', '/gpu:1'])
with dcg():
# All operations in this context will be placed on GPU 0
...
with dcg():
# All operations in this context will be placed on GPU 1
...
```
Args:
      devices: An iterable of device strings (or None). Successive calls to
        __call__ will yield contexts which place ops on these devices in a
        round-robin fashion.
"""
self._cycle = None if devices is None else itertools.cycle(devices)
@contextlib.contextmanager
def __call__(self):
"""Returns a context manager specifying the default device."""
if self._cycle is None:
yield
else:
with tf_ops.device(next(self._cycle)):
yield
class FisherEstimator(object):
"""Fisher estimator class supporting various approximations of the Fisher."""
def __init__(self,
variables,
cov_ema_decay,
damping,
layer_collection,
estimation_mode="gradients",
colocate_gradients_with_ops=False,
cov_devices=None,
inv_devices=None):
"""Create a FisherEstimator object.
Args:
variables: A list of the variables for which to estimate the Fisher. This
must match the variables registered in layer_collection (if it is not
None).
cov_ema_decay: The decay factor used when calculating the covariance
estimate moving averages.
damping: The damping factor used to stabilize training due to errors in
the local approximation with the Fisher information matrix, and to
regularize the update direction by making it closer to the gradient.
(Higher damping means the update looks more like a standard gradient
update - see Tikhonov regularization.)
layer_collection: The layer collection object, which holds the fisher
blocks, kronecker factors, and losses associated with the
graph.
estimation_mode: The type of estimator to use for the Fishers. Can be
        'gradients', 'empirical', 'curvature_prop', or 'exact'.
(Default: 'gradients'). 'gradients' is the basic estimation approach
from the original K-FAC paper. 'empirical' computes the 'empirical'
Fisher information matrix (which uses the data's distribution for the
targets, as opposed to the true Fisher which uses the model's
distribution) and requires that each registered loss have specified
targets. 'curvature_propagation' is a method which estimates the
Fisher using self-products of random 1/-1 vectors times "half-factors"
of the Fisher, as described here: https://arxiv.org/abs/1206.6464 .
Finally, 'exact' is the obvious generalization of Curvature
Propagation to compute the exact Fisher (modulo any additional
diagonal or Kronecker approximations) by looping over one-hot vectors
for each coordinate of the output instead of using 1/-1 vectors. It
is more expensive to compute than the other three options by a factor
equal to the output dimension, roughly speaking.
colocate_gradients_with_ops: Whether we should request gradients be
colocated with their respective ops.
cov_devices: Iterable of device strings (e.g. '/gpu:0'). Covariance
computations will be placed on these devices in a round-robin fashion.
Can be None, which means that no devices are specified.
inv_devices: Iterable of device strings (e.g. '/gpu:0'). Inversion
computations will be placed on these devices in a round-robin fashion.
Can be None, which means that no devices are specified.
Raises:
ValueError: If no losses have been registered with layer_collection.
"""
self._variables = variables
self._damping = damping
self._estimation_mode = estimation_mode
self._layers = layer_collection
self._layers.create_subgraph()
self._check_registration(variables)
self._gradient_fns = {
"gradients": self._get_grads_lists_gradients,
"empirical": self._get_grads_lists_empirical,
"curvature_prop": self._get_grads_lists_curvature_prop,
"exact": self._get_grads_lists_exact
}
self._colocate_gradients_with_ops = colocate_gradients_with_ops
self._cov_device_context_generator = _DeviceContextGenerator(cov_devices)
if inv_devices == cov_devices:
self._inv_device_context_generator = self._cov_device_context_generator
else:
self._inv_device_context_generator = _DeviceContextGenerator(inv_devices)
setup = self._setup(cov_ema_decay)
self.cov_update_op, self.inv_update_op, self.inv_updates_dict = setup
@property
def variables(self):
return self._variables
@property
def damping(self):
return self._damping
def _apply_transformation(self, vecs_and_vars, transform):
"""Applies an block-wise transformation to the corresponding vectors.
Args:
vecs_and_vars: List of (vector, variable) pairs.
transform: A function of the form f(fb, vec), where vec is the vector
to transform and fb is its corresponding block in the matrix, that
returns the transformed vector.
Returns:
A list of (transformed vector, var) pairs in the same order as
vecs_and_vars.
"""
vecs = utils.SequenceDict((var, vec) for vec, var in vecs_and_vars)
trans_vecs = utils.SequenceDict()
for params, fb in self._layers.fisher_blocks.items():
trans_vecs[params] = transform(fb, vecs[params])
return [(trans_vecs[var], var) for _, var in vecs_and_vars]
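  # Example: multiply_inverse and multiply below are thin wrappers that pass
  # fb.multiply_inverse / fb.multiply as the `transform` argument.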
def multiply_inverse(self, vecs_and_vars):
"""Multiplies the vecs by the corresponding (damped) inverses of the blocks.
Args:
vecs_and_vars: List of (vector, variable) pairs.
Returns:
A list of (transformed vector, var) pairs in the same order as
vecs_and_vars.
"""
return self._apply_transformation(vecs_and_vars,
lambda fb, vec: fb.multiply_inverse(vec))
def multiply(self, vecs_and_vars):
"""Multiplies the vectors by the corresponding (damped) blocks.
Args:
vecs_and_vars: List of (vector, variable) pairs.
Returns:
A list of (transformed vector, var) pairs in the same order as
vecs_and_vars.
"""
return self._apply_transformation(vecs_and_vars,
lambda fb, vec: fb.multiply(vec))
def _check_registration(self, variables):
"""Checks that all variable uses have been registered properly.
Args:
variables: List of variables.
Raises:
ValueError: If any registered variables are not included in the list.
ValueError: If any variable in the list is not registered.
ValueError: If any variable in the list is registered with the wrong
number of "uses" in the subgraph recorded (vs the number of times that
variable is actually used in the subgraph).
"""
# Note that overlapping parameters (i.e. those that share variables) will
# be caught by layer_collection.LayerParametersDict during registration.
reg_use_map = self._layers.get_use_count_map()
error_messages = []
for var in variables:
total_uses = self._layers.subgraph.variable_uses(var)
reg_uses = reg_use_map[var]
if reg_uses == 0:
error_messages.append("Variable {} not registered.".format(var))
elif (not math.isinf(reg_uses)) and reg_uses != total_uses:
error_messages.append(
"Variable {} registered with wrong number of uses ({} "
"registrations vs {} uses).".format(var, reg_uses, total_uses))
num_get_vars = len(reg_use_map)
if num_get_vars > len(variables):
error_messages.append("{} registered variables were not included in list."
.format(num_get_vars - len(variables)))
if error_messages:
error_messages = [
"Found the following errors with variable registration:"
] + error_messages
raise ValueError("\n\t".join(error_messages))
def _setup(self, cov_ema_decay):
"""Sets up the various operations.
Args:
cov_ema_decay: The decay factor used when calculating the covariance
estimate moving averages.
Returns:
A triple (covs_update_op, invs_update_op, inv_updates_dict), where
covs_update_op is the grouped Op to update all the covariance estimates,
invs_update_op is the grouped Op to update all the inverses, and
inv_updates_dict is a dict mapping Op names to individual inverse updates.
Raises:
ValueError: If estimation_mode was improperly specified at construction.
"""
fisher_blocks_list = self._layers.get_blocks()
tensors_to_compute_grads = [
fb.tensors_to_compute_grads() for fb in fisher_blocks_list
]
try:
grads_lists = self._gradient_fns[self._estimation_mode](
tensors_to_compute_grads)
except KeyError:
raise ValueError("Unrecognized value {} for estimation_mode.".format(
self._estimation_mode))
# TODO(b/68033310): This loop round-robins the "concat" operations which
# gather the inputs for the cov_updates. In future, we might do these
# computations locally then communicate the results, which would require a
# modification to this code.
for grads_list, fb in zip(grads_lists, fisher_blocks_list):
with self._cov_device_context_generator():
fb.instantiate_factors(grads_list, self.damping)
cov_updates = [
factor.make_covariance_update_op(cov_ema_decay)
for factor in self._layers.get_factors()
]
inv_updates = {op.name: op for op in self._get_all_inverse_update_ops()}
return control_flow_ops.group(*cov_updates), control_flow_ops.group(
*inv_updates.values()), inv_updates
def _get_all_inverse_update_ops(self):
for factor in self._layers.get_factors():
with self._inv_device_context_generator():
for op in factor.make_inverse_update_ops():
yield op
def _get_grads_lists_gradients(self, tensors):
grads_flat = gradients_impl.gradients(
self._layers.total_sampled_loss(),
nest.flatten(tensors),
colocate_gradients_with_ops=self._colocate_gradients_with_ops)
grads_all = nest.pack_sequence_as(tensors, grads_flat)
return tuple((grad,) for grad in grads_all)
def _get_grads_lists_empirical(self, tensors):
grads_flat = gradients_impl.gradients(
self._layers.total_loss(),
nest.flatten(tensors),
colocate_gradients_with_ops=self._colocate_gradients_with_ops)
grads_all = nest.pack_sequence_as(tensors, grads_flat)
return tuple((grad,) for grad in grads_all)
def _get_transformed_random_signs(self):
transformed_random_signs = []
for loss in self._layers.losses:
transformed_random_signs.append(
loss.multiply_fisher_factor(
utils.generate_random_signs(loss.fisher_factor_inner_shape)))
return transformed_random_signs
def _get_grads_lists_curvature_prop(self, tensors):
loss_inputs = list(loss.inputs for loss in self._layers.losses)
transformed_random_signs = self._get_transformed_random_signs()
grads_flat = gradients_impl.gradients(
nest.flatten(loss_inputs),
nest.flatten(tensors),
grad_ys=nest.flatten(transformed_random_signs),
colocate_gradients_with_ops=self._colocate_gradients_with_ops)
grads_all = nest.pack_sequence_as(tensors, grads_flat)
return tuple((grad,) for grad in grads_all)
def _get_grads_lists_exact(self, tensors):
"""Returns a list of all gradients, computing them exactly.
Args:
tensors: Tensors for which to compute gradients.
"""
# Loop over all coordinates of all losses.
grads_all = []
for loss in self._layers.losses:
for index in np.ndindex(*loss.fisher_factor_inner_static_shape[1:]):
transformed_one_hot = loss.multiply_fisher_factor_replicated_one_hot(
index)
grads_flat = gradients_impl.gradients(
loss.inputs,
nest.flatten(tensors),
grad_ys=transformed_one_hot,
colocate_gradients_with_ops=self._colocate_gradients_with_ops)
grads_all.append(nest.pack_sequence_as(tensors, grads_flat))
return zip(*grads_all)
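  # A hypothetical construction sketch (the session and layer_collection are
  # assumptions, not defined in this module):
  #
  #   est = FisherEstimator(variables=tf.trainable_variables(),
  #                         cov_ema_decay=0.95,
  #                         damping=1e-3,
  #                         layer_collection=layer_collection)
  #   sess.run(est.cov_update_op)
  #   sess.run(est.inv_update_op)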
|
pymedusa/SickRage
|
refs/heads/master
|
ext2/libfuturize/fixes/fix_metaclass.py
|
61
|
# coding: utf-8
"""Fixer for __metaclass__ = X -> (future.utils.with_metaclass(X)) methods.
The various forms of classdef (inherits nothing, inherits once, inherits
many) don't parse the same in the CST, so we look at ALL classes for
a __metaclass__ and, if we find one, normalize the inherits to all be
an arglist.
For one-liner classes ('class X: pass') there is no indent/dedent so
we normalize those into having a suite.
Moving the __metaclass__ into the classdef can also cause the class
body to be empty so there is some special casing for that as well.
This fixer also tries very hard to keep original indenting and spacing
in all those corner cases.
"""
# This is a derived work of Lib/lib2to3/fixes/fix_metaclass.py under the
# copyright of the Python Software Foundation, licensed under the Python
# Software Foundation License 2.
#
# Copyright notice:
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013 Python Software Foundation. All rights reserved.
#
# Full license text: http://docs.python.org/3.4/license.html
# Author: Jack Diederich, Daniel Neuhäuser
# Local imports
from lib2to3 import fixer_base
from lib2to3.pygram import token
from lib2to3.fixer_util import Name, syms, Node, Leaf, touch_import, Call, \
String, Comma, parenthesize
def has_metaclass(parent):
""" we have to check the cls_node without changing it.
    There are two possibilities:
1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
2) clsdef => simple_stmt => expr_stmt => Leaf('__meta')
"""
for node in parent.children:
if node.type == syms.suite:
return has_metaclass(node)
elif node.type == syms.simple_stmt and node.children:
expr_node = node.children[0]
if expr_node.type == syms.expr_stmt and expr_node.children:
left_side = expr_node.children[0]
if isinstance(left_side, Leaf) and \
left_side.value == '__metaclass__':
return True
return False
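# Example: for the source "class X:\n    __metaclass__ = Meta\n" the suite
# form (possibility 1) applies and has_metaclass returns True; for the
# one-liner "class X: __metaclass__ = Meta" it is possibility 2.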
def fixup_parse_tree(cls_node):
""" one-line classes don't get a suite in the parse tree so we add
one to normalize the tree
"""
for node in cls_node.children:
if node.type == syms.suite:
# already in the preferred format, do nothing
return
# !%@#! oneliners have no suite node, we have to fake one up
for i, node in enumerate(cls_node.children):
if node.type == token.COLON:
break
else:
raise ValueError("No class suite and no ':'!")
# move everything into a suite node
suite = Node(syms.suite, [])
while cls_node.children[i+1:]:
move_node = cls_node.children[i+1]
suite.append_child(move_node.clone())
move_node.remove()
cls_node.append_child(suite)
node = suite
def fixup_simple_stmt(parent, i, stmt_node):
""" if there is a semi-colon all the parts count as part of the same
simple_stmt. We just want the __metaclass__ part so we move
        everything after the semi-colon into its own simple_stmt node
"""
for semi_ind, node in enumerate(stmt_node.children):
if node.type == token.SEMI: # *sigh*
break
else:
return
node.remove() # kill the semicolon
new_expr = Node(syms.expr_stmt, [])
new_stmt = Node(syms.simple_stmt, [new_expr])
while stmt_node.children[semi_ind:]:
move_node = stmt_node.children[semi_ind]
new_expr.append_child(move_node.clone())
move_node.remove()
parent.insert_child(i, new_stmt)
new_leaf1 = new_stmt.children[0].children[0]
old_leaf1 = stmt_node.children[0].children[0]
new_leaf1.prefix = old_leaf1.prefix
def remove_trailing_newline(node):
if node.children and node.children[-1].type == token.NEWLINE:
node.children[-1].remove()
def find_metas(cls_node):
# find the suite node (Mmm, sweet nodes)
for node in cls_node.children:
if node.type == syms.suite:
break
else:
raise ValueError("No class suite!")
# look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
for i, simple_node in list(enumerate(node.children)):
if simple_node.type == syms.simple_stmt and simple_node.children:
expr_node = simple_node.children[0]
if expr_node.type == syms.expr_stmt and expr_node.children:
# Check if the expr_node is a simple assignment.
left_node = expr_node.children[0]
if isinstance(left_node, Leaf) and \
left_node.value == u'__metaclass__':
                    # We found an assignment to __metaclass__.
fixup_simple_stmt(node, i, simple_node)
remove_trailing_newline(simple_node)
yield (node, i, simple_node)
def fixup_indent(suite):
""" If an INDENT is followed by a thing with a prefix then nuke the prefix
Otherwise we get in trouble when removing __metaclass__ at suite start
"""
kids = suite.children[::-1]
# find the first indent
while kids:
node = kids.pop()
if node.type == token.INDENT:
break
# find the first Leaf
while kids:
node = kids.pop()
if isinstance(node, Leaf) and node.type != token.DEDENT:
if node.prefix:
node.prefix = u''
return
else:
kids.extend(node.children[::-1])
class FixMetaclass(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
classdef<any*>
"""
def transform(self, node, results):
if not has_metaclass(node):
return
fixup_parse_tree(node)
# find metaclasses, keep the last one
last_metaclass = None
for suite, i, stmt in find_metas(node):
last_metaclass = stmt
stmt.remove()
text_type = node.children[0].type # always Leaf(nnn, 'class')
# figure out what kind of classdef we have
if len(node.children) == 7:
# Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
# 0 1 2 3 4 5 6
if node.children[3].type == syms.arglist:
arglist = node.children[3]
# Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
else:
parent = node.children[3].clone()
arglist = Node(syms.arglist, [parent])
node.set_child(3, arglist)
elif len(node.children) == 6:
# Node(classdef, ['class', 'name', '(', ')', ':', suite])
# 0 1 2 3 4 5
arglist = Node(syms.arglist, [])
node.insert_child(3, arglist)
elif len(node.children) == 4:
# Node(classdef, ['class', 'name', ':', suite])
# 0 1 2 3
arglist = Node(syms.arglist, [])
node.insert_child(2, Leaf(token.RPAR, u')'))
node.insert_child(2, arglist)
node.insert_child(2, Leaf(token.LPAR, u'('))
else:
raise ValueError("Unexpected class definition")
# now stick the metaclass in the arglist
meta_txt = last_metaclass.children[0].children[0]
meta_txt.value = 'metaclass'
orig_meta_prefix = meta_txt.prefix
# Was: touch_import(None, u'future.utils', node)
touch_import(u'future.utils', u'with_metaclass', node)
metaclass = last_metaclass.children[0].children[2].clone()
metaclass.prefix = u''
arguments = [metaclass]
if arglist.children:
if len(arglist.children) == 1:
base = arglist.children[0].clone()
base.prefix = u' '
else:
# Unfortunately six.with_metaclass() only allows one base
# class, so we have to dynamically generate a base class if
# there is more than one.
bases = parenthesize(arglist.clone())
bases.prefix = u' '
base = Call(Name('type'), [
String("'NewBase'"),
Comma(),
bases,
Comma(),
Node(
syms.atom,
[Leaf(token.LBRACE, u'{'), Leaf(token.RBRACE, u'}')],
prefix=u' '
)
], prefix=u' ')
arguments.extend([Comma(), base])
arglist.replace(Call(
Name(u'with_metaclass', prefix=arglist.prefix),
arguments
))
fixup_indent(suite)
# check for empty suite
if not suite.children:
            # one-liner that was just __metaclass__
suite.remove()
pass_leaf = Leaf(text_type, u'pass')
pass_leaf.prefix = orig_meta_prefix
node.append_child(pass_leaf)
node.append_child(Leaf(token.NEWLINE, u'\n'))
elif len(suite.children) > 1 and \
(suite.children[-2].type == token.INDENT and
suite.children[-1].type == token.DEDENT):
# there was only one line in the class body and it was __metaclass__
pass_leaf = Leaf(text_type, u'pass')
suite.insert_child(-1, pass_leaf)
suite.insert_child(-1, Leaf(token.NEWLINE, u'\n'))
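# A minimal usage sketch (assumed driver code, not part of this module). The
# fixer normally runs via the futurize CLI, but lib2to3's refactoring
# machinery can exercise it directly:
#
#   from lib2to3.refactor import RefactoringTool
#   rt = RefactoringTool(['libfuturize.fixes.fix_metaclass'])
#   src = 'class X(Base):\n    __metaclass__ = Meta\n    y = 1\n'
#   print(rt.refactor_string(src, '<example>'))
#   # -> from future.utils import with_metaclass
#   #    class X(with_metaclass(Meta, Base)):
#   #        y = 1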
|
faust64/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/net_config.py
|
137
|
#
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
        for key in list(result.keys()):
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        with open(filename, 'w') as f:
            f.write(contents)
return filename
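        # Example resulting path (illustrative):
        #   backup/<inventory_hostname>_config.2017-01-01@12:00:00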
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
        if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
        if hasattr(self._task, "_block"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
|
github4ry/pathomx
|
refs/heads/master
|
pathomx/plugins/merge/loader.py
|
2
|
import os
import copy
import numpy as np
import pathomx.ui as ui
import pathomx.db as db
import pathomx.utils as utils
from pathomx.data import DataDefinition
from pathomx.utils import UnicodeReader, UnicodeWriter
from pathomx.plugins import FilterPlugin
class MergeApp(ui.IPythonApp):
notebook = 'merge.ipynb'
shortname = 'merge'
legacy_outputs = {'output': 'output_data'}
def __init__(self, *args, **kwargs):
super(MergeApp, self).__init__(*args, **kwargs)
self.data.add_input('input_1') # Add input slot
self.data.add_input('input_2') # Add input slot
self.data.add_output('output_data') # Add output slot
# Setup data consumer options
self.data.consumer_defs.extend([
DataDefinition('input_1', {
'labels_n': (None, '>0'),
'entities_t': (None, None),
}),
DataDefinition('input_2', {
'labels_n': (None, '>0'),
'entities_t': (None, None),
}),
]
)
class Merge(FilterPlugin):
def __init__(self, *args, **kwargs):
super(Merge, self).__init__(*args, **kwargs)
MergeApp.plugin = self
self.register_app_launcher(MergeApp)
|
gauribhoite/personfinder
|
refs/heads/master
|
env/google_appengine/lib/django-1.5/django/views/generic/dates.py
|
107
|
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.utils import timezone
from django.views.generic.base import View
from django.views.generic.detail import BaseDetailView, SingleObjectTemplateResponseMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
class YearMixin(object):
"""
Mixin for views manipulating year-based data.
"""
year_format = '%Y'
year = None
def get_year_format(self):
"""
Get a year format string in strptime syntax to be used to parse the
year from url variables.
"""
return self.year_format
def get_year(self):
"""
Return the year for which this view should display data.
"""
year = self.year
if year is None:
try:
year = self.kwargs['year']
except KeyError:
try:
year = self.request.GET['year']
except KeyError:
raise Http404(_("No year specified"))
return year
def get_next_year(self, date):
"""
Get the next valid year.
"""
return _get_next_prev(self, date, is_previous=False, period='year')
def get_previous_year(self, date):
"""
Get the previous valid year.
"""
return _get_next_prev(self, date, is_previous=True, period='year')
def _get_next_year(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date.replace(year=date.year + 1, month=1, day=1)
def _get_current_year(self, date):
"""
Return the start date of the current interval.
"""
return date.replace(month=1, day=1)
class MonthMixin(object):
"""
Mixin for views manipulating month-based data.
"""
month_format = '%b'
month = None
def get_month_format(self):
"""
Get a month format string in strptime syntax to be used to parse the
month from url variables.
"""
return self.month_format
def get_month(self):
"""
Return the month for which this view should display data.
"""
month = self.month
if month is None:
try:
month = self.kwargs['month']
except KeyError:
try:
month = self.request.GET['month']
except KeyError:
raise Http404(_("No month specified"))
return month
def get_next_month(self, date):
"""
Get the next valid month.
"""
return _get_next_prev(self, date, is_previous=False, period='month')
def get_previous_month(self, date):
"""
Get the previous valid month.
"""
return _get_next_prev(self, date, is_previous=True, period='month')
def _get_next_month(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
if date.month == 12:
return date.replace(year=date.year + 1, month=1, day=1)
else:
return date.replace(month=date.month + 1, day=1)
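        # e.g. _get_next_month(datetime.date(2013, 12, 15)) returns
        # datetime.date(2014, 1, 1).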
def _get_current_month(self, date):
"""
Return the start date of the previous interval.
"""
return date.replace(day=1)
class DayMixin(object):
"""
Mixin for views manipulating day-based data.
"""
day_format = '%d'
day = None
def get_day_format(self):
"""
Get a day format string in strptime syntax to be used to parse the day
from url variables.
"""
return self.day_format
def get_day(self):
"""
Return the day for which this view should display data.
"""
day = self.day
if day is None:
try:
day = self.kwargs['day']
except KeyError:
try:
day = self.request.GET['day']
except KeyError:
raise Http404(_("No day specified"))
return day
def get_next_day(self, date):
"""
Get the next valid day.
"""
return _get_next_prev(self, date, is_previous=False, period='day')
def get_previous_day(self, date):
"""
Get the previous valid day.
"""
return _get_next_prev(self, date, is_previous=True, period='day')
def _get_next_day(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date + datetime.timedelta(days=1)
def _get_current_day(self, date):
"""
Return the start date of the current interval.
"""
return date
class WeekMixin(object):
"""
Mixin for views manipulating week-based data.
"""
week_format = '%U'
week = None
def get_week_format(self):
"""
Get a week format string in strptime syntax to be used to parse the
week from url variables.
"""
return self.week_format
def get_week(self):
"""
        Return the week for which this view should display data.
"""
week = self.week
if week is None:
try:
week = self.kwargs['week']
except KeyError:
try:
week = self.request.GET['week']
except KeyError:
raise Http404(_("No week specified"))
return week
def get_next_week(self, date):
"""
Get the next valid week.
"""
return _get_next_prev(self, date, is_previous=False, period='week')
def get_previous_week(self, date):
"""
Get the previous valid week.
"""
return _get_next_prev(self, date, is_previous=True, period='week')
def _get_next_week(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date + datetime.timedelta(days=7 - self._get_weekday(date))
def _get_current_week(self, date):
"""
Return the start date of the current interval.
"""
return date - datetime.timedelta(self._get_weekday(date))
def _get_weekday(self, date):
"""
Return the weekday for a given date.
The first day according to the week format is 0 and the last day is 6.
"""
week_format = self.get_week_format()
if week_format == '%W': # week starts on Monday
return date.weekday()
elif week_format == '%U': # week starts on Sunday
return (date.weekday() + 1) % 7
else:
raise ValueError("unknown week format: %s" % week_format)
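        # Worked example: datetime.date(2013, 1, 1) is a Tuesday, so
        # date.weekday() == 1; _get_weekday returns 1 under '%W' (Monday
        # start) and (1 + 1) % 7 == 2 under '%U' (Sunday start).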
class DateMixin(object):
"""
Mixin class for views manipulating date-based data.
"""
date_field = None
allow_future = False
def get_date_field(self):
"""
Get the name of the date field to be used to filter by.
"""
if self.date_field is None:
raise ImproperlyConfigured("%s.date_field is required." % self.__class__.__name__)
return self.date_field
def get_allow_future(self):
"""
Returns `True` if the view should be allowed to display objects from
the future.
"""
return self.allow_future
# Note: the following three methods only work in subclasses that also
# inherit SingleObjectMixin or MultipleObjectMixin.
@cached_property
def uses_datetime_field(self):
"""
Return `True` if the date field is a `DateTimeField` and `False`
if it's a `DateField`.
"""
model = self.get_queryset().model if self.model is None else self.model
field = model._meta.get_field(self.get_date_field())
return isinstance(field, models.DateTimeField)
def _make_date_lookup_arg(self, value):
"""
Convert a date into a datetime when the date field is a DateTimeField.
When time zone support is enabled, `date` is assumed to be in the
current time zone, so that displayed items are consistent with the URL.
"""
if self.uses_datetime_field:
value = datetime.datetime.combine(value, datetime.time.min)
if settings.USE_TZ:
value = timezone.make_aware(value, timezone.get_current_timezone())
return value
def _make_single_date_lookup(self, date):
"""
Get the lookup kwargs for filtering on a single date.
If the date field is a DateTimeField, we can't just filter on
date_field=date because that doesn't take the time into account.
"""
date_field = self.get_date_field()
if self.uses_datetime_field:
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(date + datetime.timedelta(days=1))
return {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
else:
# Skip self._make_date_lookup_arg, it's a no-op in this branch.
return {date_field: date}
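# Illustrative sketch (added for exposition): the lookup kwargs produced for a
# single day. The field name 'pub_date' is a hypothetical example.
#
#     # For a DateField:
#     #   {'pub_date': datetime.date(2013, 1, 10)}
#     # For a DateTimeField (with USE_TZ=False):
#     #   {'pub_date__gte': datetime.datetime(2013, 1, 10, 0, 0),
#     #    'pub_date__lt': datetime.datetime(2013, 1, 11, 0, 0)}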
class BaseDateListView(MultipleObjectMixin, DateMixin, View):
"""
Abstract base class for date-based views displaying a list of objects.
"""
allow_empty = False
date_list_period = 'year'
def get(self, request, *args, **kwargs):
self.date_list, self.object_list, extra_context = self.get_dated_items()
context = self.get_context_data(object_list=self.object_list,
date_list=self.date_list)
context.update(extra_context)
return self.render_to_response(context)
def get_dated_items(self):
"""
Obtain the list of dates and items.
"""
raise NotImplementedError('A DateView must provide an implementation of get_dated_items()')
def get_dated_queryset(self, ordering=None, **lookup):
"""
Get a queryset properly filtered according to `allow_future` and any
extra lookup kwargs.
"""
qs = self.get_queryset().filter(**lookup)
date_field = self.get_date_field()
allow_future = self.get_allow_future()
allow_empty = self.get_allow_empty()
paginate_by = self.get_paginate_by(qs)
if ordering is not None:
qs = qs.order_by(ordering)
if not allow_future:
now = timezone.now() if self.uses_datetime_field else timezone_today()
qs = qs.filter(**{'%s__lte' % date_field: now})
if not allow_empty:
# When pagination is enabled, it's better to do a cheap query
# than to load the unpaginated queryset in memory.
is_empty = len(qs) == 0 if paginate_by is None else not qs.exists()
if is_empty:
raise Http404(_("No %(verbose_name_plural)s available") % {
'verbose_name_plural': force_text(qs.model._meta.verbose_name_plural)
})
return qs
def get_date_list_period(self):
"""
Get the aggregation period for the list of dates: 'year', 'month', or 'day'.
"""
return self.date_list_period
def get_date_list(self, queryset, date_type=None, ordering='ASC'):
"""
Get a date list by calling `queryset.dates()`, checking along the way
for empty lists that aren't allowed.
"""
date_field = self.get_date_field()
allow_empty = self.get_allow_empty()
if date_type is None:
date_type = self.get_date_list_period()
date_list = queryset.dates(date_field, date_type, ordering)
if date_list is not None and not date_list and not allow_empty:
name = force_text(queryset.model._meta.verbose_name_plural)
raise Http404(_("No %(verbose_name_plural)s available") %
{'verbose_name_plural': name})
return date_list
class BaseArchiveIndexView(BaseDateListView):
"""
Base class for archives of date-based items.
Requires a response mixin.
"""
context_object_name = 'latest'
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
qs = self.get_dated_queryset(ordering='-%s' % self.get_date_field())
date_list = self.get_date_list(qs, ordering='DESC')
if not date_list:
qs = qs.none()
return (date_list, qs, {})
class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView):
"""
Top-level archive of date-based items.
"""
template_name_suffix = '_archive'
class BaseYearArchiveView(YearMixin, BaseDateListView):
"""
List of objects published in a given year.
"""
date_list_period = 'month'
make_object_list = False
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
date_field = self.get_date_field()
date = _date_from_string(year, self.get_year_format())
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_year(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(ordering='-%s' % date_field, **lookup_kwargs)
date_list = self.get_date_list(qs)
if not self.get_make_object_list():
# We need this to be a queryset since parent classes introspect it
# to find information about the model.
qs = qs.none()
return (date_list, qs, {
'year': date,
'next_year': self.get_next_year(date),
'previous_year': self.get_previous_year(date),
})
def get_make_object_list(self):
"""
Return `True` if this view should contain the full list of objects in
the given year.
"""
return self.make_object_list
class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):
"""
List of objects published in a given year.
"""
template_name_suffix = '_archive_year'
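# Usage sketch (added for exposition): wiring these views into a URLconf. The
# Article model and its 'pub_date' field are hypothetical.
#
#     from django.conf.urls import url
#     from django.views.generic.dates import ArchiveIndexView, YearArchiveView
#     from myapp.models import Article
#
#     urlpatterns = [
#         url(r'^articles/$',
#             ArchiveIndexView.as_view(model=Article, date_field='pub_date')),
#         url(r'^articles/(?P<year>\d{4})/$',
#             YearArchiveView.as_view(model=Article, date_field='pub_date',
#                                     make_object_list=True)),
#     ]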
class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView):
"""
List of objects published in a given month.
"""
date_list_period = 'day'
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
month = self.get_month()
date_field = self.get_date_field()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format())
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_month(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
date_list = self.get_date_list(qs)
return (date_list, qs, {
'month': date,
'next_month': self.get_next_month(date),
'previous_month': self.get_previous_month(date),
})
class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):
"""
List of objects published in a given month.
"""
template_name_suffix = '_archive_month'
class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView):
"""
List of objects published in a given week.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
week = self.get_week()
date_field = self.get_date_field()
week_format = self.get_week_format()
week_start = {
'%W': '1',
'%U': '0',
}[week_format]
date = _date_from_string(year, self.get_year_format(),
week_start, '%w',
week, week_format)
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_week(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
return (None, qs, {
'week': date,
'next_week': self.get_next_week(date),
'previous_week': self.get_previous_week(date),
})
class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):
"""
List of objects published in a given week.
"""
template_name_suffix = '_archive_week'
class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView):
"""
List of objects published on a given day.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
month = self.get_month()
day = self.get_day()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format(),
day, self.get_day_format())
return self._get_dated_items(date)
def _get_dated_items(self, date):
"""
Do the actual heavy lifting of getting the dated items; this accepts a
date object so that TodayArchiveView can be trivial.
"""
lookup_kwargs = self._make_single_date_lookup(date)
qs = self.get_dated_queryset(**lookup_kwargs)
return (None, qs, {
'day': date,
'previous_day': self.get_previous_day(date),
'next_day': self.get_next_day(date),
'previous_month': self.get_previous_month(date),
'next_month': self.get_next_month(date)
})
class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):
"""
List of objects published on a given day.
"""
template_name_suffix = "_archive_day"
class BaseTodayArchiveView(BaseDayArchiveView):
"""
List of objects published today.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
return self._get_dated_items(datetime.date.today())
class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):
"""
List of objects published today.
"""
template_name_suffix = "_archive_day"
class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView):
"""
Detail view of a single object on a single date; this differs from the
standard DetailView by accepting a year/month/day in the URL.
"""
def get_object(self, queryset=None):
"""
Get the object this request displays.
"""
year = self.get_year()
month = self.get_month()
day = self.get_day()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format(),
day, self.get_day_format())
# Use a custom queryset if provided
qs = queryset or self.get_queryset()
if not self.get_allow_future() and date > datetime.date.today():
raise Http404(_("Future %(verbose_name_plural)s not available because %(class_name)s.allow_future is False.") % {
'verbose_name_plural': qs.model._meta.verbose_name_plural,
'class_name': self.__class__.__name__,
})
# Filter down a queryset from self.queryset using the date from the
# URL. This'll get passed as the queryset to DetailView.get_object,
# which'll handle the 404
lookup_kwargs = self._make_single_date_lookup(date)
qs = qs.filter(**lookup_kwargs)
return super(BaseDetailView, self).get_object(queryset=qs)
class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):
"""
Detail view of a single object on a single date; this differs from the
standard DetailView by accepting a year/month/day in the URL.
"""
template_name_suffix = '_detail'
def _date_from_string(year, year_format, month='', month_format='', day='', day_format='', delim='__'):
"""
Helper: get a datetime.date object given a format string and a year,
month, and day (only year is mandatory). Raise a 404 for an invalid date.
"""
format = delim.join((year_format, month_format, day_format))
datestr = delim.join((year, month, day))
try:
return datetime.datetime.strptime(datestr, format).date()
except ValueError:
raise Http404(_("Invalid date string '%(datestr)s' given format '%(format)s'") % {
'datestr': datestr,
'format': format,
})
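# Illustrative sketch (added for exposition): how _date_from_string joins the
# format parts and values with the delimiter before parsing.
#
#     # _date_from_string('2013', '%Y', '01', '%m', '10', '%d') builds
#     # format '%Y__%m__%d' and datestr '2013__01__10';
#     # datetime.datetime.strptime('2013__01__10', '%Y__%m__%d').date()
#     # yields datetime.date(2013, 1, 10), and a ValueError becomes Http404.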
def _get_next_prev(generic_view, date, is_previous, period):
"""
Helper: Get the next or the previous valid date. The idea is to allow
links on month/day views to never be 404s by never providing a date
that'll be invalid for the given view.
This is a bit complicated since it handles different intervals of time,
hence the coupling to generic_view.
However in essence the logic comes down to:
* If allow_empty and allow_future are both true, this is easy: just
return the naive result (just the next/previous day/week/month,
regardless of object existence.)
* If allow_empty is true, allow_future is false, and the naive result
isn't in the future, then return it; otherwise return None.
* If allow_empty is false and allow_future is true, return the next
date *that contains a valid object*, even if it's in the future. If
there are no next objects, return None.
* If allow_empty is false and allow_future is false, return the next
date that contains a valid object. If that date is in the future, or
if there are no next objects, return None.
"""
date_field = generic_view.get_date_field()
allow_empty = generic_view.get_allow_empty()
allow_future = generic_view.get_allow_future()
get_current = getattr(generic_view, '_get_current_%s' % period)
get_next = getattr(generic_view, '_get_next_%s' % period)
# Bounds of the current interval
start, end = get_current(date), get_next(date)
# If allow_empty is True, the naive result will be valid
if allow_empty:
if is_previous:
result = get_current(start - datetime.timedelta(days=1))
else:
result = end
if allow_future or result <= timezone_today():
return result
else:
return None
# Otherwise, we'll need to go to the database to look for an object
# whose date_field is at least (greater than/less than) the given
# naive result
else:
# Construct a lookup and an ordering depending on whether we're doing
# a previous date or a next date lookup.
if is_previous:
lookup = {'%s__lt' % date_field: generic_view._make_date_lookup_arg(start)}
ordering = '-%s' % date_field
else:
lookup = {'%s__gte' % date_field: generic_view._make_date_lookup_arg(end)}
ordering = date_field
# Filter out objects in the future if appropriate.
if not allow_future:
# Fortunately, to match the implementation of allow_future,
# we need __lte, which doesn't conflict with __lt above.
if generic_view.uses_datetime_field:
now = timezone.now()
else:
now = timezone_today()
lookup['%s__lte' % date_field] = now
qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)
# Snag the first object from the queryset; if it doesn't exist that
# means there's no next/previous link available.
try:
result = getattr(qs[0], date_field)
except IndexError:
return None
# Convert datetimes to dates in the current time zone.
if generic_view.uses_datetime_field:
if settings.USE_TZ:
result = timezone.localtime(result)
result = result.date()
# Return the first day of the period.
return get_current(result)
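# Worked example (added for exposition), period='month' with allow_empty=True:
#
#     import datetime
#     date = datetime.date(2013, 1, 15)
#     start = date.replace(day=1)                                  # 2013-01-01
#     end = date.replace(month=2, day=1)                           # 2013-02-01
#     prev = (start - datetime.timedelta(days=1)).replace(day=1)   # 2012-12-01
#     # is_previous=False returns end; is_previous=True returns prev,
#     # subject to the allow_future / timezone_today() check above.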
def timezone_today():
"""
Return the current date in the current time zone.
"""
if settings.USE_TZ:
return timezone.localtime(timezone.now()).date()
else:
return datetime.date.today()
|
longde123/MultiversePlatform
|
refs/heads/master
|
lib/IPCE/Lib/BeautifulSoup.py
|
307
|
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.0.4"
__copyright__ = "Copyright (c) 2004-2007 Leonard Richardson"
__license__ = "PSF"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
#This hack makes Beautiful Soup able to parse XML with namespaces
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
DEFAULT_OUTPUT_ENCODING = "utf-8"
# First, the classes that represent markup elements.
class PageElement:
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
def setup(self, parent=None, previous=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous = previous
self.next = None
self.previousSibling = None
self.nextSibling = None
if self.parent and self.parent.contents:
self.previousSibling = self.parent.contents[-1]
self.previousSibling.nextSibling = self
def replaceWith(self, replaceWith):
oldParent = self.parent
myIndex = self.parent.contents.index(self)
if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent:
# We're replacing this element with one of its siblings.
index = self.parent.contents.index(replaceWith)
if index and index < myIndex:
# Furthermore, it comes before this element. That
# means that when we extract it, the index of this
# element will change.
myIndex = myIndex - 1
self.extract()
oldParent.insert(myIndex, replaceWith)
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent:
try:
self.parent.contents.remove(self)
except ValueError:
pass
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
lastChild = self._lastRecursiveChild()
nextElement = lastChild.next
if self.previous:
self.previous.next = nextElement
if nextElement:
nextElement.previous = self.previous
self.previous = None
lastChild.next = None
self.parent = None
if self.previousSibling:
self.previousSibling.nextSibling = self.nextSibling
if self.nextSibling:
self.nextSibling.previousSibling = self.previousSibling
self.previousSibling = self.nextSibling = None
def _lastRecursiveChild(self):
"Finds the last element beneath this object to be parsed."
lastChild = self
while hasattr(lastChild, 'contents') and lastChild.contents:
lastChild = lastChild.contents[-1]
return lastChild
def insert(self, position, newChild):
if (isinstance(newChild, basestring)
or isinstance(newChild, unicode)) \
and not isinstance(newChild, NavigableString):
newChild = NavigableString(newChild)
position = min(position, len(self.contents))
if hasattr(newChild, 'parent') and newChild.parent != None:
# We're 'inserting' an element that's already one
# of this object's children.
if newChild.parent == self:
index = self.find(newChild)
if index and index < position:
# Furthermore we're moving it further down the
# list of this object's children. That means that
# when we extract this element, our target index
# will jump down one.
position = position - 1
newChild.extract()
newChild.parent = self
previousChild = None
if position == 0:
newChild.previousSibling = None
newChild.previous = self
else:
previousChild = self.contents[position-1]
newChild.previousSibling = previousChild
newChild.previousSibling.nextSibling = newChild
newChild.previous = previousChild._lastRecursiveChild()
if newChild.previous:
newChild.previous.next = newChild
newChildsLastElement = newChild._lastRecursiveChild()
if position >= len(self.contents):
newChild.nextSibling = None
parent = self
parentsNextSibling = None
while not parentsNextSibling:
parentsNextSibling = parent.nextSibling
parent = parent.parent
if not parent: # This is the last element in the document.
break
if parentsNextSibling:
newChildsLastElement.next = parentsNextSibling
else:
newChildsLastElement.next = None
else:
nextChild = self.contents[position]
newChild.nextSibling = nextChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
newChildsLastElement.next = nextChild
if newChildsLastElement.next:
newChildsLastElement.next.previous = newChildsLastElement
self.contents.insert(position, newChild)
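# Illustrative sketch (added for exposition): extract, insert and replaceWith
# in action. The markup is hypothetical.
#
#     soup = BeautifulSoup('<p>one <b>two</b> three</p>')
#     b = soup.b
#     b.extract()                  # rips <b>two</b> out of the tree
#     soup.p.insert(0, b)          # re-inserts it as the first child of <p>
#     soup.b.replaceWith('TWO')    # swaps the tag for a NavigableString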
def findNext(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
def findAllNext(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.nextGenerator, **kwargs)
def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._findOne(self.findNextSiblings, name, attrs, text,
**kwargs)
def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.nextSiblingGenerator, **kwargs)
fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.previousGenerator,
**kwargs)
fetchPrevious = findAllPrevious # Compatibility with pre-3.x
def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._findOne(self.findPreviousSiblings, name, attrs, text,
**kwargs)
def findPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.previousSiblingGenerator, **kwargs)
fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
def findParent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _findOne because findParents takes a different
# set of arguments.
r = None
l = self.findParents(name, attrs, 1, **kwargs)
if l:
r = l[0]
return r
def findParents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._findAll(name, attrs, None, limit, self.parentGenerator,
**kwargs)
fetchParents = findParents # Compatibility with pre-3.x
#These methods do the real heavy lifting.
def _findOne(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
else:
# Build a SoupStrainer
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
g = generator()
while True:
try:
i = g.next()
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These Generators can be used to navigate starting from both
#NavigableStrings and Tags.
def nextGenerator(self):
i = self
while i:
i = i.next
yield i
def nextSiblingGenerator(self):
i = self
while i:
i = i.nextSibling
yield i
def previousGenerator(self):
i = self
while i:
i = i.previous
yield i
def previousSiblingGenerator(self):
i = self
while i:
i = i.previousSibling
yield i
def parentGenerator(self):
i = self
while i:
i = i.parent
yield i
# Utility methods
def substituteEncoding(self, str, encoding=None):
encoding = encoding or "utf-8"
return str.replace("%SOUP-ENCODING%", encoding)
def toEncoding(self, s, encoding=None):
"""Encodes an object to a string in some encoding, or to Unicode.
."""
if isinstance(s, unicode):
if encoding:
s = s.encode(encoding)
elif isinstance(s, str):
if encoding:
s = s.encode(encoding)
else:
s = unicode(s)
else:
if encoding:
s = self.toEncoding(str(s), encoding)
else:
s = unicode(s)
return s
class NavigableString(unicode, PageElement):
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
def __unicode__(self):
return self.__str__(None)
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
if encoding:
return self.encode(encoding)
else:
return self
class CData(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
class ProcessingInstruction(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
output = self
if "%SOUP-ENCODING%" in output:
output = self.substituteEncoding(output, encoding)
return "<?%s?>" % self.toEncoding(output, encoding)
class Comment(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!--%s-->" % NavigableString.__str__(self, encoding)
class Declaration(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!%s>" % NavigableString.__str__(self, encoding)
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
XML_SPECIAL_CHARS_TO_ENTITIES = { "'" : "squot",
'"' : "quote",
"&" : "amp",
"<" : "lt",
">" : "gt" }
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs == None:
attrs = []
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.__str__(encoding)
def __unicode__(self):
return self.__str__(None)
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
encodedName = self.toEncoding(self.name, encoding)
attrs = []
if self.attrs:
for key, val in self.attrs:
fmt = '%s="%s"'
if isString(val):
if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
val = self.substituteEncoding(val, encoding)
# The attribute value either:
#
# * Contains no embedded double quotes or single quotes.
# No problem: we enclose it in double quotes.
# * Contains embedded single quotes. No problem:
# double quotes work here too.
# * Contains embedded double quotes. No problem:
# we enclose it in single quotes.
# * Embeds both single _and_ double quotes. This
# can't happen naturally, but it can happen if
# you modify an attribute value after parsing
# the document. Now we have a bit of a
# problem. We solve it by enclosing the
# attribute in single quotes, and escaping any
# embedded single quotes to XML entities.
if '"' in val:
fmt = "%s='%s'"
# This can't happen naturally, but it can happen
# if you modify an attribute value after parsing.
if "'" in val:
val = val.replace("'", "&squot;")
# Now we're okay w/r/t quotes. But the attribute
# value might also contain angle brackets, or
# ampersands that aren't part of entities. We need
# to escape those to XML entities too.
val = re.sub("([<>]|&(?![^\s]+;))",
lambda x: "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";",
val)
attrs.append(fmt % (self.toEncoding(key, encoding),
self.toEncoding(val, encoding)))
close = ''
closeTag = ''
if self.isSelfClosing:
close = ' /'
else:
closeTag = '</%s>' % encodedName
indentTag, indentContents = 0, 0
if prettyPrint:
indentTag = indentLevel
space = (' ' * (indentTag-1))
indentContents = indentTag + 1
contents = self.renderContents(encoding, prettyPrint, indentContents)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if prettyPrint:
s.append(space)
s.append('<%s%s%s>' % (encodedName, attributeString, close))
if prettyPrint:
s.append("\n")
s.append(contents)
if prettyPrint and contents and contents[-1] != "\n":
s.append("\n")
if prettyPrint and closeTag:
s.append(space)
s.append(closeTag)
if prettyPrint and closeTag and self.nextSibling:
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.__str__(encoding, True)
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Renders the contents of this tag as a string in the given
encoding. If encoding is None, returns a Unicode string."""
s=[]
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.__str__(encoding)
elif isinstance(c, Tag):
s.append(c.__str__(encoding, prettyPrint, indentLevel))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel-1))
s.append(text)
if prettyPrint:
s.append("\n")
return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll
# Pre-3.x compatibility methods
first = find
fetch = findAll
def fetchText(self, text=None, recursive=True, limit=None):
return self.findAll(text=text, recursive=recursive, limit=limit)
def firstText(self, text=None, recursive=True):
return self.find(text=text, recursive=recursive)
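# Search sketch (added for exposition; markup and values are hypothetical):
#
#     soup = BeautifulSoup('<div class="hit">a</div><div>b</div>')
#     soup.findAll('div')                       # both divs
#     soup.findAll('div', {'class': 'hit'})     # only the first
#     soup.findAll('div', limit=1)              # stop after one match
#     import re
#     soup.findAll(text=re.compile('^[ab]$'))   # NavigableStrings u'a', u'b'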
#Utility methods
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.contents.append(tag)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap', None):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def childGenerator(self):
for i in range(0, len(self.contents)):
yield self.contents[i]
raise StopIteration
def recursiveChildGenerator(self):
stack = [(self, 0)]
while stack:
tag, start = stack.pop()
if isinstance(tag, Tag):
for i in range(start, len(tag.contents)):
a = tag.contents[i]
yield a
if isinstance(a, Tag) and tag.contents:
if i < len(tag.contents) - 1:
stack.append((tag, i+1))
stack.append((a, 0))
break
raise StopIteration
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = name
if isString(attrs):
kwargs['class'] = attrs
attrs = None
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
self.attrs = attrs
self.text = text
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def searchTag(self, markupName=None, markupAttrs={}):
found = None
markup = None
if isinstance(markupName, Tag):
markup = markupName
markupAttrs = markup
callFunctionWithTagData = callable(self.name) \
and not isinstance(markupName, Tag)
if (not self.name) \
or callFunctionWithTagData \
or (markup and self._matches(markup, self.name)) \
or (not markup and self._matches(markupName, self.name)):
if callFunctionWithTagData:
match = self.name(markupName, markupAttrs)
else:
match = True
markupAttrMap = None
for attr, matchAgainst in self.attrs.items():
if not markupAttrMap:
if hasattr(markupAttrs, 'get'):
markupAttrMap = markupAttrs
else:
markupAttrMap = {}
for k,v in markupAttrs:
markupAttrMap[k] = v
attrValue = markupAttrMap.get(attr)
if not self._matches(attrValue, matchAgainst):
match = False
break
if match:
if markup:
found = markup
else:
found = markupName
return found
def search(self, markup):
#print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if isList(markup) and not isinstance(markup, Tag):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text:
found = self.searchTag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isString(markup):
if self._matches(markup, self.text):
found = markup
else:
raise Exception, "I don't know how to match against a %s" \
% markup.__class__
return found
def _matches(self, markup, matchAgainst):
#print "Matching %s against %s" % (markup, matchAgainst)
result = False
if matchAgainst == True and type(matchAgainst) == types.BooleanType:
result = markup != None
elif callable(matchAgainst):
result = matchAgainst(markup)
else:
#Custom match methods take the tag as an argument, but all
#other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
if markup and not isString(markup):
markup = unicode(markup)
#Now we know that chunk is either a string, or None.
if hasattr(matchAgainst, 'match'):
# It's a regexp object.
result = markup and matchAgainst.search(markup)
elif isList(matchAgainst):
result = markup in matchAgainst
elif hasattr(matchAgainst, 'items'):
result = markup.has_key(matchAgainst)
elif matchAgainst and isString(markup):
if isinstance(markup, unicode):
matchAgainst = unicode(matchAgainst)
else:
matchAgainst = str(matchAgainst)
if not result:
result = matchAgainst == markup
return result
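# SoupStrainer sketch (added for exposition; 'page' is a hypothetical markup
# string): the same strainer can drive a search or restrict parsing.
#
#     import re
#     links = SoupStrainer('a', href=re.compile('^http'))
#     soup = BeautifulSoup(page, parseOnlyThese=links)  # parse only those <a>s
#     # During parsing, a callable name receives (name, attrs):
#     cells = SoupStrainer(lambda name, attrs: name == 'td' and not attrs)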
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source):
list.__init__(self)
self.source = source
# Now, some helper functions.
def isList(l):
"""Convenience method that works with all 2.x versions of Python
to determine whether or not something is listlike."""
return hasattr(l, '__iter__') \
or (type(l) in (types.ListType, types.TupleType))
def isString(s):
"""Convenience method that works with all 2.x versions of Python
to determine whether or not something is stringlike."""
try:
return isinstance(s, unicode) or isinstance(s, basestring)
except NameError:
return isinstance(s, str)
def buildTagMap(default, *args):
"""Turns a list of maps, lists, or scalars into a single map.
Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
NESTING_RESET_TAGS maps out of lists and partial maps."""
built = {}
for portion in args:
if hasattr(portion, 'items'):
#It's a map. Merge it.
for k,v in portion.items():
built[k] = v
elif isList(portion):
#It's a list. Map each item to the default.
for k in portion:
built[k] = default
else:
#It's a scalar. Map it to the default.
built[portion] = default
return built
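# Worked example (added for exposition): buildTagMap merges maps, lists and
# scalars into a single dict.
#
#     buildTagMap(None, {'a': ['b']}, ['c', 'd'], 'e')
#     # -> {'a': ['b'], 'c': None, 'd': None, 'e': None}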
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
XML_ENTITY_LIST = {}
for i in Tag.XML_SPECIAL_CHARS_TO_ENTITIES.values():
XML_ENTITY_LIST[i] = True
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
ROOT_TAG_NAME = u'[document]'
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
markupMassage=True, smartQuotesTo=XML_ENTITIES,
convertEntities=None, selfClosingTags=None):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser.
sgmllib will process most bad HTML, and the BeautifulSoup
class has some tricks for dealing with some HTML that kills
sgmllib, but Beautiful Soup can nonetheless choke or lose data
if your data uses self-closing tags or declarations
incorrectly.
By default, Beautiful Soup uses regexes to sanitize input,
avoiding the vast majority of these problems. If the problems
don't apply to you, pass in False for markupMassage, and
you'll get better performance.
The default parser massage techniques fix the two most common
instances of invalid HTML that choke sgmllib:
<br/> (No space between name of closing tag and tag close)
<! --Comment--> (Extraneous whitespace in declaration)
You can pass in a custom list of (RE object, replace method)
tuples to get Beautiful Soup to scrub your input the way you
want."""
self.parseOnlyThese = parseOnlyThese
self.fromEncoding = fromEncoding
self.smartQuotesTo = smartQuotesTo
self.convertEntities = convertEntities
if self.convertEntities:
# It doesn't make sense to convert encoded characters to
# entities even while you're converting entities to Unicode.
# Just convert it all to Unicode.
self.smartQuotesTo = None
self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
SGMLParser.__init__(self)
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
self.markup = markup
self.markupMassage = markupMassage
try:
self._feed()
except StopParsing:
pass
self.markup = None # The markup can now be GCed
def _feed(self, inDocumentEncoding=None):
# Convert the document to Unicode.
markup = self.markup
if isinstance(markup, unicode):
if not hasattr(self, 'originalEncoding'):
self.originalEncoding = None
else:
dammit = UnicodeDammit\
(markup, [self.fromEncoding, inDocumentEncoding],
smartQuotesTo=self.smartQuotesTo)
markup = dammit.unicode
self.originalEncoding = dammit.originalEncoding
if markup:
if self.markupMassage:
if not isList(self.markupMassage):
self.markupMassage = self.MARKUP_MASSAGE
for fix, m in self.markupMassage:
markup = fix.sub(m, markup)
self.reset()
SGMLParser.feed(self, markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def __getattr__(self, methodName):
"""This method routes method call requests to either the SGMLParser
superclass or the Tag superclass, depending on the method name."""
#print "__getattr__ called on %s.%s" % (self.__class__, methodName)
if methodName.find('start_') == 0 or methodName.find('end_') == 0 \
or methodName.find('do_') == 0:
return SGMLParser.__getattr__(self, methodName)
elif methodName.find('__') != 0:
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name)
def reset(self):
Tag.__init__(self, self, self.ROOT_TAG_NAME)
self.hidden = 1
SGMLParser.reset(self)
self.currentData = []
self.currentTag = None
self.tagStack = []
self.quoteStack = []
self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
# Tags with just one string-owning child get the child as a
# 'string' property, so that soup.tag.string is shorthand for
# soup.tag.contents[0]
if len(self.currentTag.contents) == 1 and \
isinstance(self.currentTag.contents[0], NavigableString):
self.currentTag.string = self.currentTag.contents[0]
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
if self.currentData:
currentData = ''.join(self.currentData)
if not currentData.strip():
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
self.currentData = []
if self.parseOnlyThese and len(self.tagStack) <= 1 and \
(not self.parseOnlyThese.text or \
not self.parseOnlyThese.search(currentData)):
return
o = containerClass(currentData)
o.setup(self.currentTag, self.previous)
if self.previous:
self.previous.next = o
self.previous = o
self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in range(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in range(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def _smartPop(self, name):
"""We need to pop up to the previous tag of this type, unless
one of this tag's nesting reset triggers comes between this
tag and the previous tag of this type, OR unless this tag is a
generic nesting trigger and another generic nesting trigger
comes between this tag and the previous tag of this type.
Examples:
<p>Foo<b>Bar<p> should pop to 'p', not 'b'.
<p>Foo<table>Bar<p> should pop to 'table', not 'p'.
<p>Foo<table><tr>Bar<p> should pop to 'tr', not 'p'.
<p>Foo<b>Bar<p> should pop to 'p', not 'b'.
<li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
<td><tr><td> *<td>* should pop to 'tr', not the first 'td'
"""
nestingResetTriggers = self.NESTABLE_TAGS.get(name)
isNestable = nestingResetTriggers != None
isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
popTo = None
inclusive = True
for i in range(len(self.tagStack)-1, 0, -1):
p = self.tagStack[i]
if (not p or p.name == name) and not isNestable:
#Non-nestable tags get popped to the top or to their
#last occurrence.
popTo = name
break
if (nestingResetTriggers != None
and p.name in nestingResetTriggers) \
or (nestingResetTriggers == None and isResetNesting
and self.RESET_NESTING_TAGS.has_key(p.name)):
#If we encounter one of the nesting reset triggers
#peculiar to this tag, or we encounter another tag
#that causes nesting to reset, pop up to but not
#including that tag.
popTo = p.name
inclusive = False
break
p = p.parent
if popTo:
self._popToTag(popTo, inclusive)
def unknown_starttag(self, name, attrs, selfClosing=0):
#print "Start tag %s: %s" % (name, attrs)
if self.quoteStack:
#This is not a real tag.
#print "<%s> is not real!" % name
attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
self.handle_data('<%s%s>' % (name, attrs))
return
self.endData()
if not self.isSelfClosingTag(name) and not selfClosing:
self._smartPop(name)
if self.parseOnlyThese and len(self.tagStack) <= 1 \
and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
return
tag = Tag(self, name, attrs, self.currentTag, self.previous)
if self.previous:
self.previous.next = tag
self.previous = tag
self.pushTag(tag)
if selfClosing or self.isSelfClosingTag(name):
self.popTag()
if name in self.QUOTE_TAGS:
#print "Beginning quote (%s)" % name
self.quoteStack.append(name)
self.literal = 1
return tag
def unknown_endtag(self, name):
#print "End tag %s" % name
if self.quoteStack and self.quoteStack[-1] != name:
#This is not a real end tag.
#print "</%s> is not real!" % name
self.handle_data('</%s>' % name)
return
self.endData()
self._popToTag(name)
if self.quoteStack and self.quoteStack[-1] == name:
self.quoteStack.pop()
self.literal = (len(self.quoteStack) > 0)
def handle_data(self, data):
self.currentData.append(data)
def _toStringSubclass(self, text, subclass):
"""Adds a certain piece of text to the tree as a NavigableString
subclass."""
self.endData()
self.handle_data(text)
self.endData(subclass)
def handle_pi(self, text):
"""Handle a processing instruction as a ProcessingInstruction
object, possibly one with a %SOUP-ENCODING% slot into which an
encoding will be plugged later."""
if text[:3] == "xml":
text = "xml version='1.0' encoding='%SOUP-ENCODING%'"
self._toStringSubclass(text, ProcessingInstruction)
def handle_comment(self, text):
"Handle comments as Comment objects."
self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
"Handle character references as data."
if self.convertEntities in [self.HTML_ENTITIES,
self.XML_ENTITIES]:
data = unichr(int(ref))
else:
data = '&#%s;' % ref
self.handle_data(data)
def handle_entityref(self, ref):
"""Handle entity references as data, possibly converting known
HTML entity references to the corresponding Unicode
characters."""
data = None
if self.convertEntities == self.HTML_ENTITIES or \
(self.convertEntities == self.XML_ENTITIES and \
self.XML_ENTITY_LIST.get(ref)):
try:
data = unichr(name2codepoint[ref])
except KeyError:
pass
if not data:
data = '&%s;' % ref
self.handle_data(data)
def handle_decl(self, data):
"Handle DOCTYPEs and the like as Declaration objects."
self._toStringSubclass(data, Declaration)
def parse_declaration(self, i):
"""Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as a CData object."""
j = None
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
data = self.rawdata[i+9:k]
j = k+3
self._toStringSubclass(data, CData)
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
return j
class BeautifulSoup(BeautifulStoneSoup):
"""This parser knows the following facts about HTML:
* Some tags have no closing tag and should be interpreted as being
closed as soon as they are encountered.
* The text inside some tags (ie. 'script') may contain tags which
are not really part of the document and which should be parsed
as text, not tags. If you want to parse the text as tags, you can
always fetch it and parse it explicitly.
* Tag nesting rules:
Most tags can't be nested at all. For instance, the occurrence of
a <p> tag should implicitly close the previous <p> tag.
<p>Para1<p>Para2
should be transformed into:
<p>Para1</p><p>Para2
Some tags can be nested arbitrarily. For instance, the occurrence
of a <blockquote> tag should _not_ implicitly close the previous
<blockquote> tag.
Alice said: <blockquote>Bob said: <blockquote>Blah
should NOT be transformed into:
Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
Some tags can be nested, but the nesting is reset by the
interposition of other tags. For instance, a <tr> tag should
implicitly close the previous <tr> tag within the same <table>,
but not close a <tr> tag in another table.
<table><tr>Blah<tr>Blah
should be transformed into:
<table><tr>Blah</tr><tr>Blah
but,
<tr>Blah<table><tr>Blah
should NOT be transformed into
<tr>Blah<table></tr><tr>Blah
Differing assumptions about tag nesting rules are a major source
of problems with the BeautifulSoup class. If BeautifulSoup is not
treating as nestable a tag your page author treats as nestable,
try ICantBelieveItsBeautifulSoup, MinimalSoup, or
BeautifulStoneSoup before writing your own subclass."""
def __init__(self, *args, **kwargs):
if not kwargs.has_key('smartQuotesTo'):
kwargs['smartQuotesTo'] = self.HTML_ENTITIES
BeautifulStoneSoup.__init__(self, *args, **kwargs)
SELF_CLOSING_TAGS = buildTagMap(None,
['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])
QUOTE_TAGS = {'script': None}
#According to the HTML standard, each of these inline tags can
#contain another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
'center']
#According to the HTML standard, these block tags can contain
#another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']
#Lists can contain other lists, but there are restrictions.
NESTABLE_LIST_TAGS = { 'ol' : [],
'ul' : [],
'li' : ['ul', 'ol'],
'dl' : [],
'dd' : ['dl'],
'dt' : ['dl'] }
#Tables can contain other tables, but there are restrictions.
NESTABLE_TABLE_TAGS = {'table' : [],
'tr' : ['table', 'tbody', 'tfoot', 'thead'],
'td' : ['tr'],
'th' : ['tr'],
'thead' : ['table'],
'tbody' : ['table'],
'tfoot' : ['table'],
}
NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']
#If one of these tags is encountered, all tags up to the next tag of
#this type are popped.
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
NON_NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS,
NESTABLE_TABLE_TAGS)
NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
# Used to detect the charset in a META tag; see start_meta
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)")
def start_meta(self, attrs):
"""Beautiful Soup can detect a charset included in a META tag,
try to convert the document to that charset, and re-parse the
document from the beginning."""
httpEquiv = None
contentType = None
contentTypeIndex = None
tagNeedsEncodingSubstitution = False
for i in range(0, len(attrs)):
key, value = attrs[i]
key = key.lower()
if key == 'http-equiv':
httpEquiv = value
elif key == 'content':
contentType = value
contentTypeIndex = i
if httpEquiv and contentType: # It's an interesting meta tag.
match = self.CHARSET_RE.search(contentType)
if match:
if getattr(self, 'declaredHTMLEncoding', None) or \
(self.originalEncoding == self.fromEncoding):
# This is our second pass through the document, or
# else an encoding was specified explicitly and it
# worked. Rewrite the meta tag.
newAttr = self.CHARSET_RE.sub\
(lambda(match):match.group(1) +
"%SOUP-ENCODING%", value)
attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
newAttr)
tagNeedsEncodingSubstitution = True
else:
# This is our first pass through the document.
# Go through it again with the new information.
newCharset = match.group(3)
if newCharset and newCharset != self.originalEncoding:
self.declaredHTMLEncoding = newCharset
self._feed(self.declaredHTMLEncoding)
raise StopParsing
tag = self.unknown_starttag("meta", attrs)
if tag and tagNeedsEncodingSubstitution:
tag.containsSubstitutions = True
class StopParsing(Exception):
pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
"""The BeautifulSoup class is oriented towards skipping over
common HTML errors like unclosed tags. However, sometimes it makes
errors of its own. For instance, consider this fragment:
<b>Foo<b>Bar</b></b>
This is perfectly valid (if bizarre) HTML. However, the
BeautifulSoup class will implicitly close the first b tag when it
encounters the second 'b'. It will think the author wrote
"<b>Foo<b>Bar", and didn't close the first 'b' tag, because
there's no real-world reason to bold something that's already
bold. When it encounters '</b></b>' it will close two more 'b'
tags, for a grand total of three tags closed instead of two. This
can throw off the rest of your document structure. The same is
true of a number of other tags, listed below.
It's much more common for someone to forget to close a 'b' tag
than to actually use nested 'b' tags, and the BeautifulSoup class
handles the common case. This class handles the not-so-common
case: where you can't believe someone wrote what they did, but
it's valid HTML and BeautifulSoup screwed up by assuming it
wouldn't be."""
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
'big']
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']
NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
"""The MinimalSoup class is for parsing HTML that contains
pathologically bad markup. It makes no assumptions about tag
nesting, but it does know which tags are self-closing, that
<script> tags contain Javascript and should not be parsed, that
META tags may contain encoding information, and so on.
This also makes it better for subclassing than BeautifulStoneSoup
or BeautifulSoup."""
RESET_NESTING_TAGS = buildTagMap('noscript')
NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
"""This class will push a tag with only a single string child into
the tag's parent as an attribute. The attribute's name is the tag
name, and the value is the string child. An example should give
the flavor of the change:
<foo><bar>baz</bar></foo>
=>
<foo bar="baz"><bar>baz</bar></foo>
You can then access fooTag['bar'] instead of fooTag.barTag.string.
This is, of course, useful for scraping structures that tend to
use subelements instead of attributes, such as SOAP messages. Note
that it modifies its input, so don't print the modified version
out.
I'm not sure how many people really want to use this class; let me
know if you do. Mainly I like the name."""
def popTag(self):
if len(self.tagStack) > 1:
tag = self.tagStack[-1]
parent = self.tagStack[-2]
parent._getAttrMap()
if (isinstance(tag, Tag) and len(tag.contents) == 1 and
isinstance(tag.contents[0], NavigableString) and
not parent.attrMap.has_key(tag.name)):
parent[tag.name] = tag.contents[0]
BeautifulStoneSoup.popTag(self)
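# A hedged sketch of the docstring's example:
#
#   >>> soup = BeautifulSOAP('<foo><bar>baz</bar></foo>')
#   >>> soup.foo['bar']
#   u'baz'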
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
pass
class RobustHTMLParser(BeautifulSoup):
pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
pass
class SimplifyingSOAPParser(BeautifulSOAP):
pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
    import chardet
#    import chardet.constants
#    chardet.constants._debug = 1
except ImportError:
    chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
    import cjkcodecs.aliases
except ImportError:
    pass
try:
    import iconv_codec
except ImportError:
    pass
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = { "macintosh" : "mac-roman",
"x-sjis" : "shift-jis" }
def __init__(self, markup, overrideEncodings=[],
smartQuotesTo='xml'):
self.markup, documentEncoding, sniffedEncoding = \
self._detectEncoding(markup)
self.smartQuotesTo = smartQuotesTo
self.triedEncodings = []
if markup == '' or isinstance(markup, unicode):
self.originalEncoding = None
self.unicode = unicode(markup)
return
u = None
for proposedEncoding in overrideEncodings:
u = self._convertFrom(proposedEncoding)
if u: break
if not u:
for proposedEncoding in (documentEncoding, sniffedEncoding):
u = self._convertFrom(proposedEncoding)
if u: break
# If no luck and we have auto-detection library, try that:
if not u and chardet and not isinstance(self.markup, unicode):
u = self._convertFrom(chardet.detect(self.markup)['encoding'])
# As a last resort, try utf-8 and windows-1252:
if not u:
for proposed_encoding in ("utf-8", "windows-1252"):
u = self._convertFrom(proposed_encoding)
if u: break
self.unicode = u
if not u: self.originalEncoding = None
def _subMSChar(self, orig):
"""Changes a MS smart quote character to an XML or HTML
entity."""
sub = self.MS_CHARS.get(orig)
if type(sub) == types.TupleType:
if self.smartQuotesTo == 'xml':
sub = '&#x%s;' % sub[1]
else:
sub = '&%s;' % sub[0]
return sub
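    # e.g. (sketch, per the MS_CHARS table later in this class): for '\x93',
    # smartQuotesTo='xml' yields '&#x201C;' and smartQuotesTo='html' yields
    # '&ldquo;'.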
def _convertFrom(self, proposed):
proposed = self.find_codec(proposed)
if not proposed or proposed in self.triedEncodings:
return None
self.triedEncodings.append(proposed)
markup = self.markup
# Convert smart quotes to HTML if coming from an encoding
# that might have them.
        if self.smartQuotesTo and proposed.lower() in ("windows-1252",
                                                       "iso-8859-1",
                                                       "iso-8859-2"):
            markup = re.compile("([\x80-\x9f])").sub(
                lambda x: self._subMSChar(x.group(1)),
                markup)
try:
# print "Trying to convert document to %s" % proposed
u = self._toUnicode(markup, proposed)
self.markup = u
self.originalEncoding = proposed
except Exception, e:
# print "That didn't work!"
# print e
return None
#print "Correct encoding: %s" % proposed
return self.markup
def _toUnicode(self, data, encoding):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
return newdata
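    # e.g. (sketch): _toUnicode('\xef\xbb\xbfhi', 'ascii') spots the UTF-8
    # BOM, overrides the proposed encoding with 'utf-8', strips the three
    # BOM bytes and returns u'hi'.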
def _detectEncoding(self, xml_data):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
xml_encoding_match = re.compile \
('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')\
.match(xml_data)
except:
xml_encoding_match = None
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].lower()
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding
def find_codec(self, charset):
return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
or (charset and self._codec(charset.replace("-", ""))) \
or (charset and self._codec(charset.replace("-", "_"))) \
or charset
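    # e.g. (sketch): find_codec('macintosh') -> 'mac-roman' via
    # CHARSET_ALIASES, while find_codec('shift_jis') succeeds on the first
    # codecs.lookup and never needs the dash/underscore retries.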
def _codec(self, charset):
if not charset: return charset
codec = None
try:
codecs.lookup(charset)
codec = charset
except LookupError:
pass
return codec
EBCDIC_TO_ASCII_MAP = None
def _ebcdic_to_ascii(self, s):
c = self.__class__
if not c.EBCDIC_TO_ASCII_MAP:
emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
201,202,106,107,108,109,110,111,112,113,114,203,204,205,
206,207,208,209,126,115,116,117,118,119,120,121,122,210,
211,212,213,214,215,216,217,218,219,220,221,222,223,224,
225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
250,251,252,253,254,255)
import string
c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(c.EBCDIC_TO_ASCII_MAP)
MS_CHARS = { '\x80' : ('euro', '20AC'),
'\x81' : ' ',
'\x82' : ('sbquo', '201A'),
'\x83' : ('fnof', '192'),
'\x84' : ('bdquo', '201E'),
'\x85' : ('hellip', '2026'),
'\x86' : ('dagger', '2020'),
'\x87' : ('Dagger', '2021'),
'\x88' : ('circ', '2C6'),
'\x89' : ('permil', '2030'),
'\x8A' : ('Scaron', '160'),
'\x8B' : ('lsaquo', '2039'),
'\x8C' : ('OElig', '152'),
'\x8D' : '?',
'\x8E' : ('#x17D', '17D'),
'\x8F' : '?',
'\x90' : '?',
'\x91' : ('lsquo', '2018'),
'\x92' : ('rsquo', '2019'),
'\x93' : ('ldquo', '201C'),
'\x94' : ('rdquo', '201D'),
'\x95' : ('bull', '2022'),
'\x96' : ('ndash', '2013'),
'\x97' : ('mdash', '2014'),
'\x98' : ('tilde', '2DC'),
'\x99' : ('trade', '2122'),
'\x9a' : ('scaron', '161'),
'\x9b' : ('rsaquo', '203A'),
'\x9c' : ('oelig', '153'),
'\x9d' : '?',
'\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', '178'),}  # 0x9f maps to U+0178
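# A hedged usage sketch (assumes chardet is not installed, so detection falls
# through to the windows-1252 fallback):
#
#   >>> dammit = UnicodeDammit('\x93Hello\x94', smartQuotesTo='html')
#   >>> dammit.unicode
#   u'&ldquo;Hello&rdquo;'
#   >>> dammit.originalEncoding
#   'windows-1252'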
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin.read())
print soup.prettify()
|
ar7z1/ansible
|
refs/heads/devel
|
test/units/cli/test_cli.py
|
16
|
# (c) 2017, Adrian Likins <alikins@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import patch, MagicMock
from units.mock.loader import DictDataLoader
from ansible.release import __version__
from ansible.parsing import vault
from ansible import cli
class TestCliVersion(unittest.TestCase):
def test_version(self):
ver = cli.CLI.version('ansible-cli-test')
self.assertIn('ansible-cli-test', ver)
self.assertIn('python version', ver)
def test_version_info(self):
version_info = cli.CLI.version_info()
self.assertEqual(version_info['string'], __version__)
def test_version_info_gitinfo(self):
version_info = cli.CLI.version_info(gitinfo=True)
self.assertIn('python version', version_info['string'])
class TestCliBuildVaultIds(unittest.TestCase):
def setUp(self):
self.tty_patcher = patch('ansible.cli.sys.stdin.isatty', return_value=True)
self.mock_isatty = self.tty_patcher.start()
def tearDown(self):
self.tty_patcher.stop()
def test(self):
res = cli.CLI.build_vault_ids(['foo@bar'])
self.assertEqual(res, ['foo@bar'])
def test_create_new_password_no_vault_id(self):
res = cli.CLI.build_vault_ids([], create_new_password=True)
self.assertEqual(res, ['default@prompt_ask_vault_pass'])
def test_create_new_password_no_vault_id_no_auto_prompt(self):
res = cli.CLI.build_vault_ids([], auto_prompt=False, create_new_password=True)
self.assertEqual(res, [])
def test_no_vault_id_no_auto_prompt(self):
        # simulate 'ansible-playbook site.yml' without --ask-vault-pass; should not prompt
res = cli.CLI.build_vault_ids([], auto_prompt=False)
self.assertEqual(res, [])
def test_no_vault_ids_auto_prompt(self):
# create_new_password=False
# simulate 'ansible-vault edit encrypted.yml'
res = cli.CLI.build_vault_ids([], auto_prompt=True)
self.assertEqual(res, ['default@prompt_ask_vault_pass'])
def test_no_vault_ids_auto_prompt_ask_vault_pass(self):
# create_new_password=False
# simulate 'ansible-vault edit --ask-vault-pass encrypted.yml'
res = cli.CLI.build_vault_ids([], auto_prompt=True, ask_vault_pass=True)
self.assertEqual(res, ['default@prompt_ask_vault_pass'])
def test_create_new_password_auto_prompt(self):
# simulate 'ansible-vault encrypt somefile.yml'
res = cli.CLI.build_vault_ids([], auto_prompt=True, create_new_password=True)
self.assertEqual(res, ['default@prompt_ask_vault_pass'])
def test_create_new_password_no_vault_id_ask_vault_pass(self):
res = cli.CLI.build_vault_ids([], ask_vault_pass=True,
create_new_password=True)
self.assertEqual(res, ['default@prompt_ask_vault_pass'])
def test_create_new_password_with_vault_ids(self):
res = cli.CLI.build_vault_ids(['foo@bar'], create_new_password=True)
self.assertEqual(res, ['foo@bar'])
def test_create_new_password_no_vault_ids_password_files(self):
res = cli.CLI.build_vault_ids([], vault_password_files=['some-password-file'],
create_new_password=True)
self.assertEqual(res, ['default@some-password-file'])
def test_everything(self):
res = cli.CLI.build_vault_ids(['blip@prompt', 'baz@prompt_ask_vault_pass',
'some-password-file', 'qux@another-password-file'],
vault_password_files=['yet-another-password-file',
'one-more-password-file'],
ask_vault_pass=True,
create_new_password=True,
auto_prompt=False)
self.assertEqual(set(res), set(['blip@prompt', 'baz@prompt_ask_vault_pass',
'default@prompt_ask_vault_pass',
'some-password-file', 'qux@another-password-file',
'default@yet-another-password-file',
'default@one-more-password-file']))
class TestCliSetupVaultSecrets(unittest.TestCase):
def setUp(self):
self.fake_loader = DictDataLoader({})
self.tty_patcher = patch('ansible.cli.sys.stdin.isatty', return_value=True)
self.mock_isatty = self.tty_patcher.start()
self.display_v_patcher = patch('ansible.cli.display.verbosity', return_value=6)
self.mock_display_v = self.display_v_patcher.start()
cli.display.verbosity = 5
def tearDown(self):
self.tty_patcher.stop()
self.display_v_patcher.stop()
cli.display.verbosity = 0
def test(self):
res = cli.CLI.setup_vault_secrets(None, None, auto_prompt=False)
self.assertIsInstance(res, list)
@patch('ansible.cli.get_file_vault_secret')
def test_password_file(self, mock_file_secret):
filename = '/dev/null/secret'
mock_file_secret.return_value = MagicMock(bytes=b'file1_password',
vault_id='file1',
filename=filename)
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=['secret1@%s' % filename, 'secret2'],
vault_password_files=[filename])
self.assertIsInstance(res, list)
matches = vault.match_secrets(res, ['secret1'])
self.assertIn('secret1', [x[0] for x in matches])
match = matches[0][1]
self.assertEqual(match.bytes, b'file1_password')
@patch('ansible.cli.PromptVaultSecret')
def test_prompt(self, mock_prompt_secret):
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='prompt1')
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=['prompt1@prompt'],
ask_vault_pass=True,
auto_prompt=False)
self.assertIsInstance(res, list)
matches = vault.match_secrets(res, ['prompt1'])
self.assertIn('prompt1', [x[0] for x in matches])
match = matches[0][1]
self.assertEqual(match.bytes, b'prompt1_password')
@patch('ansible.cli.PromptVaultSecret')
def test_prompt_no_tty(self, mock_prompt_secret):
self.mock_isatty.return_value = False
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='prompt1',
name='bytes_should_be_prompt1_password',
spec=vault.PromptVaultSecret)
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=['prompt1@prompt'],
ask_vault_pass=True,
auto_prompt=False)
self.assertIsInstance(res, list)
self.assertEqual(len(res), 2)
matches = vault.match_secrets(res, ['prompt1'])
self.assertIn('prompt1', [x[0] for x in matches])
        self.assertEqual(len(matches), 1)
@patch('ansible.cli.get_file_vault_secret')
@patch('ansible.cli.PromptVaultSecret')
def test_prompt_no_tty_and_password_file(self, mock_prompt_secret, mock_file_secret):
self.mock_isatty.return_value = False
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='prompt1')
filename = '/dev/null/secret'
mock_file_secret.return_value = MagicMock(bytes=b'file1_password',
vault_id='file1',
filename=filename)
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=['prompt1@prompt', 'file1@/dev/null/secret'],
ask_vault_pass=True)
self.assertIsInstance(res, list)
matches = vault.match_secrets(res, ['file1'])
self.assertIn('file1', [x[0] for x in matches])
self.assertNotIn('prompt1', [x[0] for x in matches])
match = matches[0][1]
self.assertEqual(match.bytes, b'file1_password')
def _assert_ids(self, vault_id_names, res, password=b'prompt1_password'):
self.assertIsInstance(res, list)
len_ids = len(vault_id_names)
matches = vault.match_secrets(res, vault_id_names)
self.assertEqual(len(res), len_ids, 'len(res):%s does not match len_ids:%s' % (len(res), len_ids))
self.assertEqual(len(matches), len_ids)
for index, prompt in enumerate(vault_id_names):
self.assertIn(prompt, [x[0] for x in matches])
# simple mock, same password/prompt for each mock_prompt_secret
self.assertEqual(matches[index][1].bytes, password)
@patch('ansible.cli.PromptVaultSecret')
def test_multiple_prompts(self, mock_prompt_secret):
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='prompt1')
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=['prompt1@prompt',
'prompt2@prompt'],
ask_vault_pass=False)
vault_id_names = ['prompt1', 'prompt2']
self._assert_ids(vault_id_names, res)
@patch('ansible.cli.PromptVaultSecret')
def test_multiple_prompts_and_ask_vault_pass(self, mock_prompt_secret):
self.mock_isatty.return_value = False
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='prompt1')
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=['prompt1@prompt',
'prompt2@prompt',
'prompt3@prompt_ask_vault_pass'],
ask_vault_pass=True)
        # We provide some vault-ids and secrets, so auto_prompt shouldn't get
        # triggered; the extra 'default' id comes from ask_vault_pass=True.
vault_id_names = ['prompt1', 'prompt2', 'prompt3', 'default']
self._assert_ids(vault_id_names, res)
@patch('ansible.cli.C')
@patch('ansible.cli.get_file_vault_secret')
@patch('ansible.cli.PromptVaultSecret')
def test_default_file_vault(self, mock_prompt_secret,
mock_file_secret,
mock_config):
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='default')
mock_file_secret.return_value = MagicMock(bytes=b'file1_password',
vault_id='default')
mock_config.DEFAULT_VAULT_PASSWORD_FILE = '/dev/null/faux/vault_password_file'
mock_config.DEFAULT_VAULT_IDENTITY = 'default'
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=[],
create_new_password=False,
ask_vault_pass=False)
self.assertIsInstance(res, list)
matches = vault.match_secrets(res, ['default'])
        # --vault-password-file/DEFAULT_VAULT_PASSWORD_FILE takes precedence over
        # prompts for the same vault-id ('default') regardless of CLI order,
        # since order didn't matter in 2.3
self.assertEqual(matches[0][1].bytes, b'file1_password')
self.assertEqual(len(matches), 1)
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=[],
create_new_password=False,
ask_vault_pass=True,
auto_prompt=True)
self.assertIsInstance(res, list)
matches = vault.match_secrets(res, ['default'])
self.assertEqual(matches[0][1].bytes, b'file1_password')
self.assertEqual(matches[1][1].bytes, b'prompt1_password')
self.assertEqual(len(matches), 2)
@patch('ansible.cli.get_file_vault_secret')
@patch('ansible.cli.PromptVaultSecret')
def test_default_file_vault_identity_list(self, mock_prompt_secret,
mock_file_secret):
default_vault_ids = ['some_prompt@prompt',
'some_file@/dev/null/secret']
mock_prompt_secret.return_value = MagicMock(bytes=b'some_prompt_password',
vault_id='some_prompt')
filename = '/dev/null/secret'
mock_file_secret.return_value = MagicMock(bytes=b'some_file_password',
vault_id='some_file',
filename=filename)
vault_ids = default_vault_ids
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=vault_ids,
create_new_password=False,
ask_vault_pass=True)
self.assertIsInstance(res, list)
matches = vault.match_secrets(res, ['some_file'])
        # --vault-password-file/DEFAULT_VAULT_PASSWORD_FILE takes precedence over
        # prompts for the same vault-id ('default') regardless of CLI order,
        # since order didn't matter in 2.3
self.assertEqual(matches[0][1].bytes, b'some_file_password')
matches = vault.match_secrets(res, ['some_prompt'])
self.assertEqual(matches[0][1].bytes, b'some_prompt_password')
@patch('ansible.cli.PromptVaultSecret')
def test_prompt_just_ask_vault_pass(self, mock_prompt_secret):
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='default')
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=[],
create_new_password=False,
ask_vault_pass=True)
self.assertIsInstance(res, list)
match = vault.match_secrets(res, ['default'])[0][1]
self.assertEqual(match.bytes, b'prompt1_password')
@patch('ansible.cli.PromptVaultSecret')
def test_prompt_new_password_ask_vault_pass(self, mock_prompt_secret):
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='default')
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=[],
create_new_password=True,
ask_vault_pass=True)
self.assertIsInstance(res, list)
match = vault.match_secrets(res, ['default'])[0][1]
self.assertEqual(match.bytes, b'prompt1_password')
@patch('ansible.cli.PromptVaultSecret')
def test_prompt_new_password_vault_id_prompt(self, mock_prompt_secret):
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='some_vault_id')
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=['some_vault_id@prompt'],
create_new_password=True,
ask_vault_pass=False)
self.assertIsInstance(res, list)
match = vault.match_secrets(res, ['some_vault_id'])[0][1]
self.assertEqual(match.bytes, b'prompt1_password')
@patch('ansible.cli.PromptVaultSecret')
def test_prompt_new_password_vault_id_prompt_ask_vault_pass(self, mock_prompt_secret):
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='default')
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=['some_vault_id@prompt_ask_vault_pass'],
create_new_password=True,
ask_vault_pass=False)
self.assertIsInstance(res, list)
match = vault.match_secrets(res, ['some_vault_id'])[0][1]
self.assertEqual(match.bytes, b'prompt1_password')
@patch('ansible.cli.PromptVaultSecret')
def test_prompt_new_password_vault_id_prompt_ask_vault_pass_ask_vault_pass(self, mock_prompt_secret):
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='default')
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=['some_vault_id@prompt_ask_vault_pass'],
create_new_password=True,
ask_vault_pass=True)
self.assertIsInstance(res, list)
match = vault.match_secrets(res, ['some_vault_id'])[0][1]
self.assertEqual(match.bytes, b'prompt1_password')
|
jmattbeal/avro
|
refs/heads/trunk
|
lang/py/test/av_bench.py
|
22
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
from random import sample, choice, randint
from string import lowercase
import avro.datafile
import avro.schema
import avro.io
types = ["A", "CNAME"]
def rand_name():
return ''.join(sample(lowercase, 15))
def rand_ip():
return "%s.%s.%s.%s" %(randint(0,255), randint(0,255), randint(0,255), randint(0,255))
def write(n):
schema_s="""
{ "type": "record",
"name": "Query",
"fields" : [
{"name": "query", "type": "string"},
{"name": "response", "type": "string"},
{"name": "type", "type": "string", "default": "A"}
]}"""
    out = open("datafile.avr", 'wb')
schema = avro.schema.parse(schema_s)
writer = avro.io.DatumWriter(schema)
dw = avro.datafile.DataFileWriter(out, writer, schema) #,codec='deflate')
for _ in xrange(n):
response = rand_ip()
query = rand_name()
        record_type = choice(types)  # avoid shadowing the builtin 'type'
        dw.append({'query': query, 'response': response, 'type': record_type})
dw.close()
def read():
    f = open("datafile.avr", 'rb')
    reader = avro.io.DatumReader()
    af = avro.datafile.DataFileReader(f, reader)
    for _ in af:
        pass
    af.close()
def t(f, *args):
s = time.time()
f(*args)
e = time.time()
return e-s
if __name__ == "__main__":
n = int(sys.argv[1])
print "Write %0.4f" % t(write, n)
print "Read %0.4f" % t(read)
|
M4sse/chromium.src
|
refs/heads/nw12
|
ppapi/generators/idl_gen_wrapper.py
|
113
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base class for generating wrapper functions for PPAPI methods.
"""
from datetime import datetime
import os
import sys
from idl_c_proto import CGen
from idl_generator import Generator
from idl_log import ErrOut, InfoOut, WarnOut
from idl_option import GetOption
from idl_outfile import IDLOutFile
class PPKind(object):
@staticmethod
def ChoosePPFunc(iface, ppb_func, ppp_func):
name = iface.node.GetName()
if name.startswith("PPP"):
return ppp_func
elif name.startswith("PPB"):
return ppb_func
else:
raise Exception('Unknown PPKind for ' + name)
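# e.g. (sketch): PPKind.ChoosePPFunc picks by interface name prefix, so an
# interface node named "PPB_Audio" selects ppb_func and "PPP_Instance"
# selects ppp_func; anything else raises.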
class Interface(object):
"""Tracks information about a particular interface version.
- struct_name: the struct type used by the ppapi headers to hold the
method pointers (the vtable).
- needs_wrapping: True if a method in the interface needs wrapping.
- header_file: the name of the header file that defined this interface.
"""
def __init__(self, interface_node, release, version,
struct_name, needs_wrapping, header_file):
self.node = interface_node
self.release = release
self.version = version
self.struct_name = struct_name
# We may want finer grained filtering (method level), but it is not
# yet clear how to actually do that.
self.needs_wrapping = needs_wrapping
self.header_file = header_file
class WrapperGen(Generator):
"""WrapperGen - An abstract class that generates wrappers for PPAPI methods.
This generates a wrapper PPB and PPP GetInterface, which directs users
to wrapper PPAPI methods. Wrapper PPAPI methods may perform arbitrary
work before invoking the real PPAPI method (supplied by the original
GetInterface functions).
Subclasses must implement GenerateWrapperForPPBMethod (and PPP).
"""
def __init__(self, wrapper_prefix, s1, s2, s3):
Generator.__init__(self, s1, s2, s3)
self.wrapper_prefix = wrapper_prefix
self._skip_opt = False
self.output_file = None
self.cgen = CGen()
def SetOutputFile(self, fname):
self.output_file = fname
def GenerateRelease(self, ast, release, options):
return self.GenerateRange(ast, [release], options)
@staticmethod
def GetHeaderName(name):
"""Get the corresponding ppapi .h file from each IDL filename.
"""
name = os.path.splitext(name)[0] + '.h'
name = name.replace(os.sep, '/')
return 'ppapi/c/' + name
def WriteCopyright(self, out):
now = datetime.now()
c = """/* Copyright (c) %s The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
/* NOTE: this is auto-generated from IDL */
""" % now.year
out.Write(c)
def GetWrapperMetadataName(self):
return '__%sWrapperInfo' % self.wrapper_prefix
def GenerateHelperFunctions(self, out):
"""Generate helper functions to avoid dependencies on libc.
"""
out.Write("""/* Use local strcmp to avoid dependency on libc. */
static int mystrcmp(const char* s1, const char *s2) {
while (1) {
if (*s1 == 0) break;
if (*s2 == 0) break;
if (*s1 != *s2) break;
++s1;
++s2;
}
return (int)(*s1) - (int)(*s2);
}\n
""")
def GenerateFixedFunctions(self, out):
"""Write out the set of constant functions (those that do not depend on
the current Pepper IDL).
"""
out.Write("""
static PPB_GetInterface __real_PPBGetInterface;
static PPP_GetInterface_Type __real_PPPGetInterface;
void __set_real_%(wrapper_prefix)s_PPBGetInterface(PPB_GetInterface real) {
__real_PPBGetInterface = real;
}
void __set_real_%(wrapper_prefix)s_PPPGetInterface(PPP_GetInterface_Type real) {
__real_PPPGetInterface = real;
}
/* Map interface string -> wrapper metadata */
static struct %(wrapper_struct)s *%(wrapper_prefix)sPPBShimIface(
const char *name) {
struct %(wrapper_struct)s **next = s_ppb_wrappers;
while (*next != NULL) {
if (mystrcmp(name, (*next)->iface_macro) == 0) return *next;
++next;
}
return NULL;
}
/* Map interface string -> wrapper metadata */
static struct %(wrapper_struct)s *%(wrapper_prefix)sPPPShimIface(
const char *name) {
struct %(wrapper_struct)s **next = s_ppp_wrappers;
while (*next != NULL) {
if (mystrcmp(name, (*next)->iface_macro) == 0) return *next;
++next;
}
return NULL;
}
const void *__%(wrapper_prefix)s_PPBGetInterface(const char *name) {
struct %(wrapper_struct)s *wrapper = %(wrapper_prefix)sPPBShimIface(name);
if (wrapper == NULL) {
/* We did not generate a wrapper for this, so return the real interface. */
return (*__real_PPBGetInterface)(name);
}
/* Initialize the real_iface if it hasn't been. The wrapper depends on it. */
if (wrapper->real_iface == NULL) {
const void *iface = (*__real_PPBGetInterface)(name);
if (NULL == iface) return NULL;
wrapper->real_iface = iface;
}
return wrapper->wrapped_iface;
}
const void *__%(wrapper_prefix)s_PPPGetInterface(const char *name) {
struct %(wrapper_struct)s *wrapper = %(wrapper_prefix)sPPPShimIface(name);
if (wrapper == NULL) {
/* We did not generate a wrapper for this, so return the real interface. */
return (*__real_PPPGetInterface)(name);
}
/* Initialize the real_iface if it hasn't been. The wrapper depends on it. */
if (wrapper->real_iface == NULL) {
const void *iface = (*__real_PPPGetInterface)(name);
if (NULL == iface) return NULL;
wrapper->real_iface = iface;
}
return wrapper->wrapped_iface;
}
""" % { 'wrapper_struct' : self.GetWrapperMetadataName(),
'wrapper_prefix' : self.wrapper_prefix,
} )
############################################################
def OwnHeaderFile(self):
"""Return the header file that specifies the API of this wrapper.
We do not generate the header files. """
raise Exception('Child class must implement this')
############################################################
def DetermineInterfaces(self, ast, releases):
"""Get a list of interfaces along with whatever metadata we need.
"""
iface_releases = []
for filenode in ast.GetListOf('File'):
# If this file has errors, skip it
if filenode in self.skip_list:
if GetOption('verbose'):
InfoOut.Log('WrapperGen: Skipping %s due to errors\n' %
filenode.GetName())
continue
file_name = self.GetHeaderName(filenode.GetName())
ifaces = filenode.GetListOf('Interface')
for iface in ifaces:
releases_for_iface = iface.GetUniqueReleases(releases)
for release in releases_for_iface:
version = iface.GetVersion(release)
struct_name = self.cgen.GetStructName(iface, release,
include_version=True)
needs_wrap = self.InterfaceVersionNeedsWrapping(iface, version)
if not needs_wrap:
if GetOption('verbose'):
InfoOut.Log('Interface %s ver %s does not need wrapping' %
(struct_name, version))
iface_releases.append(
Interface(iface, release, version,
struct_name, needs_wrap, file_name))
return iface_releases
def GenerateIncludes(self, iface_releases, out):
"""Generate the list of #include that define the original interfaces.
"""
self.WriteCopyright(out)
# First include own header.
out.Write('#include "%s"\n\n' % self.OwnHeaderFile())
# Get typedefs for PPB_GetInterface.
out.Write('#include "%s"\n' % self.GetHeaderName('ppb.h'))
# Only include headers where *some* interface needs wrapping.
header_files = set()
for iface in iface_releases:
if iface.needs_wrapping:
header_files.add(iface.header_file)
for header in sorted(header_files):
out.Write('#include "%s"\n' % header)
out.Write('\n')
def WrapperMethodPrefix(self, iface, release):
return '%s_%s_%s_' % (self.wrapper_prefix, release, iface.GetName())
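  # e.g. (sketch): with wrapper_prefix 'Foo', release 'M14' and an interface
  # named 'PPB_Audio', WrapperMethodPrefix yields 'Foo_M14_PPB_Audio_'; the
  # member name is appended to this prefix when wrappers are emitted.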
def GenerateWrapperForPPBMethod(self, iface, member):
result = []
func_prefix = self.WrapperMethodPrefix(iface.node, iface.release)
sig = self.cgen.GetSignature(member, iface.release, 'store',
func_prefix, False)
result.append('static %s {\n' % sig)
result.append(' while(1) { /* Not implemented */ } \n')
result.append('}\n')
return result
def GenerateWrapperForPPPMethod(self, iface, member):
result = []
func_prefix = self.WrapperMethodPrefix(iface.node, iface.release)
sig = self.cgen.GetSignature(member, iface.release, 'store',
func_prefix, False)
result.append('static %s {\n' % sig)
result.append(' while(1) { /* Not implemented */ } \n')
result.append('}\n')
return result
def GenerateWrapperForMethods(self, iface_releases, comments=True):
"""Return a string representing the code for each wrapper method
(using a string rather than writing to the file directly for testing.)
"""
result = []
for iface in iface_releases:
if not iface.needs_wrapping:
if comments:
result.append('/* Not generating wrapper methods for %s */\n\n' %
iface.struct_name)
continue
if comments:
result.append('/* Begin wrapper methods for %s */\n\n' %
iface.struct_name)
generator = PPKind.ChoosePPFunc(iface,
self.GenerateWrapperForPPBMethod,
self.GenerateWrapperForPPPMethod)
for member in iface.node.GetListOf('Member'):
# Skip the method if it's not actually in the release.
if not member.InReleases([iface.release]):
continue
result.extend(generator(iface, member))
if comments:
result.append('/* End wrapper methods for %s */\n\n' %
iface.struct_name)
return ''.join(result)
def GenerateWrapperInterfaces(self, iface_releases, out):
for iface in iface_releases:
if not iface.needs_wrapping:
out.Write('/* Not generating wrapper interface for %s */\n\n' %
iface.struct_name)
continue
out.Write('static const struct %s %s_Wrappers_%s = {\n' % (
iface.struct_name, self.wrapper_prefix, iface.struct_name))
methods = []
for member in iface.node.GetListOf('Member'):
# Skip the method if it's not actually in the release.
if not member.InReleases([iface.release]):
continue
prefix = self.WrapperMethodPrefix(iface.node, iface.release)
# Casts are necessary for the PPB_* wrappers because we must
# cast away "__attribute__((pnaclcall))". The PPP_* wrappers
# must match the default calling conventions and so don't have
# the attribute, so omitting casts for them provides a little
# extra type checking.
if iface.node.GetName().startswith('PPB_'):
cast = '(%s)' % self.cgen.GetSignature(
member, iface.release, 'return',
prefix='',
func_as_ptr=True,
include_name=False)
else:
cast = ''
methods.append(' .%s = %s&%s%s' % (member.GetName(),
cast,
prefix,
member.GetName()))
out.Write(' ' + ',\n '.join(methods) + '\n')
out.Write('};\n\n')
def GetWrapperInfoName(self, iface):
return '%s_WrapperInfo_%s' % (self.wrapper_prefix, iface.struct_name)
def GenerateWrapperInfoAndCollection(self, iface_releases, out):
for iface in iface_releases:
iface_macro = self.cgen.GetInterfaceMacro(iface.node, iface.version)
if iface.needs_wrapping:
wrap_iface = '(const void *) &%s_Wrappers_%s' % (self.wrapper_prefix,
iface.struct_name)
out.Write("""static struct %s %s = {
.iface_macro = %s,
.wrapped_iface = %s,
.real_iface = NULL
};\n\n""" % (self.GetWrapperMetadataName(),
self.GetWrapperInfoName(iface),
iface_macro,
wrap_iface))
# Now generate NULL terminated arrays of the above wrapper infos.
ppb_wrapper_infos = []
ppp_wrapper_infos = []
for iface in iface_releases:
if iface.needs_wrapping:
appender = PPKind.ChoosePPFunc(iface,
ppb_wrapper_infos.append,
ppp_wrapper_infos.append)
appender(' &%s' % self.GetWrapperInfoName(iface))
ppb_wrapper_infos.append(' NULL')
ppp_wrapper_infos.append(' NULL')
out.Write(
'static struct %s *s_ppb_wrappers[] = {\n%s\n};\n\n' %
(self.GetWrapperMetadataName(), ',\n'.join(ppb_wrapper_infos)))
out.Write(
'static struct %s *s_ppp_wrappers[] = {\n%s\n};\n\n' %
(self.GetWrapperMetadataName(), ',\n'.join(ppp_wrapper_infos)))
def DeclareWrapperInfos(self, iface_releases, out):
"""The wrapper methods usually need access to the real_iface, so we must
declare these wrapper infos ahead of time (there is a circular dependency).
"""
out.Write('/* BEGIN Declarations for all Wrapper Infos */\n\n')
for iface in iface_releases:
if iface.needs_wrapping:
out.Write('static struct %s %s;\n' %
(self.GetWrapperMetadataName(),
self.GetWrapperInfoName(iface)))
out.Write('/* END Declarations for all Wrapper Infos. */\n\n')
def GenerateRange(self, ast, releases, options):
"""Generate shim code for a range of releases.
"""
# Remember to set the output filename before running this.
out_filename = self.output_file
if out_filename is None:
ErrOut.Log('Did not set filename for writing out wrapper\n')
return 1
InfoOut.Log("Generating %s for %s" % (out_filename, self.wrapper_prefix))
out = IDLOutFile(out_filename)
# Get a list of all the interfaces along with metadata.
iface_releases = self.DetermineInterfaces(ast, releases)
# Generate the includes.
self.GenerateIncludes(iface_releases, out)
# Write out static helper functions (mystrcmp).
self.GenerateHelperFunctions(out)
# Declare list of WrapperInfo before actual wrapper methods, since
# they reference each other.
self.DeclareWrapperInfos(iface_releases, out)
# Generate wrapper functions for each wrapped method in the interfaces.
result = self.GenerateWrapperForMethods(iface_releases)
out.Write(result)
# Collect all the wrapper functions into interface structs.
self.GenerateWrapperInterfaces(iface_releases, out)
# Generate a table of the wrapped interface structs that can be looked up.
self.GenerateWrapperInfoAndCollection(iface_releases, out)
# Write out the IDL-invariant functions.
self.GenerateFixedFunctions(out)
out.Close()
return 0
|
yashtrivedi96/coala
|
refs/heads/master
|
tests/results/ResultTest.py
|
5
|
import unittest
import json
from os.path import abspath
from coalib.results.Diff import Diff
from coalib.results.Result import RESULT_SEVERITY, Result
from coalib.results.SourceRange import SourceRange
from coalib.output.JSONEncoder import create_json_encoder
class ResultTest(unittest.TestCase):
def test_origin(self):
uut = Result('origin', 'msg')
self.assertEqual(uut.origin, 'origin')
uut = Result(self, 'msg')
self.assertEqual(uut.origin, 'ResultTest')
uut = Result(None, 'msg')
self.assertEqual(uut.origin, '')
def test_invalid_severity(self):
with self.assertRaises(ValueError):
Result('o', 'm', severity=-5)
def test_invalid_confidence(self):
with self.assertRaises(ValueError):
Result('o', 'm', confidence=-1)
with self.assertRaises(ValueError):
Result('o', 'm', confidence=101)
def test_message_arguments(self):
uut = Result('origin', '{msg}', message_arguments={'msg': 'msg'})
self.assertEqual(uut.message, 'msg')
with self.assertRaises(KeyError):
Result('origin', '{msg}', message_arguments={'message': 'msg'})
def test_string_dict(self):
uut = Result(None, '')
output = uut.to_string_dict()
self.assertEqual(output, {'id': str(uut.id),
'origin': '',
'message': '',
'file': '',
'line_nr': '',
'severity': 'NORMAL',
'debug_msg': '',
'additional_info': '',
'confidence': '100',
'message_base': '',
'message_arguments': '{}'})
uut = Result.from_values(origin='origin',
message='{test} msg',
message_arguments={'test': 'test'},
file='file',
line=2,
severity=RESULT_SEVERITY.INFO,
additional_info='hi!',
debug_msg='dbg',
confidence=50)
output = uut.to_string_dict()
self.assertEqual(output, {'id': str(uut.id),
'origin': 'origin',
'message': 'test msg',
'file': abspath('file'),
'line_nr': '2',
'severity': 'INFO',
'debug_msg': 'dbg',
'additional_info': 'hi!',
'confidence': '50',
'message_base': '{test} msg',
'message_arguments': '{\'test\': \'test\'}'})
uut = Result.from_values(origin='o', message='m', file='f', line=5)
output = uut.to_string_dict()
self.assertEqual(output['line_nr'], '5')
def test_apply(self):
file_dict = {
'f_a': ['1', '2', '3'],
'f_b': ['1', '2', '3']
}
expected_file_dict = {
'f_a': ['1', '3_changed'],
'f_b': ['1', '2', '3']
}
diff = Diff(file_dict['f_a'])
diff.delete_line(2)
diff.change_line(3, '3', '3_changed')
uut = Result('origin', 'msg', diffs={'f_a': diff})
uut.apply(file_dict)
self.assertEqual(file_dict, expected_file_dict)
def test_add(self):
file_dict = {
'f_a': ['1', '2', '3'],
'f_b': ['1', '2', '3'],
'f_c': ['1', '2', '3']
}
expected_file_dict = {
'f_a': ['1', '3_changed'],
'f_b': ['1', '2', '3_changed'],
'f_c': ['1', '2', '3']
}
diff = Diff(file_dict['f_a'])
diff.delete_line(2)
uut1 = Result('origin', 'msg', diffs={'f_a': diff})
diff = Diff(file_dict['f_a'])
diff.change_line(3, '3', '3_changed')
uut2 = Result('origin', 'msg', diffs={'f_a': diff})
diff = Diff(file_dict['f_b'])
diff.change_line(3, '3', '3_changed')
uut3 = Result('origin', 'msg', diffs={'f_b': diff})
uut1 += uut2 + uut3
uut1.apply(file_dict)
self.assertEqual(file_dict, expected_file_dict)
def test_overlaps(self):
overlapping_range = SourceRange.from_values('file1', 1, 1, 2, 2)
nonoverlapping_range = SourceRange.from_values('file2', 1, 1, 2, 2)
uut = Result.from_values('origin',
'message',
file='file1',
line=1,
column=1,
end_line=2,
end_column=2)
self.assertTrue(uut.overlaps(overlapping_range))
self.assertTrue(uut.overlaps([overlapping_range]))
self.assertFalse(uut.overlaps(nonoverlapping_range))
def test_location_repr(self):
result_a = Result(origin='o', message='m')
self.assertEqual(result_a.location_repr(), 'the whole project')
result_b = Result.from_values('o', 'm', file='e')
self.assertEqual(result_b.location_repr(), "'e'")
affected_code = (SourceRange.from_values('f'),
SourceRange.from_values('g'))
result_c = Result('o', 'm', affected_code=affected_code)
self.assertEqual(result_c.location_repr(), "'f', 'g'")
affected_code = (SourceRange.from_values('f'),
SourceRange.from_values('f'))
result_d = Result('o', 'm', affected_code=affected_code)
self.assertEqual(result_d.location_repr(), "'f'")
def test_json_diff(self):
file_dict = {
'f_a': ['1', '2', '3'],
'f_b': ['1', '2', '3']
}
diff = Diff(file_dict['f_a'])
diff.delete_line(2)
diff.change_line(3, '3', '3_changed')
uut = Result('origin', 'msg', diffs={'f_a': diff}).__json__(True)
self.assertEqual(uut['diffs']['f_a'].__json__(), '--- \n'
'+++ \n'
'@@ -1,3 +1,2 @@\n'
                                                         ' 1\n-2\n-3\n+3_changed')
JSONEncoder = create_json_encoder(use_relpath=True)
json_dump = json.dumps(diff, cls=JSONEncoder, sort_keys=True)
self.assertEqual(
json_dump, '"--- \\n+++ \\n@@ -1,3 +1,2 @@\\n 1-2-3+3_changed"')
|
feinsteinben/normalizr
|
refs/heads/master
|
normalizr/regex.py
|
1
|
#-*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import re
"""
Regular expression to match URLs as seen on http://daringfireball.net/2010/07/improved_regex_for_matching_urls
"""
URL_REGEX = re.compile(
r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
re.IGNORECASE)
"""
Regular expression to match email addresses as seen on http://www.wellho.net/resources/ex.php4?item=y115/relib.py
"""
EMAIL_REGEX = re.compile(r"(?:^|\s)[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}(?:\s|$)", re.IGNORECASE)
try:
EMOJI_REGEX = re.compile(u'([\U00002600-\U000027BF])|([\U0001f300-\U0001f64F])|([\U0001f680-\U0001f6FF])')
except re.error:
EMOJI_REGEX = re.compile(u'([\u2600-\u27BF])|([\uD83C][\uDF00-\uDFFF])|([\uD83D][\uDC00-\uDE4F])|([\uD83D][\uDE80-\uDEFF])')
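# Hedged smoke test (sketch): the sample inputs below are illustrative and
# not part of the original module.
if __name__ == '__main__':
    print(URL_REGEX.search('see http://example.com/page for details').group(0))
    print(EMAIL_REGEX.search('contact user@example.com today').group(0).strip())
    print(EMOJI_REGEX.search(u'ok \U0001F600') is not None)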
|
crisis-economics/housingModel
|
refs/heads/master
|
src/main/resources/calibration/code/bak/RentalPrice.py
|
2
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 14 15:44:04 2015
@author: daniel
"""
#import sys
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
#from pandas import Series, DataFrame
import Datasets as ds
###### RENTAL BID PRICE DECISION
class RentalPriceDecision:
'Decision on how much to spend on rent'
incomeField = "hhincx" # "HYEARGRx"
renterData = pd.DataFrame()
population = []
xbins = []
ybins = []
xaxis = []
yaxis = []
popdf = pd.DataFrame()
# extract data from file
def __init__(self):
rawData = ds.EHSinterview()
incomeRent = rawData.loc[:,[self.incomeField,"rentwkx","bedrqx"]] # Filter for fields of interest
self.renterData = incomeRent[incomeRent["rentwkx"]>0] # filter out non renters
# self.renterData = self.renterData[self.renterData["bedrqx"]==4] # only consider one-bed
# split the data into 2D histogram data
self.population, self.xbins, self.ybins = np.histogram2d(
np.log(self.renterData["rentwkx"].values),
np.log(self.renterData[self.incomeField].values),
bins=[40,30])
self.xaxis = (np.array(self.xbins[1:]) + np.array(self.xbins[:-1]))/2.0
self.yaxis = (np.array(self.ybins[1:]) + np.array(self.ybins[:-1]))/2.0
self.popdf = pd.DataFrame(
data=np.zeros(((len(self.xbins)-1)*(len(self.ybins)-1),3)),
columns = ['rental price', 'income', 'p'])
i = 0
totalPop = self.population.sum()
for param in range(1,len(self.ybins)):
for out in range(1,len(self.xbins)):
self.popdf.iloc[i,0] = (self.xbins[out] + self.xbins[out-1])/2.0
self.popdf.iloc[i,1] = (self.ybins[param] + self.ybins[param-1])/2.0
self.popdf.iloc[i,2] = self.population[out-1,param-1]*1.0/totalPop
i += 1
# plot
def plotProbability(self):
plt.figure(figsize=(10, 10))
# im = plt.imshow(self.normalised(), origin='low', cmap=cm.jet)
        im = plt.imshow(self.columnNormalised(), origin='lower', cmap=cm.jet)
plt.colorbar(im, orientation='horizontal')
plt.show()
def plotAverage(self):
        averageRent = np.dot(self.xbins[0:40], self.columnNormalised())
plt.plot(averageRent)
def columnNormalised(self):
col_sums = self.population.sum(axis=0)
probability = self.population / col_sums[np.newaxis,:]
return(probability)
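    # e.g. (sketch): after columnNormalised() each column sums to 1, so entry
    # [rent_bin, income_bin] can be read as P(log rent | log income bin).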
def normalised(self):
return(self.population * 1.0 / self.population.sum())
rentPrice = RentalPriceDecision()
rentPrice.plotProbability()
#store = pd.HDFStore("pdfRentalPrice.hd5")
#store.append('data', pdf)
#store.close()
#rentPrice.renterData.replace(to_replace=".*more.*", value=100000, inplace=True, regex=True)
#print rentPrice.renterData
|
CamelBackNotation/CarnotKE
|
refs/heads/master
|
jyhton/lib-python/2.7/test/test_dummy_thread.py
|
127
|
"""Generic thread tests.
Meant to be used by dummy_thread and thread. To allow for different modules
to be used, test_main() can be called with the module to use as the thread
implementation as its sole argument.
"""
import dummy_thread as _thread
import time
import Queue
import random
import unittest
from test import test_support
DELAY = 0 # Set > 0 when testing a module other than dummy_thread, such as
# the 'thread' module.
class LockTests(unittest.TestCase):
"""Test lock objects."""
def setUp(self):
# Create a lock
self.lock = _thread.allocate_lock()
def test_initlock(self):
        #Make sure locks start unlocked
self.assertTrue(not self.lock.locked(),
"Lock object is not initialized unlocked.")
def test_release(self):
# Test self.lock.release()
self.lock.acquire()
self.lock.release()
self.assertTrue(not self.lock.locked(),
"Lock object did not release properly.")
def test_improper_release(self):
        #Make sure release of an unlocked lock raises _thread.error
self.assertRaises(_thread.error, self.lock.release)
def test_cond_acquire_success(self):
#Make sure the conditional acquiring of the lock works.
self.assertTrue(self.lock.acquire(0),
"Conditional acquiring of the lock failed.")
def test_cond_acquire_fail(self):
#Test acquiring locked lock returns False
self.lock.acquire(0)
self.assertTrue(not self.lock.acquire(0),
"Conditional acquiring of a locked lock incorrectly "
"succeeded.")
def test_uncond_acquire_success(self):
#Make sure unconditional acquiring of a lock works.
self.lock.acquire()
self.assertTrue(self.lock.locked(),
"Uncondional locking failed.")
def test_uncond_acquire_return_val(self):
#Make sure that an unconditional locking returns True.
self.assertTrue(self.lock.acquire(1) is True,
"Unconditional locking did not return True.")
self.assertTrue(self.lock.acquire() is True)
def test_uncond_acquire_blocking(self):
#Make sure that unconditional acquiring of a locked lock blocks.
def delay_unlock(to_unlock, delay):
"""Hold on to lock for a set amount of time before unlocking."""
time.sleep(delay)
to_unlock.release()
self.lock.acquire()
start_time = int(time.time())
_thread.start_new_thread(delay_unlock,(self.lock, DELAY))
if test_support.verbose:
print
print "*** Waiting for thread to release the lock "\
"(approx. %s sec.) ***" % DELAY
self.lock.acquire()
end_time = int(time.time())
if test_support.verbose:
print "done"
self.assertTrue((end_time - start_time) >= DELAY,
"Blocking by unconditional acquiring failed.")
class MiscTests(unittest.TestCase):
"""Miscellaneous tests."""
def test_exit(self):
#Make sure _thread.exit() raises SystemExit
self.assertRaises(SystemExit, _thread.exit)
def test_ident(self):
#Test sanity of _thread.get_ident()
self.assertIsInstance(_thread.get_ident(), int,
"_thread.get_ident() returned a non-integer")
self.assertTrue(_thread.get_ident() != 0,
"_thread.get_ident() returned 0")
def test_LockType(self):
        #Make sure _thread.LockType is the same type as _thread.allocate_lock()
self.assertIsInstance(_thread.allocate_lock(), _thread.LockType,
"_thread.LockType is not an instance of what "
"is returned by _thread.allocate_lock()")
def test_interrupt_main(self):
#Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
self.assertRaises(KeyboardInterrupt, _thread.start_new_thread,
call_interrupt, tuple())
def test_interrupt_in_main(self):
        # Make sure that if interrupt_main is called in the main thread,
        # KeyboardInterrupt is raised instantly.
self.assertRaises(KeyboardInterrupt, _thread.interrupt_main)
class ThreadTests(unittest.TestCase):
"""Test thread creation."""
def test_arg_passing(self):
#Make sure that parameter passing works.
def arg_tester(queue, arg1=False, arg2=False):
"""Use to test _thread.start_new_thread() passes args properly."""
queue.put((arg1, arg2))
testing_queue = Queue.Queue(1)
_thread.start_new_thread(arg_tester, (testing_queue, True, True))
result = testing_queue.get()
self.assertTrue(result[0] and result[1],
"Argument passing for thread creation using tuple failed")
_thread.start_new_thread(arg_tester, tuple(), {'queue':testing_queue,
'arg1':True, 'arg2':True})
result = testing_queue.get()
self.assertTrue(result[0] and result[1],
"Argument passing for thread creation using kwargs failed")
_thread.start_new_thread(arg_tester, (testing_queue, True), {'arg2':True})
result = testing_queue.get()
self.assertTrue(result[0] and result[1],
"Argument passing for thread creation using both tuple"
" and kwargs failed")
def test_multi_creation(self):
#Make sure multiple threads can be created.
def queue_mark(queue, delay):
"""Wait for ``delay`` seconds and then put something into ``queue``"""
time.sleep(delay)
queue.put(_thread.get_ident())
thread_count = 5
testing_queue = Queue.Queue(thread_count)
if test_support.verbose:
print
print "*** Testing multiple thread creation "\
"(will take approx. %s to %s sec.) ***" % (DELAY, thread_count)
for count in xrange(thread_count):
if DELAY:
local_delay = round(random.random(), 1)
else:
local_delay = 0
_thread.start_new_thread(queue_mark,
(testing_queue, local_delay))
time.sleep(DELAY)
if test_support.verbose:
print 'done'
self.assertTrue(testing_queue.qsize() == thread_count,
"Not all %s threads executed properly after %s sec." %
(thread_count, DELAY))
def test_main(imported_module=None):
global _thread, DELAY
if imported_module:
_thread = imported_module
DELAY = 2
if test_support.verbose:
print
print "*** Using %s as _thread module ***" % _thread
test_support.run_unittest(LockTests, MiscTests, ThreadTests)
if __name__ == '__main__':
test_main()
|
blackzw/openwrt_sdk_dev1
|
refs/heads/master
|
staging_dir/host/lib/python2.7/test/string_tests.py
|
29
|
"""
Common tests shared by test_str, test_unicode, test_userstring and test_string.
"""
import unittest, string, sys, struct
from test import test_support
from UserList import UserList
class Sequence:
def __init__(self, seq='wxyz'): self.seq = seq
def __len__(self): return len(self.seq)
def __getitem__(self, i): return self.seq[i]
class BadSeq1(Sequence):
def __init__(self): self.seq = [7, 'hello', 123L]
class BadSeq2(Sequence):
def __init__(self): self.seq = ['a', 'b', 'c']
def __len__(self): return 8
class CommonTest(unittest.TestCase):
    # This testcase contains tests that can be used in all
    # stringlike classes. Currently this is str, unicode,
    # UserString and the string module.
    # The type to be tested
    # Change in subclasses to change the behaviour of fixtype()
    type2test = None
    # All tests pass their arguments to the testing methods
    # as str objects. fixtype() can be used to propagate
    # these arguments to the appropriate type
def fixtype(self, obj):
if isinstance(obj, str):
return self.__class__.type2test(obj)
elif isinstance(obj, list):
return [self.fixtype(x) for x in obj]
elif isinstance(obj, tuple):
return tuple([self.fixtype(x) for x in obj])
elif isinstance(obj, dict):
return dict([
(self.fixtype(key), self.fixtype(value))
for (key, value) in obj.iteritems()
])
else:
return obj
# check that object.method(*args) returns result
def checkequal(self, result, object, methodname, *args):
result = self.fixtype(result)
object = self.fixtype(object)
args = self.fixtype(args)
realresult = getattr(object, methodname)(*args)
self.assertEqual(
result,
realresult
)
# if the original is returned make sure that
# this doesn't happen with subclasses
if object == realresult:
class subtype(self.__class__.type2test):
pass
object = subtype(object)
realresult = getattr(object, methodname)(*args)
self.assertTrue(object is not realresult)
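    # e.g. (sketch): self.checkequal('Aaaa', 'aaaa', 'capitalize') asserts
    # that type2test('aaaa').capitalize() == type2test('Aaaa'), and also that
    # a subclass instance is never handed back as the result object itself.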
# check that object.method(*args) raises exc
def checkraises(self, exc, object, methodname, *args):
object = self.fixtype(object)
args = self.fixtype(args)
self.assertRaises(
exc,
getattr(object, methodname),
*args
)
# call object.method(*args) without any checks
def checkcall(self, object, methodname, *args):
object = self.fixtype(object)
args = self.fixtype(args)
getattr(object, methodname)(*args)
def test_hash(self):
# SF bug 1054139: += optimization was not invalidating cached hash value
a = self.type2test('DNSSEC')
b = self.type2test('')
for c in a:
b += c
hash(b)
self.assertEqual(hash(a), hash(b))
def test_capitalize(self):
self.checkequal(' hello ', ' hello ', 'capitalize')
self.checkequal('Hello ', 'Hello ','capitalize')
self.checkequal('Hello ', 'hello ','capitalize')
self.checkequal('Aaaa', 'aaaa', 'capitalize')
self.checkequal('Aaaa', 'AaAa', 'capitalize')
self.checkraises(TypeError, 'hello', 'capitalize', 42)
def test_count(self):
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(2, 'aaa', 'count', 'a', 1)
self.checkequal(0, 'aaa', 'count', 'a', 10)
self.checkequal(1, 'aaa', 'count', 'a', -1)
self.checkequal(3, 'aaa', 'count', 'a', -10)
self.checkequal(1, 'aaa', 'count', 'a', 0, 1)
self.checkequal(3, 'aaa', 'count', 'a', 0, 10)
self.checkequal(2, 'aaa', 'count', 'a', 0, -1)
self.checkequal(0, 'aaa', 'count', 'a', 0, -10)
self.checkequal(3, 'aaa', 'count', '', 1)
self.checkequal(1, 'aaa', 'count', '', 3)
self.checkequal(0, 'aaa', 'count', '', 10)
self.checkequal(2, 'aaa', 'count', '', -1)
self.checkequal(4, 'aaa', 'count', '', -10)
self.checkequal(1, '', 'count', '')
self.checkequal(0, '', 'count', '', 1, 1)
self.checkequal(0, '', 'count', '', sys.maxint, 0)
self.checkequal(0, '', 'count', 'xx')
self.checkequal(0, '', 'count', 'xx', 1, 1)
self.checkequal(0, '', 'count', 'xx', sys.maxint, 0)
self.checkraises(TypeError, 'hello', 'count')
self.checkraises(TypeError, 'hello', 'count', 42)
# For a variety of combinations,
# verify that str.count() matches an equivalent function
# replacing all occurrences and then differencing the string lengths
charset = ['', 'a', 'b']
digits = 7
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
n = len(i)
for j in teststrings:
r1 = i.count(j)
if j:
r2, rem = divmod(n - len(i.replace(j, '')), len(j))
else:
r2, rem = len(i)+1, 0
if rem or r1 != r2:
self.assertEqual(rem, 0, '%s != 0 for %s' % (rem, i))
self.assertEqual(r1, r2, '%s != %s for %s' % (r1, r2, i))
def test_find(self):
self.checkequal(0, 'abcdefghiabc', 'find', 'abc')
self.checkequal(9, 'abcdefghiabc', 'find', 'abc', 1)
self.checkequal(-1, 'abcdefghiabc', 'find', 'def', 4)
self.checkequal(0, 'abc', 'find', '', 0)
self.checkequal(3, 'abc', 'find', '', 3)
self.checkequal(-1, 'abc', 'find', '', 4)
# to check the ability to pass None as defaults
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4)
self.checkequal(-1, 'rrarrrrrrrrra', 'find', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'find')
self.checkraises(TypeError, 'hello', 'find', 42)
self.checkequal(0, '', 'find', '')
self.checkequal(-1, '', 'find', '', 1, 1)
self.checkequal(-1, '', 'find', '', sys.maxint, 0)
self.checkequal(-1, '', 'find', 'xx')
self.checkequal(-1, '', 'find', 'xx', 1, 1)
self.checkequal(-1, '', 'find', 'xx', sys.maxint, 0)
# issue 7458
self.checkequal(-1, 'ab', 'find', 'xxx', sys.maxsize + 1, 0)
# For a variety of combinations,
# verify that str.find() matches __contains__
# and that the found substring is really at that location
charset = ['', 'a', 'b', 'c']
digits = 5
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
for j in teststrings:
loc = i.find(j)
r1 = (loc != -1)
r2 = j in i
self.assertEqual(r1, r2)
if loc != -1:
self.assertEqual(i[loc:loc+len(j)], j)
def test_rfind(self):
self.checkequal(9, 'abcdefghiabc', 'rfind', 'abc')
self.checkequal(12, 'abcdefghiabc', 'rfind', '')
self.checkequal(0, 'abcdefghiabc', 'rfind', 'abcd')
self.checkequal(-1, 'abcdefghiabc', 'rfind', 'abcz')
self.checkequal(3, 'abc', 'rfind', '', 0)
self.checkequal(3, 'abc', 'rfind', '', 3)
self.checkequal(-1, 'abc', 'rfind', '', 4)
# to check the ability to pass None as defaults
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4)
self.checkequal(-1, 'rrarrrrrrrrra', 'rfind', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'rfind', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'rfind')
self.checkraises(TypeError, 'hello', 'rfind', 42)
# For a variety of combinations,
# verify that str.rfind() matches __contains__
# and that the found substring is really at that location
charset = ['', 'a', 'b', 'c']
digits = 5
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
for j in teststrings:
loc = i.rfind(j)
r1 = (loc != -1)
r2 = j in i
self.assertEqual(r1, r2)
if loc != -1:
self.assertEqual(i[loc:loc+len(j)], self.fixtype(j))
# issue 7458
self.checkequal(-1, 'ab', 'rfind', 'xxx', sys.maxsize + 1, 0)
def test_index(self):
self.checkequal(0, 'abcdefghiabc', 'index', '')
self.checkequal(3, 'abcdefghiabc', 'index', 'def')
self.checkequal(0, 'abcdefghiabc', 'index', 'abc')
self.checkequal(9, 'abcdefghiabc', 'index', 'abc', 1)
self.checkraises(ValueError, 'abcdefghiabc', 'index', 'hib')
self.checkraises(ValueError, 'abcdefghiab', 'index', 'abc', 1)
self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', 8)
self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', -1)
# to check the ability to pass None as defaults
self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4)
self.checkraises(ValueError, 'rrarrrrrrrrra', 'index', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'index')
self.checkraises(TypeError, 'hello', 'index', 42)
def test_rindex(self):
self.checkequal(12, 'abcdefghiabc', 'rindex', '')
self.checkequal(3, 'abcdefghiabc', 'rindex', 'def')
self.checkequal(9, 'abcdefghiabc', 'rindex', 'abc')
self.checkequal(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)
self.checkraises(ValueError, 'abcdefghiabc', 'rindex', 'hib')
self.checkraises(ValueError, 'defghiabc', 'rindex', 'def', 1)
self.checkraises(ValueError, 'defghiabc', 'rindex', 'abc', 0, -1)
self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, 8)
self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, -1)
# to check the ability to pass None as defaults
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4)
self.checkraises(ValueError, 'rrarrrrrrrrra', 'rindex', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'rindex', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'rindex')
self.checkraises(TypeError, 'hello', 'rindex', 42)
def test_lower(self):
self.checkequal('hello', 'HeLLo', 'lower')
self.checkequal('hello', 'hello', 'lower')
self.checkraises(TypeError, 'hello', 'lower', 42)
def test_upper(self):
self.checkequal('HELLO', 'HeLLo', 'upper')
self.checkequal('HELLO', 'HELLO', 'upper')
self.checkraises(TypeError, 'hello', 'upper', 42)
def test_expandtabs(self):
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 4)
self.checkequal('abc\r\nab def\ng hi', 'abc\r\nab\tdef\ng\thi', 'expandtabs', 4)
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
self.checkequal('abc\r\nab\r\ndef\ng\r\nhi', 'abc\r\nab\r\ndef\ng\r\nhi', 'expandtabs', 4)
self.checkequal(' a\n b', ' \ta\n\tb', 'expandtabs', 1)
self.checkraises(TypeError, 'hello', 'expandtabs', 42, 42)
# This test is only valid when sizeof(int) == sizeof(void*) == 4.
if sys.maxint < (1 << 32) and struct.calcsize('P') == 4:
self.checkraises(OverflowError,
'\ta\n\tb', 'expandtabs', sys.maxint)
def test_split(self):
self.checkequal(['this', 'is', 'the', 'split', 'function'],
'this is the split function', 'split')
# by whitespace
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'split')
self.checkequal(['a', 'b c d'], 'a b c d', 'split', None, 1)
self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None,
sys.maxint-1)
self.checkequal(['a b c d'], 'a b c d', 'split', None, 0)
self.checkequal(['a b c d'], ' a b c d', 'split', None, 0)
self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
self.checkequal([], ' ', 'split')
self.checkequal(['a'], ' a ', 'split')
self.checkequal(['a', 'b'], ' a b ', 'split')
self.checkequal(['a', 'b '], ' a b ', 'split', None, 1)
self.checkequal(['a', 'b c '], ' a b c ', 'split', None, 1)
self.checkequal(['a', 'b', 'c '], ' a b c ', 'split', None, 2)
self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'split')
aaa = ' a '*20
self.checkequal(['a']*20, aaa, 'split')
self.checkequal(['a'] + [aaa[4:]], aaa, 'split', None, 1)
self.checkequal(['a']*19 + ['a '], aaa, 'split', None, 19)
# by a char
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|')
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
self.checkequal(['a', 'b|c|d'], 'a|b|c|d', 'split', '|', 1)
self.checkequal(['a', 'b', 'c|d'], 'a|b|c|d', 'split', '|', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|',
sys.maxint-2)
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
self.checkequal(['a', '', 'b||c||d'], 'a||b||c||d', 'split', '|', 2)
self.checkequal(['endcase ', ''], 'endcase |', 'split', '|')
self.checkequal(['', ' startcase'], '| startcase', 'split', '|')
self.checkequal(['', 'bothcase', ''], '|bothcase|', 'split', '|')
self.checkequal(['a', '', 'b\x00c\x00d'], 'a\x00\x00b\x00c\x00d', 'split', '\x00', 2)
self.checkequal(['a']*20, ('a|'*20)[:-1], 'split', '|')
self.checkequal(['a']*15 +['a|a|a|a|a'],
('a|'*20)[:-1], 'split', '|', 15)
# by string
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
self.checkequal(['a', 'b//c//d'], 'a//b//c//d', 'split', '//', 1)
self.checkequal(['a', 'b', 'c//d'], 'a//b//c//d', 'split', '//', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//',
sys.maxint-10)
self.checkequal(['a//b//c//d'], 'a//b//c//d', 'split', '//', 0)
self.checkequal(['a', '', 'b////c////d'], 'a////b////c////d', 'split', '//', 2)
self.checkequal(['endcase ', ''], 'endcase test', 'split', 'test')
self.checkequal(['', ' begincase'], 'test begincase', 'split', 'test')
self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
'split', 'test')
self.checkequal(['a', 'bc'], 'abbbc', 'split', 'bb')
self.checkequal(['', ''], 'aaa', 'split', 'aaa')
self.checkequal(['aaa'], 'aaa', 'split', 'aaa', 0)
self.checkequal(['ab', 'ab'], 'abbaab', 'split', 'ba')
self.checkequal(['aaaa'], 'aaaa', 'split', 'aab')
self.checkequal([''], '', 'split', 'aaa')
self.checkequal(['aa'], 'aa', 'split', 'aaa')
self.checkequal(['A', 'bobb'], 'Abbobbbobb', 'split', 'bbobb')
self.checkequal(['A', 'B', ''], 'AbbobbBbbobb', 'split', 'bbobb')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH', 19)
self.checkequal(['a']*18 + ['aBLAHa'], ('aBLAH'*20)[:-4],
'split', 'BLAH', 18)
# mixed use of str and unicode
self.checkequal([u'a', u'b', u'c d'], 'a b c d', 'split', u' ', 2)
# argument type
self.checkraises(TypeError, 'hello', 'split', 42, 42, 42)
# null case
self.checkraises(ValueError, 'hello', 'split', '')
self.checkraises(ValueError, 'hello', 'split', '', 0)
def test_rsplit(self):
self.checkequal(['this', 'is', 'the', 'rsplit', 'function'],
'this is the rsplit function', 'rsplit')
# by whitespace
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'rsplit')
self.checkequal(['a b c', 'd'], 'a b c d', 'rsplit', None, 1)
self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None,
sys.maxint-20)
self.checkequal(['a b c d'], 'a b c d', 'rsplit', None, 0)
self.checkequal(['a b c d'], 'a b c d ', 'rsplit', None, 0)
self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
self.checkequal([], ' ', 'rsplit')
self.checkequal(['a'], ' a ', 'rsplit')
self.checkequal(['a', 'b'], ' a b ', 'rsplit')
self.checkequal([' a', 'b'], ' a b ', 'rsplit', None, 1)
self.checkequal([' a b','c'], ' a b c ', 'rsplit',
None, 1)
self.checkequal([' a', 'b', 'c'], ' a b c ', 'rsplit',
None, 2)
self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'rsplit', None, 88)
aaa = ' a '*20
self.checkequal(['a']*20, aaa, 'rsplit')
self.checkequal([aaa[:-4]] + ['a'], aaa, 'rsplit', None, 1)
self.checkequal([' a a'] + ['a']*18, aaa, 'rsplit', None, 18)
# by a char
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|')
self.checkequal(['a|b|c', 'd'], 'a|b|c|d', 'rsplit', '|', 1)
self.checkequal(['a|b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|',
sys.maxint-100)
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'rsplit', '|', 0)
self.checkequal(['a||b||c', '', 'd'], 'a||b||c||d', 'rsplit', '|', 2)
self.checkequal(['', ' begincase'], '| begincase', 'rsplit', '|')
self.checkequal(['endcase ', ''], 'endcase |', 'rsplit', '|')
self.checkequal(['', 'bothcase', ''], '|bothcase|', 'rsplit', '|')
self.checkequal(['a\x00\x00b', 'c', 'd'], 'a\x00\x00b\x00c\x00d', 'rsplit', '\x00', 2)
self.checkequal(['a']*20, ('a|'*20)[:-1], 'rsplit', '|')
self.checkequal(['a|a|a|a|a']+['a']*15,
('a|'*20)[:-1], 'rsplit', '|', 15)
# by string
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//')
self.checkequal(['a//b//c', 'd'], 'a//b//c//d', 'rsplit', '//', 1)
self.checkequal(['a//b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//',
sys.maxint-5)
self.checkequal(['a//b//c//d'], 'a//b//c//d', 'rsplit', '//', 0)
self.checkequal(['a////b////c', '', 'd'], 'a////b////c////d', 'rsplit', '//', 2)
self.checkequal(['', ' begincase'], 'test begincase', 'rsplit', 'test')
self.checkequal(['endcase ', ''], 'endcase test', 'rsplit', 'test')
self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
'rsplit', 'test')
self.checkequal(['ab', 'c'], 'abbbc', 'rsplit', 'bb')
self.checkequal(['', ''], 'aaa', 'rsplit', 'aaa')
self.checkequal(['aaa'], 'aaa', 'rsplit', 'aaa', 0)
self.checkequal(['ab', 'ab'], 'abbaab', 'rsplit', 'ba')
self.checkequal(['aaaa'], 'aaaa', 'rsplit', 'aab')
self.checkequal([''], '', 'rsplit', 'aaa')
self.checkequal(['aa'], 'aa', 'rsplit', 'aaa')
self.checkequal(['bbob', 'A'], 'bbobbbobbA', 'rsplit', 'bbobb')
self.checkequal(['', 'B', 'A'], 'bbobbBbbobbA', 'rsplit', 'bbobb')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH', 19)
self.checkequal(['aBLAHa'] + ['a']*18, ('aBLAH'*20)[:-4],
'rsplit', 'BLAH', 18)
# mixed use of str and unicode
self.checkequal([u'a b', u'c', u'd'], 'a b c d', 'rsplit', u' ', 2)
# argument type
self.checkraises(TypeError, 'hello', 'rsplit', 42, 42, 42)
# null case
self.checkraises(ValueError, 'hello', 'rsplit', '')
self.checkraises(ValueError, 'hello', 'rsplit', '', 0)
def test_strip(self):
self.checkequal('hello', ' hello ', 'strip')
self.checkequal('hello ', ' hello ', 'lstrip')
self.checkequal(' hello', ' hello ', 'rstrip')
self.checkequal('hello', 'hello', 'strip')
# strip/lstrip/rstrip with None arg
self.checkequal('hello', ' hello ', 'strip', None)
self.checkequal('hello ', ' hello ', 'lstrip', None)
self.checkequal(' hello', ' hello ', 'rstrip', None)
self.checkequal('hello', 'hello', 'strip', None)
# strip/lstrip/rstrip with str arg
self.checkequal('hello', 'xyzzyhelloxyzzy', 'strip', 'xyz')
self.checkequal('helloxyzzy', 'xyzzyhelloxyzzy', 'lstrip', 'xyz')
self.checkequal('xyzzyhello', 'xyzzyhelloxyzzy', 'rstrip', 'xyz')
self.checkequal('hello', 'hello', 'strip', 'xyz')
# strip/lstrip/rstrip with unicode arg
if test_support.have_unicode:
self.checkequal(unicode('hello', 'ascii'), 'xyzzyhelloxyzzy',
'strip', unicode('xyz', 'ascii'))
self.checkequal(unicode('helloxyzzy', 'ascii'), 'xyzzyhelloxyzzy',
'lstrip', unicode('xyz', 'ascii'))
self.checkequal(unicode('xyzzyhello', 'ascii'), 'xyzzyhelloxyzzy',
'rstrip', unicode('xyz', 'ascii'))
# XXX
#self.checkequal(unicode('hello', 'ascii'), 'hello',
# 'strip', unicode('xyz', 'ascii'))
self.checkraises(TypeError, 'hello', 'strip', 42, 42)
self.checkraises(TypeError, 'hello', 'lstrip', 42, 42)
self.checkraises(TypeError, 'hello', 'rstrip', 42, 42)
def test_ljust(self):
self.checkequal('abc ', 'abc', 'ljust', 10)
self.checkequal('abc ', 'abc', 'ljust', 6)
self.checkequal('abc', 'abc', 'ljust', 3)
self.checkequal('abc', 'abc', 'ljust', 2)
self.checkequal('abc*******', 'abc', 'ljust', 10, '*')
self.checkraises(TypeError, 'abc', 'ljust')
def test_rjust(self):
self.checkequal(' abc', 'abc', 'rjust', 10)
self.checkequal(' abc', 'abc', 'rjust', 6)
self.checkequal('abc', 'abc', 'rjust', 3)
self.checkequal('abc', 'abc', 'rjust', 2)
self.checkequal('*******abc', 'abc', 'rjust', 10, '*')
self.checkraises(TypeError, 'abc', 'rjust')
def test_center(self):
self.checkequal(' abc ', 'abc', 'center', 10)
self.checkequal(' abc ', 'abc', 'center', 6)
self.checkequal('abc', 'abc', 'center', 3)
self.checkequal('abc', 'abc', 'center', 2)
self.checkequal('***abc****', 'abc', 'center', 10, '*')
self.checkraises(TypeError, 'abc', 'center')
def test_swapcase(self):
self.checkequal('hEllO CoMPuTErS', 'HeLLo cOmpUteRs', 'swapcase')
self.checkraises(TypeError, 'hello', 'swapcase', 42)
def test_replace(self):
EQ = self.checkequal
# Operations on the empty string
EQ("", "", "replace", "", "")
EQ("A", "", "replace", "", "A")
EQ("", "", "replace", "A", "")
EQ("", "", "replace", "A", "A")
EQ("", "", "replace", "", "", 100)
EQ("", "", "replace", "", "", sys.maxint)
# interleave (from=="", 'to' gets inserted everywhere)
EQ("A", "A", "replace", "", "")
EQ("*A*", "A", "replace", "", "*")
EQ("*1A*1", "A", "replace", "", "*1")
EQ("*-#A*-#", "A", "replace", "", "*-#")
EQ("*-A*-A*-", "AA", "replace", "", "*-")
EQ("*-A*-A*-", "AA", "replace", "", "*-", -1)
EQ("*-A*-A*-", "AA", "replace", "", "*-", sys.maxint)
EQ("*-A*-A*-", "AA", "replace", "", "*-", 4)
EQ("*-A*-A*-", "AA", "replace", "", "*-", 3)
EQ("*-A*-A", "AA", "replace", "", "*-", 2)
EQ("*-AA", "AA", "replace", "", "*-", 1)
EQ("AA", "AA", "replace", "", "*-", 0)
# single character deletion (from=="A", to=="")
EQ("", "A", "replace", "A", "")
EQ("", "AAA", "replace", "A", "")
EQ("", "AAA", "replace", "A", "", -1)
EQ("", "AAA", "replace", "A", "", sys.maxint)
EQ("", "AAA", "replace", "A", "", 4)
EQ("", "AAA", "replace", "A", "", 3)
EQ("A", "AAA", "replace", "A", "", 2)
EQ("AA", "AAA", "replace", "A", "", 1)
EQ("AAA", "AAA", "replace", "A", "", 0)
EQ("", "AAAAAAAAAA", "replace", "A", "")
EQ("BCD", "ABACADA", "replace", "A", "")
EQ("BCD", "ABACADA", "replace", "A", "", -1)
EQ("BCD", "ABACADA", "replace", "A", "", sys.maxint)
EQ("BCD", "ABACADA", "replace", "A", "", 5)
EQ("BCD", "ABACADA", "replace", "A", "", 4)
EQ("BCDA", "ABACADA", "replace", "A", "", 3)
EQ("BCADA", "ABACADA", "replace", "A", "", 2)
EQ("BACADA", "ABACADA", "replace", "A", "", 1)
EQ("ABACADA", "ABACADA", "replace", "A", "", 0)
EQ("BCD", "ABCAD", "replace", "A", "")
EQ("BCD", "ABCADAA", "replace", "A", "")
EQ("BCD", "BCD", "replace", "A", "")
EQ("*************", "*************", "replace", "A", "")
EQ("^A^", "^"+"A"*1000+"^", "replace", "A", "", 999)
# substring deletion (from=="the", to=="")
EQ("", "the", "replace", "the", "")
EQ("ater", "theater", "replace", "the", "")
EQ("", "thethe", "replace", "the", "")
EQ("", "thethethethe", "replace", "the", "")
EQ("aaaa", "theatheatheathea", "replace", "the", "")
EQ("that", "that", "replace", "the", "")
EQ("thaet", "thaet", "replace", "the", "")
EQ("here and re", "here and there", "replace", "the", "")
EQ("here and re and re", "here and there and there",
"replace", "the", "", sys.maxint)
EQ("here and re and re", "here and there and there",
"replace", "the", "", -1)
EQ("here and re and re", "here and there and there",
"replace", "the", "", 3)
EQ("here and re and re", "here and there and there",
"replace", "the", "", 2)
EQ("here and re and there", "here and there and there",
"replace", "the", "", 1)
EQ("here and there and there", "here and there and there",
"replace", "the", "", 0)
EQ("here and re and re", "here and there and there", "replace", "the", "")
EQ("abc", "abc", "replace", "the", "")
EQ("abcdefg", "abcdefg", "replace", "the", "")
# substring deletion (from=="bob", to=="")
EQ("bob", "bbobob", "replace", "bob", "")
EQ("bobXbob", "bbobobXbbobob", "replace", "bob", "")
EQ("aaaaaaa", "aaaaaaabob", "replace", "bob", "")
EQ("aaaaaaa", "aaaaaaa", "replace", "bob", "")
# single character replace in place (len(from)==len(to)==1)
EQ("Who goes there?", "Who goes there?", "replace", "o", "o")
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O")
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", sys.maxint)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", -1)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 3)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 2)
EQ("WhO goes there?", "Who goes there?", "replace", "o", "O", 1)
EQ("Who goes there?", "Who goes there?", "replace", "o", "O", 0)
EQ("Who goes there?", "Who goes there?", "replace", "a", "q")
EQ("who goes there?", "Who goes there?", "replace", "W", "w")
EQ("wwho goes there?ww", "WWho goes there?WW", "replace", "W", "w")
EQ("Who goes there!", "Who goes there?", "replace", "?", "!")
EQ("Who goes there!!", "Who goes there??", "replace", "?", "!")
EQ("Who goes there?", "Who goes there?", "replace", ".", "!")
# substring replace in place (len(from)==len(to) > 1)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**")
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", sys.maxint)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", -1)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 4)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 3)
EQ("Th** ** a tissue", "This is a tissue", "replace", "is", "**", 2)
EQ("Th** is a tissue", "This is a tissue", "replace", "is", "**", 1)
EQ("This is a tissue", "This is a tissue", "replace", "is", "**", 0)
EQ("cobob", "bobob", "replace", "bob", "cob")
EQ("cobobXcobocob", "bobobXbobobob", "replace", "bob", "cob")
EQ("bobob", "bobob", "replace", "bot", "bot")
# replace single character (len(from)==1, len(to)>1)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK")
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", -1)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", sys.maxint)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", 2)
EQ("ReyKKjavik", "Reykjavik", "replace", "k", "KK", 1)
EQ("Reykjavik", "Reykjavik", "replace", "k", "KK", 0)
EQ("A----B----C----", "A.B.C.", "replace", ".", "----")
EQ("Reykjavik", "Reykjavik", "replace", "q", "KK")
# replace substring (len(from)>1, len(to)!=len(from))
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham")
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", sys.maxint)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", -1)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", 4)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", 3)
EQ("ham, ham, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 2)
EQ("ham, spam, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 1)
EQ("spam, spam, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 0)
EQ("bobob", "bobobob", "replace", "bobob", "bob")
EQ("bobobXbobob", "bobobobXbobobob", "replace", "bobob", "bob")
EQ("BOBOBOB", "BOBOBOB", "replace", "bob", "bobby")
with test_support.check_py3k_warnings():
ba = buffer('a')
bb = buffer('b')
EQ("bbc", "abc", "replace", ba, bb)
EQ("aac", "abc", "replace", bb, ba)
#
self.checkequal('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
self.checkequal('onetwothree', 'one!two!three!', 'replace', '!', '')
self.checkequal('one@two@three!', 'one!two!three!', 'replace', '!', '@', 2)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 3)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 4)
self.checkequal('one!two!three!', 'one!two!three!', 'replace', '!', '@', 0)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@')
self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@')
self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@', 2)
self.checkequal('-a-b-c-', 'abc', 'replace', '', '-')
self.checkequal('-a-b-c', 'abc', 'replace', '', '-', 3)
self.checkequal('abc', 'abc', 'replace', '', '-', 0)
self.checkequal('', '', 'replace', '', '')
self.checkequal('abc', 'abc', 'replace', 'ab', '--', 0)
self.checkequal('abc', 'abc', 'replace', 'xy', '--')
# Next three for SF bug 422088: [OSF1 alpha] string.replace(); died with
# MemoryError due to empty result (platform malloc issue when requesting
# 0 bytes).
self.checkequal('', '123', 'replace', '123', '')
self.checkequal('', '123123', 'replace', '123', '')
self.checkequal('x', '123x123', 'replace', '123', '')
self.checkraises(TypeError, 'hello', 'replace')
self.checkraises(TypeError, 'hello', 'replace', 42)
self.checkraises(TypeError, 'hello', 'replace', 42, 'h')
self.checkraises(TypeError, 'hello', 'replace', 'h', 42)
def test_replace_overflow(self):
# Check for overflow checking on 32 bit machines
if sys.maxint != 2147483647 or struct.calcsize("P") > 4:
return
A2_16 = "A" * (2**16)
self.checkraises(OverflowError, A2_16, "replace", "", A2_16)
self.checkraises(OverflowError, A2_16, "replace", "A", A2_16)
self.checkraises(OverflowError, A2_16, "replace", "AA", A2_16+A2_16)
def test_zfill(self):
self.checkequal('123', '123', 'zfill', 2)
self.checkequal('123', '123', 'zfill', 3)
self.checkequal('0123', '123', 'zfill', 4)
self.checkequal('+123', '+123', 'zfill', 3)
self.checkequal('+123', '+123', 'zfill', 4)
self.checkequal('+0123', '+123', 'zfill', 5)
self.checkequal('-123', '-123', 'zfill', 3)
self.checkequal('-123', '-123', 'zfill', 4)
self.checkequal('-0123', '-123', 'zfill', 5)
self.checkequal('000', '', 'zfill', 3)
self.checkequal('34', '34', 'zfill', 1)
self.checkequal('0034', '34', 'zfill', 4)
self.checkraises(TypeError, '123', 'zfill')
# XXX alias for py3k forward compatibility
BaseTest = CommonTest
class MixinStrUnicodeUserStringTest:
# additional tests that only work for
# stringlike objects, i.e. str, unicode, UserString
# (but not the string module)
def test_islower(self):
self.checkequal(False, '', 'islower')
self.checkequal(True, 'a', 'islower')
self.checkequal(False, 'A', 'islower')
self.checkequal(False, '\n', 'islower')
self.checkequal(True, 'abc', 'islower')
self.checkequal(False, 'aBc', 'islower')
self.checkequal(True, 'abc\n', 'islower')
self.checkraises(TypeError, 'abc', 'islower', 42)
def test_isupper(self):
self.checkequal(False, '', 'isupper')
self.checkequal(False, 'a', 'isupper')
self.checkequal(True, 'A', 'isupper')
self.checkequal(False, '\n', 'isupper')
self.checkequal(True, 'ABC', 'isupper')
self.checkequal(False, 'AbC', 'isupper')
self.checkequal(True, 'ABC\n', 'isupper')
self.checkraises(TypeError, 'abc', 'isupper', 42)
def test_istitle(self):
self.checkequal(False, '', 'istitle')
self.checkequal(False, 'a', 'istitle')
self.checkequal(True, 'A', 'istitle')
self.checkequal(False, '\n', 'istitle')
self.checkequal(True, 'A Titlecased Line', 'istitle')
self.checkequal(True, 'A\nTitlecased Line', 'istitle')
self.checkequal(True, 'A Titlecased, Line', 'istitle')
self.checkequal(False, 'Not a capitalized String', 'istitle')
self.checkequal(False, 'Not\ta Titlecase String', 'istitle')
self.checkequal(False, 'Not--a Titlecase String', 'istitle')
self.checkequal(False, 'NOT', 'istitle')
self.checkraises(TypeError, 'abc', 'istitle', 42)
def test_isspace(self):
self.checkequal(False, '', 'isspace')
self.checkequal(False, 'a', 'isspace')
self.checkequal(True, ' ', 'isspace')
self.checkequal(True, '\t', 'isspace')
self.checkequal(True, '\r', 'isspace')
self.checkequal(True, '\n', 'isspace')
self.checkequal(True, ' \t\r\n', 'isspace')
self.checkequal(False, ' \t\r\na', 'isspace')
self.checkraises(TypeError, 'abc', 'isspace', 42)
def test_isalpha(self):
self.checkequal(False, '', 'isalpha')
self.checkequal(True, 'a', 'isalpha')
self.checkequal(True, 'A', 'isalpha')
self.checkequal(False, '\n', 'isalpha')
self.checkequal(True, 'abc', 'isalpha')
self.checkequal(False, 'aBc123', 'isalpha')
self.checkequal(False, 'abc\n', 'isalpha')
self.checkraises(TypeError, 'abc', 'isalpha', 42)
def test_isalnum(self):
self.checkequal(False, '', 'isalnum')
self.checkequal(True, 'a', 'isalnum')
self.checkequal(True, 'A', 'isalnum')
self.checkequal(False, '\n', 'isalnum')
self.checkequal(True, '123abc456', 'isalnum')
self.checkequal(True, 'a1b3c', 'isalnum')
self.checkequal(False, 'aBc000 ', 'isalnum')
self.checkequal(False, 'abc\n', 'isalnum')
self.checkraises(TypeError, 'abc', 'isalnum', 42)
def test_isdigit(self):
self.checkequal(False, '', 'isdigit')
self.checkequal(False, 'a', 'isdigit')
self.checkequal(True, '0', 'isdigit')
self.checkequal(True, '0123456789', 'isdigit')
self.checkequal(False, '0123456789a', 'isdigit')
self.checkraises(TypeError, 'abc', 'isdigit', 42)
def test_title(self):
self.checkequal(' Hello ', ' hello ', 'title')
self.checkequal('Hello ', 'hello ', 'title')
self.checkequal('Hello ', 'Hello ', 'title')
self.checkequal('Format This As Title String', "fOrMaT thIs aS titLe String", 'title')
self.checkequal('Format,This-As*Title;String', "fOrMaT,thIs-aS*titLe;String", 'title', )
self.checkequal('Getint', "getInt", 'title')
self.checkraises(TypeError, 'hello', 'title', 42)
def test_splitlines(self):
self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\rghi", 'splitlines')
self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\r\nghi", 'splitlines')
self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi", 'splitlines')
self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi\n", 'splitlines')
self.checkequal(['abc', 'def', 'ghi', ''], "abc\ndef\r\nghi\n\r", 'splitlines')
self.checkequal(['', 'abc', 'def', 'ghi', ''], "\nabc\ndef\r\nghi\n\r", 'splitlines')
self.checkequal(['\n', 'abc\n', 'def\r\n', 'ghi\n', '\r'], "\nabc\ndef\r\nghi\n\r", 'splitlines', 1)
self.checkraises(TypeError, 'abc', 'splitlines', 42, 42)
def test_startswith(self):
self.checkequal(True, 'hello', 'startswith', 'he')
self.checkequal(True, 'hello', 'startswith', 'hello')
self.checkequal(False, 'hello', 'startswith', 'hello world')
self.checkequal(True, 'hello', 'startswith', '')
self.checkequal(False, 'hello', 'startswith', 'ello')
self.checkequal(True, 'hello', 'startswith', 'ello', 1)
self.checkequal(True, 'hello', 'startswith', 'o', 4)
self.checkequal(False, 'hello', 'startswith', 'o', 5)
self.checkequal(True, 'hello', 'startswith', '', 5)
self.checkequal(False, 'hello', 'startswith', 'lo', 6)
self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3)
self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3, 7)
self.checkequal(False, 'helloworld', 'startswith', 'lowo', 3, 6)
# test negative indices
self.checkequal(True, 'hello', 'startswith', 'he', 0, -1)
self.checkequal(True, 'hello', 'startswith', 'he', -53, -1)
self.checkequal(False, 'hello', 'startswith', 'hello', 0, -1)
self.checkequal(False, 'hello', 'startswith', 'hello world', -1, -10)
self.checkequal(False, 'hello', 'startswith', 'ello', -5)
self.checkequal(True, 'hello', 'startswith', 'ello', -4)
self.checkequal(False, 'hello', 'startswith', 'o', -2)
self.checkequal(True, 'hello', 'startswith', 'o', -1)
self.checkequal(True, 'hello', 'startswith', '', -3, -3)
self.checkequal(False, 'hello', 'startswith', 'lo', -9)
self.checkraises(TypeError, 'hello', 'startswith')
self.checkraises(TypeError, 'hello', 'startswith', 42)
# test tuple arguments
self.checkequal(True, 'hello', 'startswith', ('he', 'ha'))
self.checkequal(False, 'hello', 'startswith', ('lo', 'llo'))
self.checkequal(True, 'hello', 'startswith', ('hellox', 'hello'))
self.checkequal(False, 'hello', 'startswith', ())
self.checkequal(True, 'helloworld', 'startswith', ('hellowo',
'rld', 'lowo'), 3)
self.checkequal(False, 'helloworld', 'startswith', ('hellowo', 'ello',
'rld'), 3)
self.checkequal(True, 'hello', 'startswith', ('lo', 'he'), 0, -1)
self.checkequal(False, 'hello', 'startswith', ('he', 'hel'), 0, 1)
self.checkequal(True, 'hello', 'startswith', ('he', 'hel'), 0, 2)
self.checkraises(TypeError, 'hello', 'startswith', (42,))
def test_endswith(self):
self.checkequal(True, 'hello', 'endswith', 'lo')
self.checkequal(False, 'hello', 'endswith', 'he')
self.checkequal(True, 'hello', 'endswith', '')
self.checkequal(False, 'hello', 'endswith', 'hello world')
self.checkequal(False, 'helloworld', 'endswith', 'worl')
self.checkequal(True, 'helloworld', 'endswith', 'worl', 3, 9)
self.checkequal(True, 'helloworld', 'endswith', 'world', 3, 12)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 1, 7)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 2, 7)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 3, 7)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 4, 7)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, 8)
self.checkequal(False, 'ab', 'endswith', 'ab', 0, 1)
self.checkequal(False, 'ab', 'endswith', 'ab', 0, 0)
# test negative indices
self.checkequal(True, 'hello', 'endswith', 'lo', -2)
self.checkequal(False, 'hello', 'endswith', 'he', -2)
self.checkequal(True, 'hello', 'endswith', '', -3, -3)
self.checkequal(False, 'hello', 'endswith', 'hello world', -10, -2)
self.checkequal(False, 'helloworld', 'endswith', 'worl', -6)
self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, -1)
self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, 9)
self.checkequal(True, 'helloworld', 'endswith', 'world', -7, 12)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -99, -3)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -8, -3)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -7, -3)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, -4)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', -8, -2)
self.checkraises(TypeError, 'hello', 'endswith')
self.checkraises(TypeError, 'hello', 'endswith', 42)
# test tuple arguments
self.checkequal(False, 'hello', 'endswith', ('he', 'ha'))
self.checkequal(True, 'hello', 'endswith', ('lo', 'llo'))
self.checkequal(True, 'hello', 'endswith', ('hellox', 'hello'))
self.checkequal(False, 'hello', 'endswith', ())
self.checkequal(True, 'helloworld', 'endswith', ('hellowo',
'rld', 'lowo'), 3)
self.checkequal(False, 'helloworld', 'endswith', ('hellowo', 'ello',
'rld'), 3, -1)
self.checkequal(True, 'hello', 'endswith', ('hell', 'ell'), 0, -1)
self.checkequal(False, 'hello', 'endswith', ('he', 'hel'), 0, 1)
self.checkequal(True, 'hello', 'endswith', ('he', 'hell'), 0, 4)
self.checkraises(TypeError, 'hello', 'endswith', (42,))
def test___contains__(self):
self.checkequal(True, '', '__contains__', '')
self.checkequal(True, 'abc', '__contains__', '')
self.checkequal(False, 'abc', '__contains__', '\0')
self.checkequal(True, '\0abc', '__contains__', '\0')
self.checkequal(True, 'abc\0', '__contains__', '\0')
self.checkequal(True, '\0abc', '__contains__', 'a')
self.checkequal(True, 'asdf', '__contains__', 'asdf')
self.checkequal(False, 'asd', '__contains__', 'asdf')
self.checkequal(False, '', '__contains__', 'asdf')
def test_subscript(self):
self.checkequal(u'a', 'abc', '__getitem__', 0)
self.checkequal(u'c', 'abc', '__getitem__', -1)
self.checkequal(u'a', 'abc', '__getitem__', 0L)
self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 3))
self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 1000))
self.checkequal(u'a', 'abc', '__getitem__', slice(0, 1))
self.checkequal(u'', 'abc', '__getitem__', slice(0, 0))
self.checkraises(TypeError, 'abc', '__getitem__', 'def')
def test_slice(self):
self.checkequal('abc', 'abc', '__getslice__', 0, 1000)
self.checkequal('abc', 'abc', '__getslice__', 0, 3)
self.checkequal('ab', 'abc', '__getslice__', 0, 2)
self.checkequal('bc', 'abc', '__getslice__', 1, 3)
self.checkequal('b', 'abc', '__getslice__', 1, 2)
self.checkequal('', 'abc', '__getslice__', 2, 2)
self.checkequal('', 'abc', '__getslice__', 1000, 1000)
self.checkequal('', 'abc', '__getslice__', 2000, 1000)
self.checkequal('', 'abc', '__getslice__', 2, 1)
self.checkraises(TypeError, 'abc', '__getslice__', 'def')
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
s = string.ascii_letters + string.digits
indices = (0, None, 1, 3, 41, -1, -2, -37)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
L = list(s)[start:stop:step]
self.checkequal(u"".join(L), s, '__getitem__',
slice(start, stop, step))
def test_mul(self):
self.checkequal('', 'abc', '__mul__', -1)
self.checkequal('', 'abc', '__mul__', 0)
self.checkequal('abc', 'abc', '__mul__', 1)
self.checkequal('abcabcabc', 'abc', '__mul__', 3)
self.checkraises(TypeError, 'abc', '__mul__')
self.checkraises(TypeError, 'abc', '__mul__', '')
# XXX: on a 64-bit system, this doesn't raise an overflow error,
# but either raises a MemoryError, or succeeds (if you have 54TiB)
#self.checkraises(OverflowError, 10000*'abc', '__mul__', 2000000000)
def test_join(self):
# join now works with any sequence type
# moved here, because the argument order is
# different in string.join (see the test in
# test.test_string.StringTest.test_join)
self.checkequal('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequal('abcd', '', 'join', ('a', 'b', 'c', 'd'))
self.checkequal('bd', '', 'join', ('', 'b', '', 'd'))
self.checkequal('ac', '', 'join', ('a', '', 'c', ''))
self.checkequal('w x y z', ' ', 'join', Sequence())
self.checkequal('abc', 'a', 'join', ('abc',))
self.checkequal('z', 'a', 'join', UserList(['z']))
if test_support.have_unicode:
self.checkequal(unicode('a.b.c'), unicode('.'), 'join', ['a', 'b', 'c'])
self.checkequal(unicode('a.b.c'), '.', 'join', [unicode('a'), 'b', 'c'])
self.checkequal(unicode('a.b.c'), '.', 'join', ['a', unicode('b'), 'c'])
self.checkequal(unicode('a.b.c'), '.', 'join', ['a', 'b', unicode('c')])
self.checkraises(TypeError, '.', 'join', ['a', unicode('b'), 3])
for i in [5, 25, 125]:
self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
['a' * i] * i)
self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
('a' * i,) * i)
self.checkraises(TypeError, ' ', 'join', BadSeq1())
self.checkequal('a b c', ' ', 'join', BadSeq2())
self.checkraises(TypeError, ' ', 'join')
self.checkraises(TypeError, ' ', 'join', 7)
self.checkraises(TypeError, ' ', 'join', Sequence([7, 'hello', 123L]))
try:
def f():
yield 4 + ""
self.fixtype(' ').join(f())
except TypeError, e:
if '+' not in str(e):
self.fail('join() ate exception message')
else:
self.fail('exception not raised')
def test_formatting(self):
self.checkequal('+hello+', '+%s+', '__mod__', 'hello')
self.checkequal('+10+', '+%d+', '__mod__', 10)
self.checkequal('a', "%c", '__mod__', "a")
self.checkequal('a', "%c", '__mod__', "a")
self.checkequal('"', "%c", '__mod__', 34)
self.checkequal('$', "%c", '__mod__', 36)
self.checkequal('10', "%d", '__mod__', 10)
self.checkequal('\x7f', "%c", '__mod__', 0x7f)
for ordinal in (-100, 0x200000):
# unicode raises ValueError, str raises OverflowError
self.checkraises((ValueError, OverflowError), '%c', '__mod__', ordinal)
longvalue = sys.maxint + 10L
slongvalue = str(longvalue)
if slongvalue[-1] in ("L","l"): slongvalue = slongvalue[:-1]
self.checkequal(' 42', '%3ld', '__mod__', 42)
self.checkequal('42', '%d', '__mod__', 42L)
self.checkequal('42', '%d', '__mod__', 42.0)
self.checkequal(slongvalue, '%d', '__mod__', longvalue)
self.checkcall('%d', '__mod__', float(longvalue))
self.checkequal('0042.00', '%07.2f', '__mod__', 42)
self.checkequal('0042.00', '%07.2F', '__mod__', 42)
self.checkraises(TypeError, 'abc', '__mod__')
self.checkraises(TypeError, '%(foo)s', '__mod__', 42)
self.checkraises(TypeError, '%s%s', '__mod__', (42,))
self.checkraises(TypeError, '%c', '__mod__', (None,))
self.checkraises(ValueError, '%(foo', '__mod__', {})
self.checkraises(TypeError, '%(foo)s %(bar)s', '__mod__', ('foo', 42))
self.checkraises(TypeError, '%d', '__mod__', "42") # not numeric
self.checkraises(TypeError, '%d', '__mod__', (42+0j)) # no int/long conversion provided
# argument names with properly nested brackets are supported
self.checkequal('bar', '%((foo))s', '__mod__', {'(foo)': 'bar'})
# 100 is a magic number in PyUnicode_Format, this forces a resize
self.checkequal(103*'a'+'x', '%sx', '__mod__', 103*'a')
self.checkraises(TypeError, '%*s', '__mod__', ('foo', 'bar'))
self.checkraises(TypeError, '%10.*f', '__mod__', ('foo', 42.))
self.checkraises(ValueError, '%10', '__mod__', (42,))
def test_floatformatting(self):
# float formatting
for prec in xrange(100):
format = '%%.%if' % prec
value = 0.01
for x in xrange(60):
value = value * 3.14159265359 / 3.0 * 10.0
self.checkcall(format, "__mod__", value)
def test_inplace_rewrites(self):
# Check that strings don't copy and modify cached single-character strings
self.checkequal('a', 'A', 'lower')
self.checkequal(True, 'A', 'isupper')
self.checkequal('A', 'a', 'upper')
self.checkequal(True, 'a', 'islower')
self.checkequal('a', 'A', 'replace', 'A', 'a')
self.checkequal(True, 'A', 'isupper')
self.checkequal('A', 'a', 'capitalize')
self.checkequal(True, 'a', 'islower')
self.checkequal('A', 'a', 'swapcase')
self.checkequal(True, 'a', 'islower')
self.checkequal('A', 'a', 'title')
self.checkequal(True, 'a', 'islower')
def test_partition(self):
self.checkequal(('this is the par', 'ti', 'tion method'),
'this is the partition method', 'partition', 'ti')
# from raymond's original specification
S = 'http://www.python.org'
self.checkequal(('http', '://', 'www.python.org'), S, 'partition', '://')
self.checkequal(('http://www.python.org', '', ''), S, 'partition', '?')
self.checkequal(('', 'http://', 'www.python.org'), S, 'partition', 'http://')
self.checkequal(('http://www.python.', 'org', ''), S, 'partition', 'org')
self.checkraises(ValueError, S, 'partition', '')
self.checkraises(TypeError, S, 'partition', None)
# mixed use of str and unicode
self.assertEqual('a/b/c'.partition(u'/'), ('a', '/', 'b/c'))
def test_rpartition(self):
self.checkequal(('this is the rparti', 'ti', 'on method'),
'this is the rpartition method', 'rpartition', 'ti')
# from raymond's original specification
S = 'http://www.python.org'
self.checkequal(('http', '://', 'www.python.org'), S, 'rpartition', '://')
self.checkequal(('', '', 'http://www.python.org'), S, 'rpartition', '?')
self.checkequal(('', 'http://', 'www.python.org'), S, 'rpartition', 'http://')
self.checkequal(('http://www.python.', 'org', ''), S, 'rpartition', 'org')
self.checkraises(ValueError, S, 'rpartition', '')
self.checkraises(TypeError, S, 'rpartition', None)
# mixed use of str and unicode
self.assertEqual('a/b/c'.rpartition(u'/'), ('a/b', '/', 'c'))
def test_none_arguments(self):
# issue 11828
s = 'hello'
self.checkequal(2, s, 'find', 'l', None)
self.checkequal(3, s, 'find', 'l', -2, None)
self.checkequal(2, s, 'find', 'l', None, -2)
self.checkequal(0, s, 'find', 'h', None, None)
self.checkequal(3, s, 'rfind', 'l', None)
self.checkequal(3, s, 'rfind', 'l', -2, None)
self.checkequal(2, s, 'rfind', 'l', None, -2)
self.checkequal(0, s, 'rfind', 'h', None, None)
self.checkequal(2, s, 'index', 'l', None)
self.checkequal(3, s, 'index', 'l', -2, None)
self.checkequal(2, s, 'index', 'l', None, -2)
self.checkequal(0, s, 'index', 'h', None, None)
self.checkequal(3, s, 'rindex', 'l', None)
self.checkequal(3, s, 'rindex', 'l', -2, None)
self.checkequal(2, s, 'rindex', 'l', None, -2)
self.checkequal(0, s, 'rindex', 'h', None, None)
self.checkequal(2, s, 'count', 'l', None)
self.checkequal(1, s, 'count', 'l', -2, None)
self.checkequal(1, s, 'count', 'l', None, -2)
self.checkequal(0, s, 'count', 'x', None, None)
self.checkequal(True, s, 'endswith', 'o', None)
self.checkequal(True, s, 'endswith', 'lo', -2, None)
self.checkequal(True, s, 'endswith', 'l', None, -2)
self.checkequal(False, s, 'endswith', 'x', None, None)
self.checkequal(True, s, 'startswith', 'h', None)
self.checkequal(True, s, 'startswith', 'l', -2, None)
self.checkequal(True, s, 'startswith', 'h', None, -2)
self.checkequal(False, s, 'startswith', 'x', None, None)
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
s = 'hello'
x = 'x'
self.assertRaisesRegexp(TypeError, r'\bfind\b', s.find,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brfind\b', s.rfind,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bindex\b', s.index,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brindex\b', s.rindex,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'^count\(', s.count,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'^startswith\(', s.startswith,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'^endswith\(', s.endswith,
x, None, None, None)
class MixinStrStringUserStringTest:
# Additional tests for 8bit strings, i.e. str, UserString and
# the string module
def test_maketrans(self):
self.assertEqual(
''.join(map(chr, xrange(256))).replace('abc', 'xyz'),
string.maketrans('abc', 'xyz')
)
self.assertRaises(ValueError, string.maketrans, 'abc', 'xyzw')
def test_translate(self):
table = string.maketrans('abc', 'xyz')
self.checkequal('xyzxyz', 'xyzabcdef', 'translate', table, 'def')
table = string.maketrans('a', 'A')
self.checkequal('Abc', 'abc', 'translate', table)
self.checkequal('xyz', 'xyz', 'translate', table)
self.checkequal('yz', 'xyz', 'translate', table, 'x')
self.checkequal('yx', 'zyzzx', 'translate', None, 'z')
self.checkequal('zyzzx', 'zyzzx', 'translate', None, '')
self.checkequal('zyzzx', 'zyzzx', 'translate', None)
self.checkraises(ValueError, 'xyz', 'translate', 'too short', 'strip')
self.checkraises(ValueError, 'xyz', 'translate', 'too short')
class MixinStrUserStringTest:
# Additional tests that only work with
    # 8bit compatible objects, i.e. str and UserString
if test_support.have_unicode:
def test_encoding_decoding(self):
codecs = [('rot13', 'uryyb jbeyq'),
('base64', 'aGVsbG8gd29ybGQ=\n'),
('hex', '68656c6c6f20776f726c64'),
('uu', 'begin 666 <data>\n+:&5L;&\\@=V]R;&0 \n \nend\n')]
for encoding, data in codecs:
self.checkequal(data, 'hello world', 'encode', encoding)
self.checkequal('hello world', data, 'decode', encoding)
# zlib is optional, so we make the test optional too...
try:
import zlib
except ImportError:
pass
else:
data = 'x\x9c\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\x01\x00\x1a\x0b\x04]'
self.checkequal(data, 'hello world', 'encode', 'zlib')
self.checkequal('hello world', data, 'decode', 'zlib')
self.checkraises(TypeError, 'xyz', 'decode', 42)
self.checkraises(TypeError, 'xyz', 'encode', 42)
class MixinStrUnicodeTest:
# Additional tests that only work with str and unicode.
def test_bug1001011(self):
# Make sure join returns a NEW object for single item sequences
# involving a subclass.
# Make sure that it is of the appropriate type.
# Check the optimisation still occurs for standard objects.
t = self.type2test
class subclass(t):
pass
s1 = subclass("abcd")
s2 = t().join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is t)
s1 = t("abcd")
s2 = t().join([s1])
self.assertTrue(s1 is s2)
# Should also test mixed-type join.
if t is unicode:
s1 = subclass("abcd")
s2 = "".join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is t)
s1 = t("abcd")
s2 = "".join([s1])
self.assertTrue(s1 is s2)
elif t is str:
s1 = subclass("abcd")
s2 = u"".join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is unicode) # promotes!
s1 = t("abcd")
s2 = u"".join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is unicode) # promotes!
else:
self.fail("unexpected type for MixinStrUnicodeTest %r" % t)
|
alsrgv/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/unconnected_gradients.py
|
39
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for calculating gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
from tensorflow.python.util.tf_export import tf_export
@tf_export("UnconnectedGradients")
class UnconnectedGradients(enum.Enum):
"""Controls how gradient computation behaves when y does not depend on x.
The gradient of y with respect to x can be zero in two different ways: there
could be no differentiable path in the graph connecting x to y (and so we can
statically prove that the gradient is zero) or it could be that runtime values
of tensors in a particular execution lead to a gradient of zero (say, if a
relu unit happens to not be activated). To allow you to distinguish between
these two cases you can choose what value gets returned for the gradient when
there is no path in the graph from x to y:
* `NONE`: Indicates that [None] will be returned if there is no path from x
    to y.
* `ZERO`: Indicates that a zero tensor will be returned in the shape of x.
"""
NONE = "none"
ZERO = "zero"
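def _demo_unconnected_gradients():
  """Illustrative usage sketch, not part of the original module.
  Assumes the TF 1.x graph-mode `tf.gradients` API, which accepts an
  `unconnected_gradients` keyword argument; the tensors below are
  hypothetical.
  """
  import tensorflow as tf
  x = tf.constant(1.0)
  y = tf.constant(3.0)  # y has no dependency on x
  # Default behaviour: the result list holds None for the unconnected input.
  grads_none = tf.gradients(
      y, x, unconnected_gradients=tf.UnconnectedGradients.NONE)
  # ZERO: the result list holds a zero tensor shaped like x instead of None.
  grads_zero = tf.gradients(
      y, x, unconnected_gradients=tf.UnconnectedGradients.ZERO)
  return grads_none, grads_zero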
|
marcuskelly/recover
|
refs/heads/master
|
Lib/site-packages/_pytest/deprecated.py
|
19
|
"""
This module contains deprecation messages and bits of code used elsewhere in the codebase
that are planned to be removed in the next pytest release.
Keeping it in a central location makes it easy to track what is deprecated and should
be removed when the time comes.
"""
MAIN_STR_ARGS = 'passing a string to pytest.main() is deprecated, ' \
'pass a list of arguments instead.'
YIELD_TESTS = 'yield tests are deprecated, and scheduled to be removed in pytest 4.0'
FUNCARG_PREFIX = (
'{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated '
'and scheduled to be removed in pytest 4.0. '
'Please remove the prefix and use the @pytest.fixture decorator instead.')
SETUP_CFG_PYTEST = '[pytest] section in setup.cfg files is deprecated, use [tool:pytest] instead.'
GETFUNCARGVALUE = "use of getfuncargvalue is deprecated, use getfixturevalue"
RESULT_LOG = '--result-log is deprecated and scheduled for removal in pytest 4.0'
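# Illustrative only (not part of the original module): these message
# constants are typically fed to the standard warnings machinery elsewhere
# in the codebase, along the lines of the sketch below.
def _demo_emit_deprecation_warning():
    import warnings
    warnings.warn(MAIN_STR_ARGS, DeprecationWarning)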
|
darkleons/BE
|
refs/heads/master
|
addons/account/wizard/account_reconcile.py
|
226
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round
import openerp.addons.decimal_precision as dp
class account_move_line_reconcile(osv.osv_memory):
"""
    Account move line reconcile wizard: it checks for a write-off and either creates the write-off reconcile entry or reconciles directly.
"""
_name = 'account.move.line.reconcile'
_description = 'Account move line reconcile'
_columns = {
'trans_nbr': fields.integer('# of Transaction', readonly=True),
'credit': fields.float('Credit amount', readonly=True, digits_compute=dp.get_precision('Account')),
'debit': fields.float('Debit amount', readonly=True, digits_compute=dp.get_precision('Account')),
'writeoff': fields.float('Write-Off amount', readonly=True, digits_compute=dp.get_precision('Account')),
}
def default_get(self, cr, uid, fields, context=None):
res = super(account_move_line_reconcile, self).default_get(cr, uid, fields, context=context)
data = self.trans_rec_get(cr, uid, context['active_ids'], context)
if 'trans_nbr' in fields:
res.update({'trans_nbr':data['trans_nbr']})
if 'credit' in fields:
res.update({'credit':data['credit']})
if 'debit' in fields:
res.update({'debit':data['debit']})
if 'writeoff' in fields:
res.update({'writeoff':data['writeoff']})
return res
def trans_rec_get(self, cr, uid, ids, context=None):
account_move_line_obj = self.pool.get('account.move.line')
if context is None:
context = {}
credit = debit = 0
account_id = False
count = 0
for line in account_move_line_obj.browse(cr, uid, context['active_ids'], context=context):
if not line.reconcile_id and not line.reconcile_id.id:
count += 1
credit += line.credit
debit += line.debit
account_id = line.account_id.id
precision = self.pool['decimal.precision'].precision_get(cr, uid, 'Account')
writeoff = float_round(debit-credit, precision_digits=precision)
credit = float_round(credit, precision_digits=precision)
debit = float_round(debit, precision_digits=precision)
return {'trans_nbr': count, 'account_id': account_id, 'credit': credit, 'debit': debit, 'writeoff': writeoff}
def trans_rec_addendum_writeoff(self, cr, uid, ids, context=None):
return self.pool.get('account.move.line.reconcile.writeoff').trans_rec_addendum(cr, uid, ids, context)
def trans_rec_reconcile_partial_reconcile(self, cr, uid, ids, context=None):
return self.pool.get('account.move.line.reconcile.writeoff').trans_rec_reconcile_partial(cr, uid, ids, context)
def trans_rec_reconcile_full(self, cr, uid, ids, context=None):
account_move_line_obj = self.pool.get('account.move.line')
period_obj = self.pool.get('account.period')
date = False
period_id = False
        journal_id = False
account_id = False
if context is None:
context = {}
date = time.strftime('%Y-%m-%d')
ids = period_obj.find(cr, uid, dt=date, context=context)
if ids:
period_id = ids[0]
account_move_line_obj.reconcile(cr, uid, context['active_ids'], 'manual', account_id,
period_id, journal_id, context=context)
return {'type': 'ir.actions.act_window_close'}
class account_move_line_reconcile_writeoff(osv.osv_memory):
"""
    It opens the write-off wizard form, in which the user can define the journal, account and analytic account for the reconciliation.
"""
_name = 'account.move.line.reconcile.writeoff'
_description = 'Account move line reconcile (writeoff)'
_columns = {
'journal_id': fields.many2one('account.journal','Write-Off Journal', required=True),
'writeoff_acc_id': fields.many2one('account.account','Write-Off account', required=True),
'date_p': fields.date('Date'),
'comment': fields.char('Comment', required=True),
'analytic_id': fields.many2one('account.analytic.account', 'Analytic Account', domain=[('parent_id', '!=', False)]),
}
_defaults = {
'date_p': lambda *a: time.strftime('%Y-%m-%d'),
'comment': _('Write-off'),
}
def trans_rec_addendum(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
if context is None:
context = {}
model_data_ids = mod_obj.search(cr, uid,[('model','=','ir.ui.view'),('name','=','account_move_line_reconcile_writeoff')], context=context)
resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'name': _('Reconcile Writeoff'),
'context': context,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'account.move.line.reconcile.writeoff',
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'target': 'new',
}
def trans_rec_reconcile_partial(self, cr, uid, ids, context=None):
account_move_line_obj = self.pool.get('account.move.line')
if context is None:
context = {}
account_move_line_obj.reconcile_partial(cr, uid, context['active_ids'], 'manual', context=context)
return {'type': 'ir.actions.act_window_close'}
def trans_rec_reconcile(self, cr, uid, ids, context=None):
context = dict(context or {})
account_move_line_obj = self.pool.get('account.move.line')
period_obj = self.pool.get('account.period')
data = self.read(cr, uid, ids,context=context)[0]
account_id = data['writeoff_acc_id'][0]
context['date_p'] = data['date_p']
journal_id = data['journal_id'][0]
context['comment'] = data['comment']
if data['analytic_id']:
context['analytic_id'] = data['analytic_id'][0]
        # Fall back to today when no date was given, so `date` and `period_id`
        # are always bound before calling reconcile().
        date = context['date_p'] or time.strftime('%Y-%m-%d')
        period_id = False
        ids = period_obj.find(cr, uid, dt=date, context=context)
        if ids:
            period_id = ids[0]
account_move_line_obj.reconcile(cr, uid, context['active_ids'], 'manual', account_id,
period_id, journal_id, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
hhkaos/awesome-arcgis
|
refs/heads/master
|
node_modules/gitbook/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSNew.py
|
1835
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
# Initialize random number generator
random.seed()
# GUIDs for project types
ENTRY_TYPE_GUIDS = {
'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
"""Returns a GUID for the specified target name.
Args:
name: Target name.
seed: Seed for MD5 hash.
Returns:
    A GUID-like string calculated from the name and seed.
  This generates something which looks like a GUID, but depends only on the
  name and seed. This means the same name/seed will always generate the same
  GUID, so projects and solutions which refer to each other can determine
  each other's GUIDs explicitly. It also means that the GUID will not change
  when the project for a target is rebuilt.
"""
  # Calculate an MD5 signature for the seed and name.
d = _new_md5(str(seed) + str(name)).hexdigest().upper()
# Convert most of the signature to GUID form (discard the rest)
guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
+ '-' + d[20:32] + '}')
return guid
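# For illustration, MakeGuid is deterministic: the same name/seed pair always
# yields the same GUID, and changing either input changes the result, e.g.
#   MakeGuid('base_unittests') == MakeGuid('base_unittests')   # True
#   MakeGuid('foo', seed='a') == MakeGuid('foo', seed='b')     # False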
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
def __cmp__(self, other):
# Sort by name then guid (so things are in order on vs2008).
return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
"""Folder in a Visual Studio project or solution."""
def __init__(self, path, name = None, entries = None,
guid = None, items = None):
"""Initializes the folder.
Args:
path: Full path to the folder.
name: Name of the folder.
entries: List of folder entries to nest inside this folder. May contain
Folder or Project objects. May be None, if the folder is empty.
guid: GUID to use for folder, if not None.
items: List of solution items to include in the folder project. May be
None, if the folder does not directly contain items.
"""
if name:
self.name = name
else:
# Use last layer.
self.name = os.path.basename(path)
self.path = path
self.guid = guid
# Copy passed lists (or set to empty lists)
self.entries = sorted(list(entries or []))
self.items = list(items or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
def get_guid(self):
if self.guid is None:
# Use consistent guids for folders (so things don't regenerate).
self.guid = MakeGuid(self.path, seed='msvs_folder')
return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
"""Visual Studio project."""
def __init__(self, path, name = None, dependencies = None, guid = None,
spec = None, build_file = None, config_platform_overrides = None,
fixpath_prefix = None):
"""Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
config_platform_overrides: optional dict of configuration platforms to
          be used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
"""
self.path = path
self.guid = guid
self.spec = spec
self.build_file = build_file
# Use project filename if name not specified
self.name = name or os.path.splitext(os.path.basename(path))[0]
# Copy passed lists (or set to empty lists)
self.dependencies = list(dependencies or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
if config_platform_overrides:
self.config_platform_overrides = config_platform_overrides
else:
self.config_platform_overrides = {}
self.fixpath_prefix = fixpath_prefix
self.msbuild_toolset = None
def set_dependencies(self, dependencies):
self.dependencies = list(dependencies or [])
def get_guid(self):
if self.guid is None:
# Set GUID from path
# TODO(rspangler): This is fragile.
# 1. We can't just use the project filename sans path, since there could
# be multiple projects with the same base name (for example,
# foo/unittest.vcproj and bar/unittest.vcproj).
# 2. The path needs to be relative to $SOURCE_ROOT, so that the project
# GUID is the same whether it's included from base/base.sln or
# foo/bar/baz/baz.sln.
# 3. The GUID needs to be the same each time this builder is invoked, so
# that we don't need to rebuild the solution when the project changes.
# 4. We should be able to handle pre-built project files by reading the
# GUID from the files.
self.guid = MakeGuid(self.name)
return self.guid
def set_msbuild_toolset(self, msbuild_toolset):
self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution(object):
"""Visual Studio solution."""
def __init__(self, path, version, entries=None, variants=None,
websiteProperties=True):
"""Initializes the solution.
Args:
path: Path to solution file.
version: Format version to emit.
entries: List of entries in solution. May contain Folder or Project
objects. May be None, if the folder is empty.
      variants: List of build variant strings. If None, a default list will
be used.
websiteProperties: Flag to decide if the website properties section
is generated.
"""
self.path = path
self.websiteProperties = websiteProperties
self.version = version
# Copy passed lists (or set to empty lists)
self.entries = list(entries or [])
if variants:
# Copy passed list
self.variants = variants[:]
else:
# Use default
self.variants = ['Debug|Win32', 'Release|Win32']
# TODO(rspangler): Need to be able to handle a mapping of solution config
# to project config. Should we be able to handle variants being a dict,
# or add a separate variant_map variable? If it's a dict, we can't
# guarantee the order of variants since dict keys aren't ordered.
# TODO(rspangler): Automatically write to disk for now; should delay until
# node-evaluation time.
self.Write()
def Write(self, writer=gyp.common.WriteOnDiff):
"""Writes the solution file to disk.
Raises:
IndexError: An entry appears multiple times.
"""
# Walk the entry tree and collect all the folders and projects.
all_entries = set()
entries_to_check = self.entries[:]
while entries_to_check:
e = entries_to_check.pop(0)
# If this entry has been visited, nothing to do.
if e in all_entries:
continue
all_entries.add(e)
# If this is a folder, check its entries too.
if isinstance(e, MSVSFolder):
entries_to_check += e.entries
all_entries = sorted(all_entries)
# Open file and print header
f = writer(self.path)
f.write('Microsoft Visual Studio Solution File, '
'Format Version %s\r\n' % self.version.SolutionVersion())
f.write('# %s\r\n' % self.version.Description())
# Project entries
sln_root = os.path.split(self.path)[0]
for e in all_entries:
relative_path = gyp.common.RelativePath(e.path, sln_root)
# msbuild does not accept an empty folder_name.
# use '.' in case relative_path is empty.
folder_name = relative_path.replace('/', '\\') or '.'
f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
e.entry_type_guid, # Entry type GUID
e.name, # Folder name
folder_name, # Folder name (again)
e.get_guid(), # Entry GUID
))
# TODO(rspangler): Need a way to configure this stuff
if self.websiteProperties:
f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
'\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
'\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
'\tEndProjectSection\r\n')
if isinstance(e, MSVSFolder):
if e.items:
f.write('\tProjectSection(SolutionItems) = preProject\r\n')
for i in e.items:
f.write('\t\t%s = %s\r\n' % (i, i))
f.write('\tEndProjectSection\r\n')
if isinstance(e, MSVSProject):
if e.dependencies:
f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
for d in e.dependencies:
f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
f.write('\tEndProjectSection\r\n')
f.write('EndProject\r\n')
# Global section
f.write('Global\r\n')
# Configurations (variants)
f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
for v in self.variants:
f.write('\t\t%s = %s\r\n' % (v, v))
f.write('\tEndGlobalSection\r\n')
# Sort config guids for easier diffing of solution changes.
config_guids = []
config_guids_overrides = {}
for e in all_entries:
if isinstance(e, MSVSProject):
config_guids.append(e.get_guid())
config_guids_overrides[e.get_guid()] = e.config_platform_overrides
config_guids.sort()
f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
for g in config_guids:
for v in self.variants:
nv = config_guids_overrides[g].get(v, v)
# Pick which project configuration to build for this solution
# configuration.
f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
# Enable project in this solution configuration.
f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
f.write('\tEndGlobalSection\r\n')
# TODO(rspangler): Should be able to configure this stuff too (though I've
# never seen this be any different)
f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
f.write('\t\tHideSolutionNode = FALSE\r\n')
f.write('\tEndGlobalSection\r\n')
# Folder mappings
# Omit this section if there are no folders
if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
for e in all_entries:
if not isinstance(e, MSVSFolder):
continue # Does not apply to projects, only folders
for subentry in e.entries:
f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
f.write('\tEndGlobalSection\r\n')
f.write('EndGlobal\r\n')
f.close()
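# A minimal usage sketch, assuming a `version` object that provides the
# SolutionVersion() and Description() methods Write() expects (hypothetical
# values, not part of this module):
#
#   proj = MSVSProject('foo/foo.vcproj')
#   top = MSVSFolder('src', entries=[proj])
#   MSVSSolution('all.sln', version=version, entries=[top])  # writes all.sln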
|
Sidney84/pa-chromium
|
refs/heads/master
|
build/android/pylib/utils/reraiser_thread.py
|
36
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Thread and ThreadGroup that reraise exceptions on the main thread."""
import logging
import sys
import threading
import time
import traceback
import watchdog_timer
class TimeoutError(Exception):
"""Module-specific timeout exception."""
pass
class ReraiserThread(threading.Thread):
"""Thread class that can reraise exceptions."""
  def __init__(self, func, args=None, kwargs=None, name=None):
"""Initialize thread.
Args:
func: callable to call on a new thread.
args: list of positional arguments for callable, defaults to empty.
kwargs: dictionary of keyword arguments for callable, defaults to empty.
name: thread name, defaults to Thread-N.
"""
super(ReraiserThread, self).__init__(name=name)
self.daemon = True
self._func = func
    # Normalize here to avoid the shared mutable-default pitfall.
    self._args = args if args is not None else []
    self._kwargs = kwargs if kwargs is not None else {}
self._exc_info = None
def ReraiseIfException(self):
"""Reraise exception if an exception was raised in the thread."""
if self._exc_info:
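      # Python 2 three-argument raise: re-raises with the original traceback.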
raise self._exc_info[0], self._exc_info[1], self._exc_info[2]
#override
def run(self):
"""Overrides Thread.run() to add support for reraising exceptions."""
try:
self._func(*self._args, **self._kwargs)
except:
self._exc_info = sys.exc_info()
raise
class ReraiserThreadGroup(object):
"""A group of ReraiserThread objects."""
  def __init__(self, threads=None):
    """Initialize thread group.
    Args:
      threads: a list of ReraiserThread objects; defaults to empty.
    """
    # Copy the passed list so instances never share a mutable default.
    self._threads = list(threads or [])
def Add(self, thread):
"""Add a thread to the group.
Args:
thread: a ReraiserThread object.
"""
self._threads.append(thread)
def StartAll(self):
"""Start all threads."""
for thread in self._threads:
thread.start()
def _JoinAll(self, watcher=watchdog_timer.WatchdogTimer(None)):
"""Join all threads without stack dumps.
Reraises exceptions raised by the child threads and supports breaking
immediately on exceptions raised on the main thread.
Args:
watcher: Watchdog object providing timeout, by default waits forever.
"""
alive_threads = self._threads[:]
while alive_threads:
for thread in alive_threads[:]:
if watcher.IsTimedOut():
raise TimeoutError('Timed out waiting for %d of %d threads.' %
(len(alive_threads), len(self._threads)))
# Allow the main thread to periodically check for interrupts.
thread.join(0.1)
if not thread.isAlive():
alive_threads.remove(thread)
# All threads are allowed to complete before reraising exceptions.
for thread in self._threads:
thread.ReraiseIfException()
def JoinAll(self, watcher=watchdog_timer.WatchdogTimer(None)):
"""Join all threads.
Reraises exceptions raised by the child threads and supports breaking
immediately on exceptions raised on the main thread. Unfinished threads'
stacks will be logged on watchdog timeout.
Args:
watcher: Watchdog object providing timeout, by default waits forever.
"""
try:
self._JoinAll(watcher)
except TimeoutError:
for thread in (t for t in self._threads if t.isAlive()):
stack = sys._current_frames()[thread.ident]
logging.critical('*' * 80)
logging.critical('Stack dump for timed out thread \'%s\'', thread.name)
logging.critical('*' * 80)
for filename, lineno, name, line in traceback.extract_stack(stack):
logging.critical('File: "%s", line %d, in %s', filename, lineno, name)
if line:
logging.critical(' %s', line.strip())
logging.critical('*' * 80)
raise
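# A minimal usage sketch (hypothetical worker): an exception raised inside a
# worker thread resurfaces on the main thread when JoinAll() is called.
#
#   def _work(n):
#     if n == 3:
#       raise ValueError(n)
#   group = ReraiserThreadGroup([ReraiserThread(_work, args=[i]) for i in range(5)])
#   group.StartAll()
#   group.JoinAll()  # reraises the worker's ValueError here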
|
chrber/dcache-docker
|
refs/heads/master
|
dcache/deps/.vim/bundle/jedi-vim/jedi/test/test_evaluate/test_pyc.py
|
26
|
"""
Test completions from *.pyc files:
- generate a dummy python module
- compile the dummy module to generate a *.pyc
- delete the pure python dummy module
- try jedi on the generated *.pyc
"""
import os
import shutil
import sys
import pytest
import jedi
from ..helpers import cwd_at
SRC = """class Foo:
pass
class Bar:
pass
"""
def generate_pyc():
os.mkdir("dummy_package")
with open("dummy_package/__init__.py", 'w'):
pass
with open("dummy_package/dummy.py", 'w') as f:
f.write(SRC)
import compileall
compileall.compile_file("dummy_package/dummy.py")
os.remove("dummy_package/dummy.py")
if sys.version_info[0] == 3:
# Python3 specific:
# To import pyc modules, we must move them out of the __pycache__
# directory and rename them to remove ".cpython-%s%d"
# see: http://stackoverflow.com/questions/11648440/python-does-not-detect-pyc-files
for f in os.listdir("dummy_package/__pycache__"):
dst = f.replace('.cpython-%s%s' % sys.version_info[:2], "")
dst = os.path.join("dummy_package", dst)
shutil.copy(os.path.join("dummy_package/__pycache__", f), dst)
# Python 2.6 does not necessarily come with `compileall.compile_file`.
@pytest.mark.skipif("sys.version_info < (2,7)")
@cwd_at('test/test_evaluate')
def test_pyc():
"""
    The module should expose at least two completions (Foo and Bar).
"""
try:
generate_pyc()
s = jedi.Script("from dummy_package import dummy; dummy.", path='blub.py')
assert len(s.completions()) >= 2
finally:
shutil.rmtree("dummy_package")
if __name__ == "__main__":
test_pyc()
|
HybridF5/tempest
|
refs/heads/master
|
tempest/api/network/test_extra_dhcp_options.py
|
8
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import test
class ExtraDHCPOptionsTestJSON(base.BaseNetworkTest):
"""Tests the following operations with the Extra DHCP Options:
port create
port list
port show
port update
v2.0 of the Neutron API is assumed. It is also assumed that the Extra
DHCP Options extension is enabled in the [network-feature-enabled]
section of etc/tempest.conf
"""
@classmethod
def skip_checks(cls):
super(ExtraDHCPOptionsTestJSON, cls).skip_checks()
if not test.is_extension_enabled('extra_dhcp_opt', 'network'):
msg = "Extra DHCP Options extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(ExtraDHCPOptionsTestJSON, cls).resource_setup()
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.port = cls.create_port(cls.network)
cls.ip_tftp = ('123.123.123.123' if cls._ip_version == 4
else '2015::dead')
cls.ip_server = ('123.123.123.45' if cls._ip_version == 4
else '2015::badd')
cls.extra_dhcp_opts = [
{'opt_value': 'pxelinux.0', 'opt_name': 'bootfile-name'},
{'opt_value': cls.ip_tftp, 'opt_name': 'tftp-server'},
{'opt_value': cls.ip_server, 'opt_name': 'server-ip-address'}
]
@test.idempotent_id('d2c17063-3767-4a24-be4f-a23dbfa133c9')
def test_create_list_port_with_extra_dhcp_options(self):
# Create a port with Extra DHCP Options
body = self.ports_client.create_port(
network_id=self.network['id'],
extra_dhcp_opts=self.extra_dhcp_opts)
port_id = body['port']['id']
self.addCleanup(self.ports_client.delete_port, port_id)
# Confirm port created has Extra DHCP Options
body = self.ports_client.list_ports()
ports = body['ports']
port = [p for p in ports if p['id'] == port_id]
self.assertTrue(port)
self._confirm_extra_dhcp_options(port[0], self.extra_dhcp_opts)
@test.idempotent_id('9a6aebf4-86ee-4f47-b07a-7f7232c55607')
def test_update_show_port_with_extra_dhcp_options(self):
# Update port with extra dhcp options
name = data_utils.rand_name('new-port-name')
body = self.ports_client.update_port(
self.port['id'],
name=name,
extra_dhcp_opts=self.extra_dhcp_opts)
# Confirm extra dhcp options were added to the port
body = self.ports_client.show_port(self.port['id'])
self._confirm_extra_dhcp_options(body['port'], self.extra_dhcp_opts)
def _confirm_extra_dhcp_options(self, port, extra_dhcp_opts):
retrieved = port['extra_dhcp_opts']
self.assertEqual(len(retrieved), len(extra_dhcp_opts))
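        # for/else: the else clause fires only when the inner loop finishes
        # without hitting `break`, i.e. no matching option was found.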
for retrieved_option in retrieved:
for option in extra_dhcp_opts:
if (retrieved_option['opt_value'] == option['opt_value'] and
retrieved_option['opt_name'] == option['opt_name']):
break
else:
self.fail('Extra DHCP option not found in port %s' %
str(retrieved_option))
class ExtraDHCPOptionsIpV6TestJSON(ExtraDHCPOptionsTestJSON):
_ip_version = 6
|
aidan-/ansible-modules-extras
|
refs/heads/devel
|
network/illumos/ipadm_if.py
|
2
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Adam Števko <adam.stevko@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: ipadm_if
short_description: Manage IP interfaces on Solaris/illumos systems.
description:
- Create, delete, enable or disable IP interfaces on Solaris/illumos
systems.
version_added: "2.2"
author: Adam Števko (@xen0l)
options:
name:
description:
- IP interface name.
required: true
temporary:
description:
- Specifies that the IP interface is temporary. Temporary IP
interfaces do not persist across reboots.
required: false
default: false
choices: [ "true", "false" ]
state:
description:
- Create or delete Solaris/illumos IP interfaces.
required: false
default: "present"
choices: [ "present", "absent", "enabled", "disabled" ]
'''
EXAMPLES = '''
# Create vnic0 interface
- ipadm_if:
name: vnic0
state: enabled
# Disable vnic0 interface
- ipadm_if:
name: vnic0
state: disabled
'''
RETURN = '''
name:
description: IP interface name
returned: always
type: string
sample: "vnic0"
state:
description: state of the target
returned: always
type: string
sample: "present"
temporary:
    description: persistence of an IP interface
returned: always
type: boolean
sample: "True"
'''
class IPInterface(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.temporary = module.params['temporary']
self.state = module.params['state']
def interface_exists(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('show-if')
cmd.append(self.name)
(rc, _, _) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def interface_is_disabled(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('show-if')
cmd.append('-o')
cmd.append('state')
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(name=self.name, rc=rc, msg=err)
return 'disabled' in out
def create_interface(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('create-if')
if self.temporary:
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
def delete_interface(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('delete-if')
if self.temporary:
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
def enable_interface(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('enable-if')
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
def disable_interface(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('disable-if')
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
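# Each method above shells out to ipadm(1M); for example, create_interface()
# with temporary=True runs roughly: `ipadm create-if -t <name>`.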
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
temporary=dict(default=False, type='bool'),
state=dict(default='present', choices=['absent',
'present',
'enabled',
'disabled']),
),
supports_check_mode=True
)
interface = IPInterface(module)
rc = None
out = ''
err = ''
result = {}
result['name'] = interface.name
result['state'] = interface.state
result['temporary'] = interface.temporary
if interface.state == 'absent':
if interface.interface_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = interface.delete_interface()
if rc != 0:
module.fail_json(name=interface.name, msg=err, rc=rc)
elif interface.state == 'present':
if not interface.interface_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = interface.create_interface()
if rc is not None and rc != 0:
module.fail_json(name=interface.name, msg=err, rc=rc)
elif interface.state == 'enabled':
if interface.interface_is_disabled():
(rc, out, err) = interface.enable_interface()
if rc is not None and rc != 0:
module.fail_json(name=interface.name, msg=err, rc=rc)
elif interface.state == 'disabled':
if not interface.interface_is_disabled():
(rc, out, err) = interface.disable_interface()
if rc is not None and rc != 0:
module.fail_json(name=interface.name, msg=err, rc=rc)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
|