repo_name
stringlengths 5
100
| ref
stringlengths 12
67
| path
stringlengths 4
244
| copies
stringlengths 1
8
| content
stringlengths 0
1.05M
⌀ |
|---|---|---|---|---|
ganeshgore/myremolab
|
refs/heads/master
|
server/launch/sample_balanced2/debugging.py
|
5
|
# Debugging configuration for the "balanced2" sample deployment.
# Each entry is a (host, port) pair for one locally running server instance.
SERVERS = [
    ('127.0.0.1','31337'),
    ('127.0.0.1','31338'),
    ('127.0.0.1','31339'),
]
# Protocol name -> one listening port per instance, in the same order as
# SERVERS (three instances, hence three ports per protocol).
PORTS = {
    'soap' : (10123,20123,30123),
    'json' : (18345,28345,38345),
    'xmlrpc' : (19345,29345,39345),
    'soap_login' : (10623,20623,30623),
    'json_login' : (18645,28645,38645),
    'xmlrpc_login' : (19645,29645,39645)
}
|
kdwink/intellij-community
|
refs/heads/master
|
python/testData/findUsages/GlobalUsages.py
|
83
|
<caret>search_variable = 1
def function():
global search_variable
search_variable = 2
print(search_variable)
|
MrHarcombe/python-gpiozero
|
refs/heads/master
|
docs/examples/led_board_2.py
|
3
|
# gpiozero example: light five PWM-capable LEDs (BCM pins 5, 6, 13, 19, 26)
# at increasing brightness, then block forever so the program keeps running.
from gpiozero import LEDBoard
from signal import pause
leds = LEDBoard(5, 6, 13, 19, 26, pwm=True)
# One brightness value (0.0-1.0) per LED, in pin order.
leds.value = (0.2, 0.4, 0.6, 0.8, 1.0)
pause()
|
peter-jang/ansible-modules-core
|
refs/heads/devel
|
cloud/amazon/_ec2_ami_search.py
|
35
|
#!/usr/bin/python
#
# (c) 2013, Nimbis Services
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_ami_search
short_description: Retrieve AWS AMI information for a given operating system.
deprecated: "in favor of the ec2_ami_find module"
version_added: "1.6"
description:
- Look up the most recent AMI on AWS for a given operating system.
- Returns C(ami), C(aki), C(ari), C(serial), C(tag)
- If there is no AKI or ARI associated with an image, these will be C(null).
- Only supports images from cloud-images.ubuntu.com
- 'Example output: C({"ami": "ami-69f5a900", "changed": false, "aki": "aki-88aa75e1", "tag": "release", "ari": null, "serial": "20131024"})'
options:
distro:
description: Linux distribution (e.g., C(ubuntu))
required: true
choices: ["ubuntu"]
release:
description: short name of the release (e.g., C(precise))
required: true
stream:
description: Type of release.
required: false
default: "server"
choices: ["server", "desktop"]
store:
description: Back-end store for instance
required: false
default: "ebs"
choices: ["ebs", "ebs-io1", "ebs-ssd", "instance-store"]
arch:
description: CPU architecture
required: false
default: "amd64"
choices: ["i386", "amd64"]
region:
description: EC2 region
required: false
default: us-east-1
choices: ["ap-northeast-1", "ap-southeast-1", "ap-northeast-2",
"ap-southeast-2", "eu-central-1", "eu-west-1", "sa-east-1",
"us-east-1", "us-west-1", "us-west-2", "us-gov-west-1"]
virt:
description: virutalization type
required: false
default: paravirtual
choices: ["paravirtual", "hvm"]
author: "Ansible Core Team (deprecated)"
'''
EXAMPLES = '''
- name: Launch an Ubuntu 12.04 (Precise Pangolin) EC2 instance
hosts: 127.0.0.1
connection: local
tasks:
- name: Get the Ubuntu precise AMI
ec2_ami_search: distro=ubuntu release=precise region=us-west-1 store=instance-store
register: ubuntu_image
- name: Start the EC2 instance
ec2: image={{ ubuntu_image.ami }} instance_type=m1.small key_name=mykey
'''
import csv
import json
import urlparse
SUPPORTED_DISTROS = ['ubuntu']
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-northeast-2',
'ap-southeast-2',
'ap-south-1',
'eu-central-1',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2',
"us-gov-west-1"]
def get_url(module, url):
    """Fetch *url* and return the response object.

    On any non-200 status the Ansible module is failed with the fetch info.
    """
    response, info = fetch_url(module, url)
    status = info['status']
    if status != 200:
        # Keep the legacy 'status_code' key for backwards compatibility.
        info['status_code'] = status
        module.fail_json(**info)
    return response
def ubuntu(module):
    """Resolve the Ubuntu AMI matching the module parameters and exit.

    Downloads the release manifest from cloud-images.ubuntu.com and reports
    the matching (ami, aki, ari, tag, serial) tuple via exit_json, or fails
    the module when no row matches.
    """
    params = module.params
    release = params['release']
    stream = params['stream']
    manifest_url = get_ubuntu_url(release, stream)
    response = get_url(module, manifest_url)
    rows = csv.reader(response, delimiter='\t')
    try:
        ami, aki, ari, tag, serial = lookup_ubuntu_ami(
            rows, release, stream, params['store'], params['arch'],
            params['region'], params['virt'])
    except KeyError:
        module.fail_json(msg="No matching AMI found")
    else:
        module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag,
                         serial=serial)
def lookup_ubuntu_ami(table, release, stream, store, arch, region, virt):
    """Find the Ubuntu AMI row matching the query.

    table: an iterable yielding rows of
        (release, stream, tag, serial, store, arch, region, ami, aki, ari, virt)
    release: ubuntu release name
    stream: 'server' or 'desktop'
    store: 'ebs', 'ebs-io1', 'ebs-ssd' or 'instance-store'
    arch: 'i386' or 'amd64'
    region: EC2 region
    virt: 'paravirtual' or 'hvm'

    Returns (ami, aki, ari, tag, serial); raises KeyError if no row matches.
    """
    wanted = (release, stream, store, arch, region, virt)
    for row in table:
        (row_release, row_stream, tag, serial,
         row_store, row_arch, row_region, ami, aki, ari,
         row_virt) = row
        candidate = (row_release, row_stream, row_store, row_arch,
                     row_region, row_virt)
        if candidate != wanted:
            continue
        # The aki/ari columns are sometimes empty strings; normalize to None.
        return (ami, aki or None, ari or None, tag, serial)
    raise KeyError()
def get_ubuntu_url(release, stream):
    """Return the cloud-images.ubuntu.com manifest URL for release/stream."""
    return ("https://cloud-images.ubuntu.com/query/%s/%s/released.current.txt"
            % (release, stream))
def main():
    """Module entry point: validate arguments and dispatch per distro."""
    argument_spec = dict(
        distro=dict(required=True, choices=SUPPORTED_DISTROS),
        release=dict(required=True),
        stream=dict(required=False, default='server',
                    choices=['desktop', 'server']),
        store=dict(required=False, default='ebs',
                   choices=['ebs', 'ebs-io1', 'ebs-ssd', 'instance-store']),
        arch=dict(required=False, default='amd64',
                  choices=['i386', 'amd64']),
        region=dict(required=False, default='us-east-1', choices=AWS_REGIONS),
        virt=dict(required=False, default='paravirtual',
                  choices=['paravirtual', 'hvm']),
    )
    module = AnsibleModule(argument_spec=argument_spec)
    distro = module.params['distro']
    # Only Ubuntu is supported; fail_json raises, so this never falls through.
    if distro != 'ubuntu':
        module.fail_json(msg="Unsupported distro: %s" % distro)
    ubuntu(module)
# this is magic, see lib/ansible/module_common.py
# (Ansible splices the module_utils code in at these star imports, providing
# AnsibleModule and fetch_url used above.)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
    main()
|
exobrain-wisekb/wisekb-management-platform
|
refs/heads/master
|
wisekb-uima-ducc/cassandra-server/pylib/cqlshlib/test/cassconnect.py
|
42
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import contextlib
import tempfile
import os.path
from .basecase import cql, cqlsh, cqlshlog, TEST_HOST, TEST_PORT, rundir
from .run_cqlsh import run_cqlsh, call_cqlsh
test_keyspace_init = os.path.join(rundir, 'test_keyspace_init.cql')
def get_cassandra_connection(cql_version=cqlsh.DEFAULT_CQLVER):
    """Open a CQL connection to the test cluster at the requested version."""
    if cql_version is None:
        cql_version = cqlsh.DEFAULT_CQLVER
    connection = cql((TEST_HOST,), TEST_PORT, cql_version=cql_version)
    # The cql lib doesn't record the negotiated version itself yet, so
    # stash it on the connection for later inspection.
    connection.cql_version = cql_version
    return connection
def get_cassandra_cursor(cql_version=cqlsh.DEFAULT_CQLVER):
    """Return a cursor on a fresh test-cluster connection."""
    connection = get_cassandra_connection(cql_version=cql_version)
    return connection.cursor()
# Stack of keyspace names created by create_test_keyspace(); the most
# recently created one is treated as the "current" test keyspace.
TEST_KEYSPACES_CREATED = []

def get_test_keyspace():
    """Return the name of the most recently created test keyspace."""
    return TEST_KEYSPACES_CREATED[-1]
def make_test_ks_name():
    """Generate a quick random-ish keyspace name.

    Abuses tempfile.mktemp purely for its unique-suffix generation; only
    the basename is used, so nothing is created on disk.
    """
    temp_path = tempfile.mktemp(prefix='CqlshTests_')
    return os.path.basename(temp_path)
def create_test_keyspace(cursor):
    """Create a uniquely named test keyspace, USE it, and record its name."""
    ksname = make_test_ks_name()
    qksname = quote_name(ksname)
    cursor.execute('''
        CREATE KEYSPACE %s WITH replication =
            {'class': 'SimpleStrategy', 'replication_factor': 1};
        ''' % qksname)
    cursor.execute('USE %s;' % qksname)
    # Remember the raw (unquoted) name so get_test_keyspace() can find it.
    TEST_KEYSPACES_CREATED.append(ksname)
    return ksname
def split_cql_commands(source):
    """Return the individual CQL statements contained in *source*.

    Raises ValueError if the source ends in the middle of a batch.
    """
    ruleset = cql_rule_set()
    statements, in_batch = ruleset.cql_split_statements(source)
    if in_batch:
        raise ValueError("CQL source ends unexpectedly")
    extracted = []
    for tokens in statements:
        if tokens:
            extracted.append(ruleset.cql_extract_orig(tokens, source))
    return extracted
def execute_cql_commands(cursor, source, logprefix='INIT: '):
    """Split *source* into individual CQL statements and execute each one.

    Each statement is logged (prefixed with *logprefix*) before execution.
    """
    # Loop variable renamed from 'cql' to 'statement': the original name
    # shadowed the module-level 'cql' import inside this function.
    for statement in split_cql_commands(source):
        cqlshlog.debug(logprefix + statement)
        cursor.execute(statement)
def execute_cql_file(cursor, fname):
    """Execute all CQL statements found in the file *fname*."""
    with open(fname) as cql_file:
        source = cql_file.read()
    return execute_cql_commands(cursor, source)
def create_test_db():
    """Create and initialize a fresh test keyspace; return its name."""
    with cassandra_cursor(ks=None) as cursor:
        ksname = create_test_keyspace(cursor)
        execute_cql_file(cursor, test_keyspace_init)
    return ksname
def remove_test_db():
    """Drop the most recently created test keyspace and forget about it."""
    with cassandra_cursor(ks=None) as cursor:
        ksname = TEST_KEYSPACES_CREATED.pop(-1)
        cursor.execute('DROP KEYSPACE %s' % quote_name(ksname))
@contextlib.contextmanager
def cassandra_connection(cql_version=cqlsh.DEFAULT_CQLVER):
    """
    Context manager yielding a test-cluster CQL connection at the given
    CQL version. The connection is closed when the context exits.
    """
    connection = get_cassandra_connection(cql_version=cql_version)
    try:
        yield connection
    finally:
        connection.close()
@contextlib.contextmanager
def cassandra_cursor(cql_version=None, ks=''):
    """
    Context manager yielding a cursor on a fresh test-cluster connection.

    Keyspace selection: ks == '' (the default) connects to the last test
    keyspace created; ks is None connects to no keyspace; any other value
    is used as the keyspace name. The underlying connection is shut down
    when the context exits.
    """
    keyspace = get_test_keyspace() if ks == '' else ks
    connection = get_cassandra_connection(cql_version=cql_version)
    try:
        yield connection.connect(keyspace)
    finally:
        connection.shutdown()
def cql_rule_set():
    """Return cqlsh's CQL3 grammar rule set."""
    return cqlsh.cql3handling.CqlRuleSet
def quote_name(name):
    """Escape *name* for safe use as a CQL identifier, when necessary."""
    ruleset = cql_rule_set()
    return ruleset.maybe_escape_name(name)
class DEFAULTVAL:
    """Sentinel distinguishing 'argument omitted' from an explicit None."""
    pass

def testrun_cqlsh(keyspace=DEFAULTVAL, **kwargs):
    """Run cqlsh against *keyspace*, defaulting to the current test keyspace.

    A positive sentinel is used as the default so callers can pass
    keyspace=None explicitly to connect to no keyspace at all.
    """
    if keyspace is DEFAULTVAL:
        keyspace = get_test_keyspace()
    return run_cqlsh(keyspace=keyspace, **kwargs)
def testcall_cqlsh(keyspace=None, **kwargs):
    """Invoke cqlsh once; a None keyspace means the current test keyspace."""
    if keyspace is None:
        keyspace = get_test_keyspace()
    return call_cqlsh(keyspace=keyspace, **kwargs)
|
BBVA/chaos-monkey-engine
|
refs/heads/master
|
test/attacks/required_schema.py
|
1
|
from chaosmonkey.attacks.attack import Attack
class RequiredSchema(Attack):
    """Test attack whose args schema declares two required string properties."""

    # Engine locator for this attack: "module_path:ClassName".
    ref = "required_schema:RequiredSchema"
    # JSON-schema for the attack configuration; both args are mandatory.
    schema = {
        "type": "object",
        "properties": {
            "ref": {"type": "string"},
            "args": {
                "type": "object",
                "properties": {
                    "property1": {"type": "string"},
                    "property2": {"type": "string"},
                },
                "required": ["property1", "property2"]
            }
        }
    }
    # Sample payload satisfying the schema above.
    example = {
        "ref": ref,
        "args": {
            "property1": "test1",
            "property2": "test2"
        }
    }

    def __init__(self, attack_config):
        super(RequiredSchema, self).__init__(attack_config)

    def run(self):
        # No-op: this attack exists only to exercise schema validation.
        pass

    @staticmethod
    def to_dict():
        return Attack._to_dict(
            RequiredSchema.ref,
            RequiredSchema.schema,
            RequiredSchema.example
        )
|
jwlawson/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/session_ops.py
|
49
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor Handle Operations. See the @{$python/session_ops} guide.
@@get_session_handle
@@get_session_handle_v2
@@get_session_tensor
@@delete_session_tensor
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import resource_handle_pb2
from tensorflow.python import pywrap_tensorflow_internal
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.util import compat
def encode_resource_handle(resource_handle):
    """Encode a ResourceHandle proto as custom numpy struct type."""
    serialized = bytearray(resource_handle.SerializeToString())
    return np.asarray(serialized, dtype=dtypes.np_resource)
class TensorHandle(object):
    """Represents a handle for a live tensor in a session."""

    def __init__(self, handle, dtype, session):
        """Constructs a new tensor handle.

        A tensor handle for a persistent tensor is a python string
        that has the form of "tensor_name;unique_id;device_name".

        Args:
          handle: A tensor handle.
          dtype: The data type of the tensor represented by `handle`.
          session: The session in which the tensor is produced.
        """
        # Normalized string form of the handle.
        self._handle = compat.as_str_any(handle)
        # Lazily-built ResourceHandleProto mirror (see _get_resource_handle).
        self._resource_handle = None
        self._dtype = dtype
        self._session = session
        # While True, __del__ reports this handle to the session for deferred
        # deletion; delete() and get_raw_handle() switch it off.
        self._auto_gc_enabled = True

    def __del__(self):
        # Hand the handle back to the session so the persistent tensor can
        # be garbage-collected on the runtime side.
        if self._auto_gc_enabled:
            self._session._register_dead_handle(self.handle)

    def __str__(self):
        return self._handle

    def _get_resource_handle(self):
        """The ResourceHandle representation of this handle."""
        if not self._resource_handle:
            self._resource_handle = resource_handle_pb2.ResourceHandleProto()
            # The device name is the last ';'-separated component.
            self._resource_handle.device = self._handle.split(";")[-1]
            self._resource_handle.container = (
                pywrap_tensorflow_internal.TENSOR_HANDLE_KEY)
            self._resource_handle.name = self._handle
        return self._resource_handle

    def to_numpy_array(self):
        """Convert a TensorHandle object to a feedable numpy value.

        Returns:
          A numpy array of a custom struct type that can be used as a feed value
          to run().
        """
        return encode_resource_handle(self._get_resource_handle())

    @property
    def handle(self):
        """The string representation of this handle."""
        return self._handle

    def eval(self):
        """Return the value of the tensor represented by this handle."""
        if not self._auto_gc_enabled:
            raise TypeError("Persistent tensor %s may have already been deleted."
                            % self.handle)
        holder, reader = _get_handle_reader(self._session.graph, self._handle,
                                            self._dtype)
        return self._session.run(reader, feed_dict={holder: self._handle})

    def delete(self):
        """Force the deletion of this persistent tensor."""
        if not self._auto_gc_enabled:
            raise TypeError("Persistent tensor %s may have already been deleted."
                            % self.handle)
        # Disable auto-GC first so __del__ does not double-delete.
        self._auto_gc_enabled = False
        holder, deleter = _get_handle_deleter(self._session.graph, 0, self._handle)
        self._session.run(deleter, feed_dict={holder: self.handle})

    def get_raw_handle(self):
        """Return the raw handle of the tensor.

        Note that the method disables the automatic garbage collection of this
        persistent tensor. The caller is now responsible for managing the life
        time of the tensor.
        """
        self._auto_gc_enabled = False
        return self._handle

    @staticmethod
    def _get_device_name(handle):
        """The device name encoded in the handle."""
        handle_str = compat.as_str_any(handle)
        return pydev.canonical_name(handle_str.split(";")[-1])

    @staticmethod
    def _get_reader_key(handle):
        """The graph key for reader."""
        # Key on tensor name + device name, dropping the unique id, so one
        # reader subgraph is shared per (tensor, device) pair.
        handle_parts = str(handle).split(";")
        return handle_parts[0] + ";" + handle_parts[-1]

    @staticmethod
    def _get_mover_key(feeder, handle):
        """The graph key for mover."""
        return feeder.op.name + ";" + TensorHandle._get_reader_key(handle)
def get_session_handle(data, name=None):
    """Return a handle under which `data` is kept alive in the session.

    This is EXPERIMENTAL and subject to change.

    The tensor stays "in-place" in the runtime; the returned scalar string
    handle can be fed to `get_session_tensor` in a subsequent run() to
    retrieve it, so a tensor produced by one run call can be the input of
    a future one.

    Args:
      data: A tensor to be stored in the session.
      name: Optional name prefix for the return tensor.

    Returns:
      A scalar string tensor representing a unique handle for `data`.

    Raises:
      TypeError: if `data` is not a Tensor.

    Example:

    ```python
    c = tf.multiply(a, b)
    h = tf.get_session_handle(c)
    h = sess.run(h)

    p, a = tf.get_session_tensor(h.handle, tf.float32)
    b = tf.multiply(a, 10)
    c = sess.run(b, feed_dict={p: h.handle})
    ```

    """
    if not isinstance(data, ops.Tensor):
        raise TypeError("`data` must be of type Tensor.")
    # The handle op must live on the same device as the tensor it captures.
    with ops.colocate_with(data):
        return gen_data_flow_ops._get_session_handle(data, name=name)  # pylint: disable=protected-access
def get_session_tensor(handle, dtype, name=None):
    """Get the tensor of type `dtype` by feeding a tensor handle.

    This is EXPERIMENTAL and subject to change.

    Retrieves the value of a persistent tensor that was produced by a
    previous run() and kept in the session state.

    Args:
      handle: The string representation of a persistent tensor handle.
      dtype: The type of the output tensor.
      name: Optional name prefix for the return tensor.

    Returns:
      A pair of tensors: a placeholder for feeding a tensor handle, and
      the tensor in the session state keyed by that handle.

    Example:

    ```python
    c = tf.multiply(a, b)
    h = tf.get_session_handle(c)
    h = sess.run(h)

    p, a = tf.get_session_tensor(h.handle, tf.float32)
    b = tf.multiply(a, 10)
    c = sess.run(b, feed_dict={p: h.handle})
    ```

    """
    device_name = TensorHandle._get_device_name(handle)
    # Build the read ops on the device that owns the persistent tensor.
    with ops.device(device_name):
        holder = array_ops.placeholder(dtypes.string)
        _register_handle_feeder(holder.graph, holder, dtype)
        tensor = gen_data_flow_ops._get_session_tensor(holder, dtype, name=name)
    return (holder, tensor)
def delete_session_tensor(handle, name=None):
    """Delete the tensor for the given tensor handle.

    This is EXPERIMENTAL and subject to change.

    Removes from session state the persistent tensor produced by a
    previous run().

    Args:
      handle: The string representation of a persistent tensor handle.
      name: Optional name prefix for the return tensor.

    Returns:
      A pair of graph elements: a placeholder for feeding a tensor handle,
      and the deletion operation itself.
    """
    device_name = TensorHandle._get_device_name(handle)
    # Build the delete op on the device that owns the persistent tensor.
    with ops.device(device_name):
        holder = array_ops.placeholder(dtypes.string)
        deleter = gen_data_flow_ops._delete_session_tensor(holder, name=name)
    return (holder, deleter)
def _register_handle_feeder(graph, feeder, dtype):
graph._handle_feeders[feeder.op.name] = dtype
def _get_handle_feeder(graph, feeder):
return graph._handle_feeders.get(feeder.op.name)
def _get_handle_reader(graph, handle, dtype):
    """Return a read subgraph for this handle."""
    graph_key = TensorHandle._get_reader_key(handle)
    result = graph._handle_readers.get(graph_key)
    if result is None:
        # Create reader if we haven't done it.
        handle_device = TensorHandle._get_device_name(handle)
        with graph.as_default(), graph.device(handle_device):
            holder = array_ops.placeholder(dtypes.string)
            _register_handle_feeder(holder.graph, holder, dtype)
            reader = gen_data_flow_ops._get_session_tensor(holder, dtype)
        # Cache the (placeholder, read op) pair per reader key so repeated
        # eval() calls reuse the same subgraph.
        result = (holder, reader)
        graph._handle_readers[graph_key] = result
    return result
def _get_handle_mover(graph, feeder, handle):
    """Return a move subgraph for this pair of feeder and handle."""
    dtype = _get_handle_feeder(graph, feeder)
    if dtype is None:
        # The feeder was never registered as a handle feeder; nothing to move.
        return None
    handle_device = TensorHandle._get_device_name(handle)
    if feeder.op.device == handle_device:
        # The tensor already lives on the feeder's device; no move needed.
        return None
    # Now we know we have to move the tensor.
    graph_key = TensorHandle._get_mover_key(feeder, handle)
    result = graph._handle_movers.get(graph_key)
    if result is None:
        # Create mover if we haven't done it.
        holder, reader = _get_handle_reader(graph, handle, dtype)
        with graph.as_default(), graph.device(feeder.op.device):
            mover = gen_data_flow_ops._get_session_handle(reader)  # pylint: disable=protected-access
        # Cache per (feeder, handle) key so later feeds reuse the subgraph.
        result = (holder, mover)
        graph._handle_movers[graph_key] = result
    return result
def _get_handle_deleter(graph, deleter_key, handle):
    """Return a deletion subgraph for this handle."""
    # NOTE(review): the cache is keyed by deleter_key alone, so the device of
    # the first handle seen for a key determines where the cached deleter
    # lives — confirm callers pass handles from a single device per key.
    result = graph._handle_deleters.get(deleter_key)
    if result is None:
        # Create deleter if we haven't done it.
        handle_device = TensorHandle._get_device_name(handle)
        with graph.as_default(), graph.device(handle_device):
            holder = array_ops.placeholder(dtypes.string)
            deleter = gen_data_flow_ops._delete_session_tensor(holder)
        result = (holder, deleter)
        graph._handle_deleters[deleter_key] = result
    return result
|
nikitph/bluddy-app-engine
|
refs/heads/master
|
lib/werkzeug/contrib/testtools.py
|
319
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.testtools
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements extended wrappers for simplified testing.
`TestResponse`
A response wrapper which adds various cached attributes for
simplified assertions on various content types.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.utils import cached_property, import_string
from werkzeug.wrappers import Response
from warnings import warn
warn(DeprecationWarning('werkzeug.contrib.testtools is deprecated and '
'will be removed with Werkzeug 1.0'))
class ContentAccessors(object):
    """
    A mixin class for response objects that provides a couple of useful
    accessors for unittesting.
    """

    def xml(self):
        """Get an etree if possible."""
        if 'xml' not in self.mimetype:
            raise AttributeError(
                'Not a XML response (Content-Type: %s)'
                % self.mimetype)
        for module in ['xml.etree.ElementTree', 'ElementTree',
                       'elementtree.ElementTree']:
            etree = import_string(module, silent=True)
            if etree is not None:
                # Fixed: the response body is exposed as `.data` (as used by
                # the lxml/json accessors below); `.body` does not exist on
                # werkzeug Response objects and raised AttributeError here.
                return etree.XML(self.data)
        raise RuntimeError('You must have ElementTree installed '
                           'to use TestResponse.xml')
    xml = cached_property(xml)

    def lxml(self):
        """Get an lxml etree if possible."""
        if ('html' not in self.mimetype and 'xml' not in self.mimetype):
            raise AttributeError('Not an HTML/XML response')
        from lxml import etree
        try:
            from lxml.html import fromstring
        except ImportError:
            fromstring = etree.HTML
        # HTML gets the forgiving HTML parser; everything else strict XML.
        if self.mimetype == 'text/html':
            return fromstring(self.data)
        return etree.XML(self.data)
    lxml = cached_property(lxml)

    def json(self):
        """Get the result of simplejson.loads if possible."""
        if 'json' not in self.mimetype:
            raise AttributeError('Not a JSON response')
        # Prefer simplejson when available, fall back to the stdlib.
        try:
            from simplejson import loads
        except ImportError:
            from json import loads
        return loads(self.data)
    json = cached_property(json)
# Concrete response class combining werkzeug's Response with the
# unittest accessors defined above.
class TestResponse(Response, ContentAccessors):
    """Pass this to `werkzeug.test.Client` for easier unittesting."""
|
paplorinc/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyUnresolvedReferencesInspection/incorrectFileLevelMetaclass.py
|
79
|
class __metaclass__(object):
pass
class C(object):
def foo(self):
pass
c = C()
c.foo()
|
carnotweat/cpupimp
|
refs/heads/master
|
libs/git/files.py
|
122
|
# Copyright (c) 2009, Rotem Yaari <vmalloc@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class ModifiedFile(object):
    """Value object representing a single modified file in a git tree."""

    def __init__(self, filename):
        super(ModifiedFile, self).__init__()
        # Path of the modified file, as reported by git.
        self.filename = filename

    def __repr__(self):
        return self.filename

    def __eq__(self, other):
        return isinstance(other, ModifiedFile) and other.filename == self.filename

    def __ne__(self, other):
        # Explicit for Python 2, where __ne__ is not derived from __eq__.
        return not self.__eq__(other)

    def __hash__(self):
        # Defining __eq__ without __hash__ made instances unhashable on
        # Python 3; hash on the same key that equality uses.
        return hash(self.filename)
|
niranjan94/open-event-orga-server
|
refs/heads/master
|
app/api/exports.py
|
5
|
import os
from flask import send_file, make_response, jsonify, url_for, current_app
from flask.ext.restplus import Resource, Namespace, marshal
from app.helpers.data import record_activity
from helpers import custom_fields as fields
from helpers.export_helpers import export_event_json, create_export_job, send_export_mail
from helpers.helpers import nocache, can_access, requires_auth
from helpers.utils import TASK_RESULTS
api = Namespace('exports', description='Exports', path='/')
EXPORT_SETTING = api.model('ExportSetting', {
'image': fields.Boolean(default=False),
'video': fields.Boolean(default=False),
'document': fields.Boolean(default=False),
'audio': fields.Boolean(default=False)
})
@nocache
@api.route('/events/<int:event_id>/export/json')
@api.hide
class EventExportJson(Resource):
    """Queues a JSON export of an event and returns a task-status URL."""

    @requires_auth
    @can_access
    @api.expect(EXPORT_SETTING)
    def post(self, event_id):
        from helpers.tasks import export_event_task
        # queue task
        task = export_event_task.delay(
            event_id, marshal(self.api.payload, EXPORT_SETTING))
        # create Job
        try:
            create_export_job(task.id, event_id)
        except Exception:
            # Best-effort: failing to record the job must not block the export.
            pass
        # in case of testing
        if current_app.config.get('CELERY_ALWAYS_EAGER'):
            # Eager mode runs the task synchronously, so its result is already
            # available: send the mail now and stash the result for the
            # status endpoint.
            send_export_mail(event_id, task.get())
            TASK_RESULTS[task.id] = {
                'result': task.get(),
                'state': task.state
            }
        return jsonify(
            task_url=url_for('api.extras_celery_task', task_id=task.id)
        )
@nocache
@api.hide
@api.route('/events/<int:event_id>/exports/<path:path>')
class ExportDownload(Resource):
    """Serves a previously generated export archive as a file download."""

    def get(self, event_id, path):
        # NOTE(review): 'path' comes straight from the URL and is handed to
        # send_file after only a leading-slash normalization; as written this
        # would allow reading any readable file on the host. Confirm an
        # upstream guard restricts it to the export directory.
        if not path.startswith('/'):
            path = '/' + path
        if not os.path.isfile(path):
            return 'Not Found', 404
        response = make_response(send_file(path))
        # Force a download with a stable per-event archive name.
        response.headers['Content-Disposition'] = 'attachment; filename=event%d.zip' % event_id
        record_activity('export_event', event_id=event_id)
        return response
def event_export_task_base(event_id, settings):
    """Export the event to JSON and return the produced path, relative-ized.

    A single leading slash is stripped so the stored path is relative.
    """
    path = export_event_json(event_id, settings)
    return path[1:] if path.startswith('/') else path
|
open-motivation/motivation-ide
|
refs/heads/master
|
ninja_ide/dependencies/pep8mod.py
|
2
|
#!/usr/bin/env python
# pep8.py - Check Python source code formatting, according to PEP 8
# Copyright (C) 2006 Johann C. Rocholl <johann@rocholl.net>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
r"""
Check Python source code formatting, according to PEP 8:
http://www.python.org/dev/peps/pep-0008/
For usage and a list of options, try this:
$ python pep8.py -h
This program and its regression test suite live here:
http://github.com/jcrocholl/pep8
Groups of errors and warnings:
E errors
W warnings
100 indentation
200 whitespace
300 blank lines
400 imports
500 line length
600 deprecation
700 statements
900 syntax error
You can add checks to this program by writing plugins. Each plugin is
a simple function that is called for each line of source code, either
physical or logical.
Physical line:
- Raw line of text from the input file.
Logical line:
- Multi-line statements converted to a single line.
- Stripped left and right.
- Contents of strings replaced with 'xxx' of same length.
- Comments removed.
The check function requests physical or logical lines by the name of
the first argument:
def maximum_line_length(physical_line)
def extraneous_whitespace(logical_line)
def blank_lines(logical_line, blank_lines, indent_level, line_number)
The last example above demonstrates how check plugins can request
additional information with extra arguments. All attributes of the
Checker object are available. Some examples:
lines: a list of the raw lines from the input file
tokens: the tokens that contribute to this logical line
line_number: line number in the input file
blank_lines: blank lines before this one
indent_char: first indentation character in this file (' ' or '\t')
indent_level: indentation (with tabs expanded to multiples of 8)
previous_indent_level: indentation on previous line
previous_logical: previous logical line
The docstring of each check function shall be the relevant part of
text from PEP 8. It is printed if the user enables --show-pep8.
Several docstrings contain examples directly from the PEP 8 document.
Okay: spam(ham[1], {eggs: 2})
E201: spam( ham[1], {eggs: 2})
These examples are verified automatically when pep8.py is run with the
--doctest option. You can add examples for your own check functions.
The format is simple: "Okay" or error/warning code followed by colon
and space, the rest of the line is example source code. If you put 'r'
before the docstring, you can use \n for newline, \t for tab and \s
for space.
"""
from __future__ import unicode_literals
__version__ = '1.3.4a0'
import os
import sys
import re
import inspect
import keyword
import tokenize
from ninja_ide import resources
# Defaults: directories never walked, and codes ignored out of the box.
DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git'
DEFAULT_IGNORE = 'E24'
if sys.platform == 'win32':
    DEFAULT_CONFIG = os.path.join(resources.HOME_PATH, r'.pep8')
else:
    DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
        os.path.join(
            resources.HOME_PATH, '.config'), 'pep8')
MAX_LINE_LENGTH = 79
REPORT_FORMAT = {
    'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
    'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s',
}
SINGLETONS = frozenset(['False', 'None', 'True'])
KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS
BINARY_OPERATORS = frozenset([
    '**=', '*=', '+=', '-=', '!=', '<>',
    '%=', '^=', '&=', '|=', '==', '/=', '//=', '<=', '>=', '<<=', '>>=',
    '%', '^', '&', '|', '=', '/', '//', '<', '>', '<<'])
UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
OPERATORS = BINARY_OPERATORS | UNARY_OPERATORS
WHITESPACE = frozenset(' \t')
SKIP_TOKENS = frozenset([tokenize.COMMENT, tokenize.NL, tokenize.NEWLINE,
                         tokenize.INDENT, tokenize.DEDENT])
BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']
INDENT_REGEX = re.compile(r'([ \t]*)')
RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*(,)')
RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,\s*\w+\s*,\s*\w+')
SELFTEST_REGEX = re.compile(r'(Okay|[EW]\d{3}):\s(.*)')
ERRORCODE_REGEX = re.compile(r'[EW]\d{3}')
DOCSTRING_REGEX = re.compile(r'u?r?["\']')
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
# BUGFIX: E241/E242 must require *two* spaces (or a tab) after ',;:'.
# With a single space in the alternation, "\s*" backtracks and every
# ordinary ", " was reported as E241.
WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?:  |\t)')
COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)')
COMPARE_TYPE_REGEX = re.compile(r'([=!]=|is|is\s+not)\s*type(?:s\.(\w+)Type'
                                r'|\(\s*(\(\s*\)|[^)]*[^ )])\s*\))')
KEYWORD_REGEX = re.compile(r'(?:[^\s])(\s*)\b(?:%s)\b(\s*)' %
                           r'|'.join(KEYWORDS))
OPERATOR_REGEX = re.compile(r'(?:[^\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
LAMBDA_REGEX = re.compile(r'\blambda\b')
HUNK_REGEX = re.compile(r'^@@ -\d+,\d+ \+(\d+),(\d+) @@.*$')
# Work around Python < 2.6 behaviour, which does not generate NL after
# a comment which is on a line by itself.
COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n'
#Options is a bare namespace object holding the settings that drive the
#checks; ninja-ide assigns them directly instead of parsing command-line args.
class Options:
    pass
options = Options()
#Set the options to be analyzed (checker defaults)
options.show_source = True
options.repeat = True
options.show_pep8 = False
options.messages = {}
options.select = []
options.ignore = []
options.verbose = 0
options.quiet = 0
options.max_line_length = MAX_LINE_LENGTH
options.ignore_continuation_indentation = True
##############################################################################
# Plugins (check functions) for physical lines
##############################################################################
def tabs_or_spaces(physical_line, indent_char):
    r"""
    Never mix tabs and spaces.
    The most popular way of indenting Python is with spaces only. The
    second-most popular way is with tabs only. Code indented with a mixture
    of tabs and spaces should be converted to using spaces exclusively. When
    invoking the Python command line interpreter with the -t option, it issues
    warnings about code that illegally mixes tabs and spaces. When using -tt
    these warnings become errors. These options are highly recommended!
    Okay: if a == 0:\n a = 1\n b = 1
    E101: if a == 0:\n a = 1\n\tb = 1
    """
    # Flag the first leading-whitespace character that disagrees with the
    # file's dominant indent character.
    leading = INDENT_REGEX.match(physical_line).group(1)
    for position, symbol in enumerate(leading):
        if symbol != indent_char:
            return position, "E101 indentation contains mixed spaces and tabs"
def tabs_obsolete(physical_line):
    r"""
    For new projects, spaces-only are strongly recommended over tabs. Most
    editors have features that make this easy to do.
    Okay: if True:\n return
    W191: if True:\n\treturn
    """
    # Report the position of the first tab inside the leading whitespace.
    leading = INDENT_REGEX.match(physical_line).group(1)
    tab_position = leading.find('\t')
    if tab_position != -1:
        return tab_position, "W191 indentation contains tabs"
def trailing_whitespace(physical_line):
    r"""
    JCR: Trailing whitespace is superfluous.
    FBM: Except when it occurs as part of a blank line (i.e. the line is
    nothing but whitespace). According to Python docs[1] a line with only
    whitespace is considered a blank line, and is to be ignored. However,
    matching a blank line to its indentation level avoids mistakenly
    terminating a multi-line statement (e.g. class declaration) when
    pasting code into the standard Python interpreter.
    [1] http://docs.python.org/reference/lexical_analysis.html#blank-lines
    The warning returned varies on whether the line itself is blank, for easier
    filtering for those who want to indent their blank lines.
    Okay: spam(1)
    W291: spam(1)\s
    W293: class Foo(object):\n \n bang = 12
    """
    # Strip the line terminators first so they do not count as whitespace:
    # newline, carriage return, then form feed (^L).
    for terminator in ('\n', '\r', '\x0c'):
        physical_line = physical_line.rstrip(terminator)
    stripped = physical_line.rstrip(' \t\v')
    if physical_line == stripped:
        return None
    if stripped:
        return len(stripped), "W291 trailing whitespace"
    return 0, "W293 blank line contains whitespace"
def trailing_blank_lines(physical_line, lines, line_number):
    r"""
    JCR: Trailing blank lines are superfluous.
    Okay: spam(1)
    W391: spam(1)\n
    """
    # Only the very last physical line of the file can trigger this warning.
    is_last_line = (line_number == len(lines))
    if is_last_line and not physical_line.rstrip():
        return 0, "W391 blank line at end of file"
def missing_newline(physical_line):
    """
    JCR: The last line should have a newline.
    Reports warning W292.
    """
    # A line that equals its own rstrip() carries no trailing newline.
    if physical_line == physical_line.rstrip():
        return len(physical_line), "W292 no newline at end of file"
def maximum_line_length(physical_line, max_line_length):
    """
    Limit all lines to a maximum of 79 characters.
    There are still many devices around that are limited to 80 character
    lines; plus, limiting windows to 80 characters makes it possible to have
    several windows side-by-side. The default wrapping on such devices looks
    ugly. Therefore, please limit all lines to a maximum of 79 characters.
    For flowing long blocks of text (docstrings or comments), limiting the
    length to 72 characters is recommended.
    Reports error E501.
    """
    stripped = physical_line.rstrip()
    length = len(stripped)
    if length <= max_line_length:
        return None
    # On Python 2 str the byte length may overcount multi-byte characters,
    # so re-measure in decoded characters when possible.
    if hasattr(stripped, 'decode'):
        try:
            length = len(stripped.decode('utf-8'))
        except UnicodeError:
            pass
    if length > max_line_length:
        return (max_line_length, "E501 line too long "
                "(%d > %d characters)" % (length, max_line_length))
##############################################################################
# Plugins (check functions) for logical lines
##############################################################################
def blank_lines(logical_line, blank_lines, indent_level, line_number,
                previous_logical, previous_indent_level):
    r"""
    Separate top-level function and class definitions with two blank lines.
    Method definitions inside a class are separated by a single blank line.
    Extra blank lines may be used (sparingly) to separate groups of related
    functions. Blank lines may be omitted between a bunch of related
    one-liners (e.g. a set of dummy implementations).
    Use blank lines in functions, sparingly, to indicate logical sections.
    Okay: def a():\n pass\n\n\ndef b():\n pass
    Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass
    E301: class Foo:\n b = 0\n def bar():\n pass
    E302: def a():\n pass\n\ndef b(n):\n pass
    E303: def a():\n pass\n\n\n\ndef b(n):\n pass
    E303: def a():\n\n\n\n pass
    E304: @decorator\n\ndef a():\n pass
    """
    if line_number == 1:
        # Never expect blank lines before the very first line.
        return
    starts_definition = logical_line.startswith(('def ', 'class ', '@'))
    if previous_logical.startswith('@'):
        # A decorator must be glued to the definition it decorates.
        if blank_lines:
            yield 0, "E304 blank lines found after function decorator"
    elif blank_lines > 2 or (indent_level and blank_lines == 2):
        yield 0, "E303 too many blank lines (%d)" % blank_lines
    elif starts_definition:
        if indent_level:
            # Nested definition: one blank line expected, unless we just
            # indented or the previous logical line was a docstring.
            if not (blank_lines or previous_indent_level < indent_level or
                    DOCSTRING_REGEX.match(previous_logical)):
                yield 0, "E301 expected 1 blank line, found 0"
        elif blank_lines != 2:
            yield 0, "E302 expected 2 blank lines, found %d" % blank_lines
def extraneous_whitespace(logical_line):
    """
    Avoid extraneous whitespace in the following situations:
    - Immediately inside parentheses, brackets or braces.
    - Immediately before a comma, semicolon, or colon.
    Okay: spam(ham[1], {eggs: 2})
    E201: spam( ham[1], {eggs: 2})
    E201: spam(ham[ 1], {eggs: 2})
    E201: spam(ham[1], { eggs: 2})
    E202: spam(ham[1], {eggs: 2} )
    E202: spam(ham[1 ], {eggs: 2})
    E202: spam(ham[1], {eggs: 2 })
    E203: if x == 4: print x, y; x, y = y , x
    E203: if x == 4: print x, y ; x, y = y, x
    E203: if x == 4 : print x, y; x, y = y, x
    """
    for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(logical_line):
        # Each match is two characters: an open bracket followed by a
        # space, or a space followed by a closer/comma/semicolon/colon.
        text = match.group()
        char = text.strip()
        offset = match.start()
        if text.endswith(' '):
            yield offset + 1, "E201 whitespace after '%s'" % char
        elif logical_line[offset - 1] != ',':
            code = 'E202' if char in '}])' else 'E203'
            yield offset, "%s whitespace before '%s'" % (code, char)
def whitespace_around_keywords(logical_line):
    r"""
    Avoid extraneous whitespace around keywords.
    Okay: True and False
    E271: True and False
    E272: True and False
    E273: True and\tFalse
    E274: True\tand False
    """
    for match in KEYWORD_REGEX.finditer(logical_line):
        # Group 1 is the gap before the keyword, group 2 the gap after it.
        for group, side, tab_code, space_code in ((1, "before", "E274", "E272"),
                                                  (2, "after", "E273", "E271")):
            gap = match.group(group)
            if '\t' in gap:
                yield match.start(group), "%s tab %s keyword" % (tab_code, side)
            elif len(gap) > 1:
                yield (match.start(group),
                       "%s multiple spaces %s keyword" % (space_code, side))
def missing_whitespace(logical_line):
    """
    JCR: Each comma, semicolon or colon should be followed by whitespace.
    Okay: [a, b]
    Okay: (3,)
    Okay: a[1:4]
    Okay: a[:4]
    Okay: a[1:]
    Okay: a[1:4:2]
    E231: ['a','b']
    E231: foo(bar,baz)
    """
    for index, char in enumerate(logical_line[:-1]):
        if char not in ',;:':
            continue
        next_char = logical_line[index + 1]
        if next_char in WHITESPACE:
            continue
        if char == ':':
            head = logical_line[:index]
            if head.count('[') > head.count(']'):
                continue    # Slice syntax, no space required
        if char == ',' and next_char == ')':
            continue    # Allow tuple with only one element: (3,)
        yield index, "E231 missing whitespace after '%s'" % char
def indentation(logical_line, previous_logical, indent_char,
                indent_level, previous_indent_level):
    r"""
    Use 4 spaces per indentation level.
    For really old code that you don't want to mess up, you can continue to
    use 8-space tabs.
    Okay: a = 1
    Okay: if a == 0:\n a = 1
    E111: a = 1
    Okay: for item in items:\n pass
    E112: for item in items:\npass
    Okay: a = 1\nb = 2
    E113: a = 1\n b = 2
    """
    if indent_char == ' ' and indent_level % 4:
        yield 0, "E111 indentation is not a multiple of four"
    # A trailing colon announces a block; the next line must go deeper.
    expects_block = previous_logical.endswith(':')
    went_deeper = indent_level > previous_indent_level
    if expects_block and not went_deeper:
        yield 0, "E112 expected an indented block"
    if went_deeper and not expects_block:
        yield 0, "E113 unexpected indentation"
def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
    r"""
    Continuation lines should align wrapped elements either vertically using
    Python's implicit line joining inside parentheses, brackets and braces, or
    using a hanging indent.
    When using a hanging indent the following considerations should be applied:
    - there should be no arguments on the first line, and
    - further indentation should be used to clearly distinguish itself as a
    continuation line.
    Okay: a = (\n)
    E123: a = (\n )
    Okay: a = (\n 42)
    E121: a = (\n 42)
    E122: a = (\n42)
    E123: a = (\n 42\n )
    E124: a = (24,\n 42\n)
    E125: if (a or\n b):\n pass
    E126: a = (\n 42)
    E127: a = (24,\n 42)
    E128: a = (24,\n 42)
    """
    # NOTE: `verbose` is accepted for plugin-signature compatibility but is
    # not used in this trimmed-down copy of the check.
    if options.ignore_continuation_indentation:
        return
    first_row = tokens[0][2][0]
    nrows = 1 + tokens[-1][2][0] - first_row
    if nrows == 1:
        # Single physical line: nothing to check.
        return
    # indent_next tells us whether the next block is indented; assuming
    # that it is indented by 4 spaces, then we should not allow 4-space
    # indents on the final continuation line; in turn, some other
    # indents are allowed to have an extra 4 spaces.
    indent_next = logical_line.endswith(':')
    row = depth = 0
    # remember how many brackets were opened on each line
    parens = [0] * nrows
    # relative indents of physical lines
    rel_indent = [0] * nrows
    # visual indents
    indent = [indent_level]
    indent_chances = {}
    last_indent = tokens[0][2]
    for token_type, text, start, end, line in tokens:
        # A token starting on a later physical row than the last one we
        # processed marks a candidate continuation line (unless the
        # previous token spanned multiple lines, or it is just NL/NEWLINE).
        newline = row < start[0] - first_row
        if newline:
            row = start[0] - first_row
            newline = (not last_token_multiline and
                       token_type not in (tokenize.NL, tokenize.NEWLINE))
        if newline:
            # this is the beginning of a continuation line.
            last_indent = start
            # record the initial indent.
            rel_indent[row] = start[1] - indent_level
            if depth:
                # a bracket expression in a continuation line.
                # find the line that it was opened on
                for open_row in range(row - 1, -1, -1):
                    if parens[open_row]:
                        break
                else:
                    # an unbracketed continuation line (ie, backslash)
                    open_row = 0
                # hang: extra indentation relative to the opening line.
                hang = rel_indent[row] - rel_indent[open_row]
                visual_indent = indent_chances.get(start[1])
                if token_type == tokenize.OP and text in ']})':
                    # this line starts with a closing bracket
                    if indent[depth]:
                        if start[1] != indent[depth]:
                            yield (start, 'E124 closing bracket does not match '
                                   'visual indentation')
                    elif hang:
                        yield (start, 'E123 closing bracket does not match '
                               'indentation of opening bracket\'s line')
                elif visual_indent is True:
                    # visual indent is verified
                    if not indent[depth]:
                        indent[depth] = start[1]
                elif visual_indent in (text, str):
                    # ignore token lined up with matching one from a previous line
                    pass
                elif indent[depth] and start[1] < indent[depth]:
                    # visual indent is broken
                    yield (start, 'E128 continuation line '
                           'under-indented for visual indent')
                elif hang == 4 or (indent_next and rel_indent[row] == 8):
                    # hanging indent is verified
                    pass
                else:
                    # indent is broken
                    if hang <= 0:
                        error = 'E122', 'missing indentation or outdented'
                    elif indent[depth]:
                        error = 'E127', 'over-indented for visual indent'
                    elif hang % 4:
                        error = 'E121', 'indentation is not a multiple of four'
                    else:
                        error = 'E126', 'over-indented for hanging indent'
                    yield start, "%s continuation line %s" % error
        # look for visual indenting
        if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)
                and not indent[depth]):
            indent[depth] = start[1]
            indent_chances[start[1]] = True
        # deal with implicit string concatenation
        elif token_type == tokenize.STRING or text in ('u', 'ur', 'b', 'br'):
            indent_chances[start[1]] = str
        # keep track of bracket depth
        if token_type == tokenize.OP:
            if text in '([{':
                depth += 1
                indent.append(0)
                parens[row] += 1
            elif text in ')]}' and depth > 0:
                # parent indents should not be more than this one
                prev_indent = indent.pop() or last_indent[1]
                for d in range(depth):
                    if indent[d] > prev_indent:
                        indent[d] = 0
                for ind in list(indent_chances):
                    if ind >= prev_indent:
                        del indent_chances[ind]
                depth -= 1
                if depth:
                    indent_chances[indent[depth]] = True
                # decrement the paren count on the row it was opened on
                for idx in range(row, -1, -1):
                    if parens[idx]:
                        parens[idx] -= 1
                        break
            assert len(indent) == depth + 1
            if start[1] not in indent_chances:
                # allow to line up tokens
                indent_chances[start[1]] = text
        last_token_multiline = (start[0] != end[0])
    if indent_next and rel_indent[-1] == 4:
        yield (last_indent, "E125 continuation line does not distinguish "
               "itself from next logical line")
def whitespace_before_parameters(logical_line, tokens):
    """
    Avoid extraneous whitespace in the following situations:
    - Immediately before the open parenthesis that starts the argument
    list of a function call.
    - Immediately before the open parenthesis that starts an indexing or
    slicing.
    Okay: spam(1)
    E211: spam (1)
    Okay: dict['key'] = list[index]
    E211: dict ['key'] = list [index]
    E211: dict['key'] = list [index]
    """
    prev_type, prev_text, _, prev_end, _ = tokens[0]
    for index in range(1, len(tokens)):
        token_type, text, start, end, _ = tokens[index]
        opens_call = (token_type == tokenize.OP and text in '([')
        # A call/subscript follows a name or a closing bracket.
        follows_value = (prev_type == tokenize.NAME or prev_text in '}])')
        if (opens_call and start != prev_end and follows_value
                # Syntax "class A (B):" is allowed, but avoid it
                and (index < 2 or tokens[index - 2][1] != 'class')
                # Allow "return (a.foo for a in range(5))"
                and not keyword.iskeyword(prev_text)):
            yield prev_end, "E211 whitespace before '%s'" % text
        prev_type, prev_text, prev_end = token_type, text, end
def whitespace_around_operator(logical_line):
    r"""
    Avoid extraneous whitespace in the following situations:
    - More than one space around an assignment (or other) operator to
    align it with another.
    Okay: a = 12 + 3
    E221: a = 4 + 5
    E222: a = 4 + 5
    E223: a = 4\t+ 5
    E224: a = 4 +\t5
    """
    for match in OPERATOR_REGEX.finditer(logical_line):
        # Group 1 is the gap before the operator, group 2 the gap after it.
        for group, side, tab_code, space_code in ((1, "before", "E223", "E221"),
                                                  (2, "after", "E224", "E222")):
            padding = match.group(group)
            if '\t' in padding:
                yield match.start(group), "%s tab %s operator" % (tab_code, side)
            elif len(padding) > 1:
                yield (match.start(group),
                       "%s multiple spaces %s operator" % (space_code, side))
def missing_whitespace_around_operator(logical_line, tokens):
    r"""
    - Always surround these binary operators with a single space on
    either side: assignment (=), augmented assignment (+=, -= etc.),
    comparisons (==, <, >, !=, <>, <=, >=, in, not in, is, is not),
    Booleans (and, or, not).
    - Use spaces around arithmetic operators.
    Okay: i = i + 1
    Okay: submitted += 1
    Okay: x = x * 2 - 1
    Okay: hypot2 = x * x + y * y
    Okay: c = (a + b) * (a - b)
    Okay: foo(bar, key='word', *args, **kwargs)
    Okay: baz(**kwargs)
    Okay: negative = -1
    Okay: spam(-1)
    Okay: alpha[:-i]
    Okay: if not -5 < x < +5:\n pass
    Okay: lambda *args, **kw: (args, kw)
    E225: i=i+1
    E225: submitted +=1
    E225: x = x*2 - 1
    E225: hypot2 = x*x + y*y
    E225: c = (a+b) * (a-b)
    E225: c = alpha -4
    E225: z = x **y
    """
    # State machine over the token stream: after seeing an operator that
    # needs surrounding whitespace, `need_space` stays set until the next
    # token proves (or disproves) that a space follows it.
    parens = 0
    need_space = False
    prev_type = tokenize.OP
    prev_text = prev_end = None
    for token_type, text, start, end, line in tokens:
        if token_type in (tokenize.NL, tokenize.NEWLINE, tokenize.ERRORTOKEN):
            # ERRORTOKEN is triggered by backticks in Python 3000
            continue
        # Track call/lambda nesting so '=' in keyword arguments is allowed.
        if text in ('(', 'lambda'):
            parens += 1
        elif text == ')':
            parens -= 1
        if need_space:
            if start != prev_end:
                # A space did follow the operator: requirement satisfied.
                need_space = False
            elif text == '>' and prev_text in ('<', '-'):
                # Tolerate the "<>" operator, even if running Python 3
                # Deal with Python 3's annotated return value "->"
                pass
            else:
                yield prev_end, "E225 missing whitespace around operator"
                need_space = False
        elif token_type == tokenize.OP and prev_end is not None:
            if text == '=' and parens:
                # Allow keyword args or defaults: foo(bar=None).
                pass
            elif text in BINARY_OPERATORS:
                need_space = True
            elif text in UNARY_OPERATORS:
                # Allow unary operators: -123, -x, +1.
                # Allow argument unpacking: foo(*args, **kwargs).
                if prev_type == tokenize.OP:
                    if prev_text in '}])':
                        need_space = True
                elif prev_type == tokenize.NAME:
                    if prev_text not in KEYWORDS:
                        need_space = True
                elif prev_type not in SKIP_TOKENS:
                    need_space = True
            if need_space and start == prev_end:
                # No whitespace *before* the operator either.
                yield prev_end, "E225 missing whitespace around operator"
                need_space = False
        prev_type = token_type
        prev_text = text
        prev_end = end
def whitespace_around_comma(logical_line):
    r"""
    Avoid extraneous whitespace in the following situations:
    - More than one space around an assignment (or other) operator to
    align it with another.
    Note: these checks are disabled by default
    Okay: a = (1, 2)
    E241: a = (1, 2)
    E242: a = (1,\t2)
    """
    for match in WHITESPACE_AFTER_COMMA_REGEX.finditer(logical_line):
        offset = match.start() + 1
        separator = match.group()[0]
        code = 'E242 tab' if '\t' in match.group() else 'E241 multiple spaces'
        yield offset, "%s after '%s'" % (code, separator)
def whitespace_around_named_parameter_equals(logical_line, tokens):
    """
    Don't use spaces around the '=' sign when used to indicate a
    keyword argument or a default parameter value.
    Okay: def complex(real, imag=0.0):
    Okay: return magic(r=real, i=imag)
    Okay: boolean(a == b)
    Okay: boolean(a != b)
    Okay: boolean(a <= b)
    Okay: boolean(a >= b)
    E251: def complex(real, imag = 0.0):
    E251: return magic(r = real, i = imag)
    """
    message = "E251 no spaces around keyword / parameter equals"
    depth = 0           # parenthesis nesting level
    pending = False     # True right after a keyword/default '='
    prev_end = None
    for token_type, text, start, end, line in tokens:
        if pending:
            # The token after '=' must touch it.
            pending = False
            if start != prev_end:
                yield prev_end, message
        elif token_type == tokenize.OP:
            if text == '(':
                depth += 1
            elif text == ')':
                depth -= 1
            elif depth and text == '=':
                pending = True
                # The '=' itself must touch the preceding token.
                if start != prev_end:
                    yield prev_end, message
        prev_end = end
def whitespace_before_inline_comment(logical_line, tokens):
    """
    Separate inline comments by at least two spaces.
    An inline comment is a comment on the same line as a statement. Inline
    comments should be separated by at least two spaces from the statement.
    They should start with a # and a single space.
    Okay: x = x + 1  # Increment x
    Okay: x = x + 1    # Increment x
    E261: x = x + 1 # Increment x
    E262: x = x + 1  #Increment x
    E262: x = x + 1  #  Increment x
    """
    prev_end = (0, 0)
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            if not line[:start[1]].strip():
                # Comment preceded only by whitespace: a block comment,
                # not an inline one -- skip it.
                continue
            if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
                yield (prev_end,
                       "E261 at least two spaces before inline comment")
            # E262: the comment must start with '#' and exactly one space.
            # BUGFIX: the previous condition compared against '# ' twice
            # ("startswith('# ') or not startswith('# ')"), a tautology
            # that flagged every inline comment.
            if text.startswith('#  ') or not text.startswith('# '):
                yield start, "E262 inline comment should start with '# '"
        elif token_type != tokenize.NL:
            prev_end = end
def imports_on_separate_lines(logical_line):
    r"""
    Imports should usually be on separate lines.
    Okay: import os\nimport sys
    E401: import sys, os
    Okay: from subprocess import Popen, PIPE
    Okay: from myclas import MyClass
    Okay: from foo.bar.yourclass import YourClass
    Okay: import myclass
    Okay: import foo.bar.yourclass
    """
    # Only plain "import a, b" is flagged; "from x import a, b" is fine.
    if not logical_line.startswith('import '):
        return
    comma = logical_line.find(',')
    if comma > -1 and ';' not in logical_line[:comma]:
        yield comma, "E401 multiple imports on one line"
def compound_statements(logical_line):
    r"""
    Compound statements (multiple statements on the same line) are
    generally discouraged.
    While sometimes it's okay to put an if/for/while with a small body
    on the same line, never do this for multi-clause statements. Also
    avoid folding such long lines!
    Okay: if foo == 'blah':\n do_blah_thing()
    Okay: do_one()
    Okay: do_two()
    Okay: do_three()
    E701: if foo == 'blah': do_blah_thing()
    E701: for x in lst: total += x
    E701: while t < 10: t = delay()
    E701: if foo == 'blah': do_blah_thing()
    E701: else: do_non_blah_thing()
    E701: try: something()
    E701: finally: cleanup()
    E701: if foo == 'blah': one(); two(); three()
    E702: do_one(); do_two(); do_three()
    """
    colon = logical_line.find(':')
    if -1 < colon < len(logical_line) - 1:
        head = logical_line[:colon]
        # A colon inside still-open {, [ or ( is a dict key, slice or
        # annotation, not a statement separator; lambda colons are fine too.
        brackets_closed = (head.count('{') <= head.count('}') and
                          head.count('[') <= head.count(']') and
                          head.count('(') <= head.count(')'))
        if brackets_closed and not LAMBDA_REGEX.search(head):
            yield colon, "E701 multiple statements on one line (colon)"
    semicolon = logical_line.find(';')
    if semicolon > -1:
        yield semicolon, "E702 multiple statements on one line (semicolon)"
def explicit_line_join(logical_line, tokens):
    r"""
    Avoid explicit line join between brackets.
    The preferred way of wrapping long lines is by using Python's implied line
    continuation inside parentheses, brackets and braces. Long lines can be
    broken over multiple lines by wrapping expressions in parentheses. These
    should be used in preference to using a backslash for line continuation.
    E502: aaa = [123, \\n 123]
    E502: aaa = ("bbb " \\n "ccc")
    Okay: aaa = [123,\n 123]
    Okay: aaa = ("bbb "\n "ccc")
    Okay: aaa = "bbb " \\n "ccc"
    """
    # `backslash` records the position of a trailing '\' on the previous
    # physical line (or None).  It is only read after at least one row
    # change, because on the first token `parens` is still 0 and the
    # short-circuit below skips the lookup -- so it is never unbound here.
    prev_start = prev_end = parens = 0
    for token_type, text, start, end, line in tokens:
        if start[0] != prev_start and parens and backslash:
            yield backslash, "E502 the backslash is redundant between brackets"
        if end[0] != prev_end:
            # Token ends on a new physical row: note whether that row ends
            # with an explicit backslash continuation.
            if line.rstrip('\r\n').endswith('\\'):
                backslash = (end[0], len(line.splitlines()[-1]) - 1)
            else:
                backslash = None
            prev_start = prev_end = end[0]
        else:
            prev_start = start[0]
        # Track bracket depth: backslashes are only redundant inside brackets.
        if token_type == tokenize.OP:
            if text in '([{':
                parens += 1
            elif text in ')]}':
                parens -= 1
def comparison_to_singleton(logical_line):
    """
    Comparisons to singletons like None should always be done
    with "is" or "is not", never the equality operators.
    Okay: if arg is not None:
    E711: if arg != None:
    E712: if arg == True:
    Also, beware of writing if x when you really mean if x is not None --
    e.g. when testing whether a variable or argument that defaults to None was
    set to some other value. The other value might have a type (such as a
    container) that could be false in a boolean context!
    """
    match = COMPARE_SINGLETON_REGEX.search(logical_line)
    if not match:
        return
    is_equality = (match.group(1) == '==')
    singleton = match.group(2)
    code = 'E711' if singleton == 'None' else 'E712'
    msg = "'if cond is %s:'" % (('' if is_equality else 'not ') + singleton)
    if code == 'E712':
        # For True/False also suggest the plain truthiness test.
        truthy = ((singleton == 'True') == is_equality)
        msg += " or 'if %scond:'" % ('' if truthy else 'not ')
    yield match.start(1), ("%s comparison to %s should be %s" %
                           (code, singleton, msg))
def comparison_type(logical_line):
    """
    Object type comparisons should always use isinstance() instead of
    comparing types directly.
    Okay: if isinstance(obj, int):
    E721: if type(obj) is type(1):
    When checking if an object is a string, keep in mind that it might be a
    unicode string too! In Python 2.3, str and unicode have a common base
    class, basestring, so you can do:
    Okay: if isinstance(obj, basestring):
    Okay: if type(a1) is type(b1):
    """
    match = COMPARE_TYPE_REGEX.search(logical_line)
    if not match:
        return
    inst = match.group(3)
    if inst and isidentifier(inst) and inst not in SINGLETONS:
        # Allow comparison for types which are not obvious
        return
    yield match.start(1), "E721 do not compare types, use 'isinstance()'"
def python_3000_has_key(logical_line):
    r"""
    The {}.has_key() method will be removed in the future version of
    Python. Use the 'in' operation instead.
    Okay: if "alph" in d:\n print d["alph"]
    W601: assert d.has_key('alph')
    """
    offset = logical_line.find('.has_key(')
    if offset != -1:
        yield offset, "W601 .has_key() is deprecated, use 'in'"
def python_3000_raise_comma(logical_line):
    """
    When raising an exception, use "raise ValueError('message')"
    instead of the older form "raise ValueError, 'message'".
    The paren-using form is preferred because when the exception arguments
    are long or include string formatting, you don't need to use line
    continuation characters thanks to the containing parentheses. The older
    form will be removed in Python 3000.
    Okay: raise DummyError("Message")
    W602: raise DummyError, "Message"
    """
    match = RAISE_COMMA_REGEX.match(logical_line)
    if not match:
        return
    if RERAISE_COMMA_REGEX.match(logical_line):
        # Three-argument re-raise form: leave it alone, a naive rewrite
        # would lose the traceback argument.
        return
    yield match.start(1), "W602 deprecated form of raising exception"
def python_3000_not_equal(logical_line):
    """
    != can also be written <>, but this is an obsolete usage kept for
    backwards compatibility only. New code should always use !=.
    The older syntax is removed in Python 3000.
    Okay: if a != 'no':
    W603: if a <> 'no':
    """
    offset = logical_line.find('<>')
    if offset != -1:
        yield offset, "W603 '<>' is deprecated, use '!='"
def python_3000_backticks(logical_line):
    """
    Backticks are removed in Python 3000.
    Use repr() instead.
    Okay: val = repr(1 + 2)
    W604: val = `1 + 2`
    """
    offset = logical_line.find('`')
    if offset != -1:
        yield offset, "W604 backticks are deprecated, use 'repr()'"
##############################################################################
# Helper functions
##############################################################################
# Pick an "is this a Python identifier?" predicate that works on both majors.
if '' == ''.encode():
    # Python 2: ''.encode() returns a str equal to '', so this branch runs
    # there; Python 2 str has no isidentifier(), fall back to a regex.
    isidentifier = re.compile(r'[a-zA-Z_]\w*').match
else:
    # Python 3: ''.encode() is b'' (never equal to ''), use the builtin.
    isidentifier = str.isidentifier
def expand_indent(line):
    r"""
    Return the amount of indentation.
    Tabs are expanded to the next multiple of 8.
    >>> expand_indent('    ')
    4
    >>> expand_indent('\t')
    8
    >>> expand_indent(' \t')
    8
    >>> expand_indent(' \t')
    8
    >>> expand_indent(' \t')
    16
    """
    # Fast path: with no tabs the indent is just the stripped-prefix length.
    if '\t' not in line:
        return len(line) - len(line.lstrip())
    width = 0
    for char in line:
        if char == '\t':
            # A tab jumps to the next multiple of 8.
            width = width // 8 * 8 + 8
        elif char == ' ':
            width += 1
        else:
            break
    return width
def mute_string(text):
    """
    Replace contents with 'xxx' to prevent syntax matching.
    >>> mute_string('"abc"')
    '"xxx"'
    >>> mute_string("'''abc'''")
    "'''xxx'''"
    >>> mute_string("r'abc'")
    "r'xxx'"
    """
    # The first occurrence of the closing quote character marks the end of
    # any string modifiers (e.g. u or r) plus the opening quote.
    quote = text[-1]
    body_start = text.index(quote) + 1
    body_end = len(text) - 1
    if text[-3:] in ('"""', "'''"):     # triple-quoted string
        body_start += 2
        body_end -= 2
    return text[:body_start] + 'x' * (body_end - body_start) + text[body_end:]
##############################################################################
# Framework to run all checks
##############################################################################
def find_checks(argument_name):
    """
    Find all globally visible functions where the first argument name
    starts with argument_name.

    Returns a sorted list of (name, function, argument_names) tuples,
    skipping checks whose every documented error code is ignored by the
    current options.
    """
    checks = []
    for name, function in list(globals().items()):
        if not inspect.isfunction(function):
            continue
        args = inspect.getargspec(function)[0]
        if args and args[0].startswith(argument_name):
            # The error codes a check reports are declared in its docstring.
            codes = ERRORCODE_REGEX.findall(function.__doc__ or '')
            # yield name, codes, function, args
            for code in codes or ['']:
                # Register the check if at least one of its codes (or the
                # check itself, when it documents none) is not ignored.
                if not code or not ignore_code(code):
                    checks.append((name, function, args))
                    break
    checks.sort()
    return checks
def ignore_code(code):
    """
    Check if options.ignore contains a prefix of the error code.
    If options.select contains a prefix of the error code, do not ignore it.
    """
    # str.startswith accepts a tuple of prefixes (an empty tuple never
    # matches), so both prefix scans collapse to single calls.
    if code.startswith(tuple(options.select)):
        return False
    if code.startswith(tuple(options.ignore)):
        return True
#Register the physical- and logical-line check plugins once at import time.
options.physical_checks = find_checks('physical_line')
options.logical_checks = find_checks('logical_line')
def refresh_checks():
    """Re-scan the module for check plugins (e.g. after options changed)."""
    #Refresh the Physical and Logical check functions
    options.physical_checks = find_checks('physical_line')
    options.logical_checks = find_checks('logical_line')
class Checker(object):
    """
    Load a Python source file, tokenize it, check coding style.

    Physical checks run on every raw input line while the file is being
    tokenized; logical checks run on logical lines rebuilt from the token
    stream.  Problems are reported through report_error().
    """
    def __init__(self, filename, lines=None):
        """Snapshot the current module-level options and remember the input.

        filename -- path used when reporting problems.
        lines -- list of raw source lines; readline() indexes this list.
        """
        self._physical_checks = options.physical_checks
        self._logical_checks = options.logical_checks
        self.max_line_length = options.max_line_length
        self.verbose = options.verbose
        self.filename = filename
        self.lines = lines
        # Accumulated check results.
        self.results = []
def readline(self):
"""
Get the next line from the input buffer.
"""
self.line_number += 1
if self.line_number > len(self.lines):
return ''
return self.lines[self.line_number - 1]
def readline_check_physical(self):
"""
Check and return the next physical line. This method can be
used to feed tokenize.generate_tokens.
"""
line = self.readline()
if line:
self.check_physical(line)
return line
def run_check(self, check, argument_names):
"""
Run a check plugin.
"""
arguments = []
for name in argument_names:
arguments.append(getattr(self, name))
return check(*arguments)
def check_physical(self, line):
"""
Run all physical checks on a raw input line.
"""
self.physical_line = line
if self.indent_char is None and line[:1] in WHITESPACE:
self.indent_char = line[0]
for name, check, argument_names in self._physical_checks:
result = self.run_check(check, argument_names)
if result is not None:
offset, text = result
self.report_error(self.line_number, offset, text, check)
    def build_tokens_line(self):
        """
        Build a logical line from tokens.

        Side effects: sets self.logical_line and self.mapping, a list of
        (offset-in-logical-line, token) pairs used later to translate
        logical offsets back to physical positions.
        """
        self.mapping = []
        logical = []
        length = 0
        previous = None
        for token in self.tokens:
            token_type, text = token[0:2]
            if token_type in SKIP_TOKENS:
                continue
            if token_type == tokenize.STRING:
                # Neutralize string contents so checks can't match inside.
                text = mute_string(text)
            if previous:
                end_row, end = previous[3]
                start_row, start = token[2]
                if end_row != start_row:    # different row
                    prev_text = self.lines[end_row - 1][end - 1]
                    # Join rows with a single space unless the tokens are
                    # adjacent brackets (then no separator is needed).
                    if prev_text == ',' or (prev_text not in '{[('
                                            and text not in '}])'):
                        logical.append(' ')
                        length += 1
                elif end != start:  # different column
                    # Preserve the exact inter-token text from the source.
                    fill = self.lines[end_row - 1][end:start]
                    logical.append(fill)
                    length += len(fill)
            self.mapping.append((length, token))
            logical.append(text)
            length += len(text)
            previous = token
        self.logical_line = ''.join(logical)
        # The logical line is built stripped by construction.
        assert self.logical_line.strip() == self.logical_line
    def check_logical(self):
        """
        Build a line from tokens and run all logical checks on it.

        Offsets returned by a check are either (row, col) tuples used
        directly, or plain offsets into the logical line that are mapped
        back to physical coordinates via self.mapping.
        """
        self.build_tokens_line()
        # Indentation is taken from the physical line of the first token.
        first_line = self.lines[self.mapping[0][1][2][0] - 1]
        indent = first_line[:self.mapping[0][1][2][1]]
        self.previous_indent_level = self.indent_level
        self.indent_level = expand_indent(indent)
        for name, check, argument_names in self._logical_checks:
            for result in self.run_check(check, argument_names):
                offset, text = result
                if isinstance(offset, tuple):
                    orig_number, orig_offset = offset
                else:
                    # Walk the mapping to find the last token starting at
                    # or before this logical offset.
                    for token_offset, token in self.mapping:
                        if offset >= token_offset:
                            orig_number = token[2][0]
                            orig_offset = (token[2][1] + offset - token_offset)
                self.report_error(orig_number, orig_offset, text, check)
        # Remember this logical line for checks that compare to it.
        self.previous_logical = self.logical_line
    def generate_tokens(self):
        # Tokenize through readline_check_physical so every physical line
        # is checked as it is consumed by the tokenizer.
        tokengen = tokenize.generate_tokens(self.readline_check_physical)
        try:
            for token in tokengen:
                yield token
        except (SyntaxError, tokenize.TokenError):
            # Report tokenizer failures as E901 instead of propagating;
            # the generator then simply stops.
            exc_type, exc = sys.exc_info()[:2]
            offset = exc.args[1]
            if len(offset) > 2:
                offset = offset[1:3]
            self.report_error(offset[0], offset[1],
                              'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
                              self.generate_tokens)
    generate_tokens.__doc__ = "    Check if the syntax is valid."
    def check_all(self, expected=None, line_offset=0):
        """
        Run all checks on the input file.

        Resets per-run state, then walks the token stream: logical lines
        are checked at each NEWLINE, blank lines are counted at bare NLs,
        and blank lines preceding a comment block are carried over to the
        following logical line. Returns the accumulated results list.
        """
        self.line_number = 0
        self.line_offset = line_offset
        self.indent_char = None
        self.indent_level = 0
        self.previous_logical = ''
        self.tokens = []
        self.blank_lines = blank_lines_before_comment = 0
        parens = 0  # bracket nesting depth; NEWLINE handling only at depth 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                # NOTE(review): `pos` is computed but never used here;
                # looks like a leftover from a removed verbose print.
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
            if token_type == tokenize.OP:
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                if token_type == tokenize.NEWLINE:
                    if self.blank_lines < blank_lines_before_comment:
                        self.blank_lines = blank_lines_before_comment
                    self.check_logical()
                    self.tokens = []
                    self.blank_lines = blank_lines_before_comment = 0
                elif token_type == tokenize.NL:
                    if len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                    self.tokens = []
                elif token_type == tokenize.COMMENT and len(self.tokens) == 1:
                    if blank_lines_before_comment < self.blank_lines:
                        blank_lines_before_comment = self.blank_lines
                    self.blank_lines = 0
                    if COMMENT_WITH_NL:
                        # The comment also ends a physical line
                        self.tokens = []
        return self.results
    def report_error(self, line_number, offset, text, check):
        """
        Report an error, according to options.

        Appends a "filename:line:col: text" record to self.results,
        optionally followed by the offending source line with a caret
        marker and the check's docstring.

        NOTE(review): this reads a module-level `options` object (not
        self.options as stored by __init__) — presumably set up elsewhere
        in this module; confirm it is initialized before checks run.
        """
        if options.repeat:
            self.results.append("%s:%s:%d: %s" %
                                (self.filename, self.line_offset + line_number,
                                 offset + 1, text))
            if options.show_source and len(self.lines) >= line_number:
                line = self.lines[line_number - 1]
                self.results.append(line.rstrip())
                self.results.append(' ' * offset + '^')
            if options.show_pep8:
                self.results.append(check.__doc__.lstrip('\n').rstrip())
def run_check(fileName, source):
    """
    Parse options and run checks on Python source.

    :param fileName: name used in result records and error messages
    :param source: the Python source text to check
    :return: list of result strings accumulated by the Checker
    :raises Exception: re-raises whatever the Checker raised, after
        logging a short diagnostic.
    """
    try:
        lines = ['%s\n' % line for line in source.splitlines()]
        return Checker(fileName, lines).check_all()
    except Exception as reason:
        print(('pep8mod couldn\'t parse file: {0}'.format(fileName)))
        print(reason)
        raise
    # Bug fix: the old trailing `return []` was unreachable — the try
    # block returns and the except block re-raises.
|
WebSpider/headphones
|
refs/heads/master
|
lib/bs4/element.py
|
35
|
from pdb import set_trace
import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
def alias(self):
return setattr(self, attr)
return alias
class NamespacedAttribute(unicode):
    """A unicode subclass representing a (possibly) namespaced XML
    attribute name, remembering its prefix, local name and namespace URI.
    """
    def __new__(cls, prefix, name, namespace=None):
        if name is None:
            # Prefix only (e.g. the 'xmlns' attribute itself).
            obj = unicode.__new__(cls, prefix)
        elif prefix is None:
            # Not really namespaced.
            obj = unicode.__new__(cls, name)
        else:
            # Full "prefix:name" form.
            obj = unicode.__new__(cls, prefix + ":" + name)
        obj.prefix = prefix
        obj.name = name
        obj.namespace = namespace
        return obj
class AttributeValueWithCharsetSubstitution(unicode):
    """A stand-in object for a character encoding specified in HTML.

    Base class for attribute values whose encode() substitutes the
    eventual output encoding (see the subclasses below).
    """
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'charset' attribute.
    When Beautiful Soup parses the markup '<meta charset="utf8">', the
    value of the 'charset' attribute will be one of these objects.
    """
    def __new__(cls, original_value):
        obj = unicode.__new__(cls, original_value)
        # Keep the value as parsed so it can be inspected later.
        obj.original_value = original_value
        return obj
    def encode(self, encoding):
        # The rendered attribute value is simply the output encoding.
        return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'content' attribute.
    When Beautiful Soup parses the markup:
    <meta http-equiv="content-type" content="text/html; charset=utf8">
    The value of the 'content' attribute will be one of these objects.
    """
    # Matches the "charset=..." portion of a content-type value.
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
    def __new__(cls, original_value):
        match = cls.CHARSET_RE.search(original_value)
        if match is None:
            # No substitution necessary.
            return unicode.__new__(unicode, original_value)
        obj = unicode.__new__(cls, original_value)
        obj.original_value = original_value
        return obj
    def encode(self, encoding):
        # Rewrite only the charset=... portion to the output encoding.
        def rewrite(match):
            return match.group(1) + encoding
        return self.CHARSET_RE.sub(rewrite, self.original_value)
class HTMLAwareEntitySubstitution(EntitySubstitution):
    """Entity substitution rules that are aware of some HTML quirks.
    Specifically, the contents of <script> and <style> tags should not
    undergo entity substitution.
    Incoming NavigableString objects are checked to see if they're the
    direct children of a <script> or <style> tag.
    """
    # Tags whose text content must pass through unmodified.
    cdata_containing_tags = set(["script", "style"])
    # Tags whose whitespace must be preserved when pretty-printing.
    preformatted_tags = set(["pre"])
    @classmethod
    def _substitute_if_appropriate(cls, ns, f):
        # Apply substitution function *f* unless *ns* sits directly
        # inside a cdata-containing tag.
        if (isinstance(ns, NavigableString)
            and ns.parent is not None
            and ns.parent.name in cls.cdata_containing_tags):
            # Do nothing.
            return ns
        # Substitute.
        return f(ns)
    @classmethod
    def substitute_html(cls, ns):
        return cls._substitute_if_appropriate(
            ns, EntitySubstitution.substitute_html)
    @classmethod
    def substitute_xml(cls, ns):
        return cls._substitute_if_appropriate(
            ns, EntitySubstitution.substitute_xml)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution.
#
# In an HTML document, the default "html" and "minimal" functions
# will leave the contents of <script> and <style> tags alone. For
# an XML document, all tags will be given the same treatment.
HTML_FORMATTERS = {
"html" : HTMLAwareEntitySubstitution.substitute_html,
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
None : None
}
XML_FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
    @property
    def _is_xml(self):
        """Is this element part of an XML tree or an HTML tree?
        This is used when mapping a formatter name ("minimal") to an
        appropriate function (one that performs entity-substitution on
        the contents of <script> and <style> tags, or not). It's
        inefficient, but it should be called very rarely.
        """
        if self.parent is None:
            # This is the top-level object. It should have .is_xml set
            # from tree creation. If not, take a guess--BS is usually
            # used on HTML markup.
            return getattr(self, 'is_xml', False)
        # Recurse to the root of the tree.
        return self.parent._is_xml
    def _formatter_for_name(self, name):
        "Look up a formatter function based on its name and the tree."
        # Unknown names fall back to the minimal (XML) substitution.
        if self._is_xml:
            return self.XML_FORMATTERS.get(
                name, EntitySubstitution.substitute_xml)
        else:
            return self.HTML_FORMATTERS.get(
                name, HTMLAwareEntitySubstitution.substitute_xml)
    def setup(self, parent=None, previous_element=None, next_element=None,
              previous_sibling=None, next_sibling=None):
        """Sets up the initial relations between this element and
        other elements.

        Wires both the document-order links (previous_element /
        next_element) and the sibling links, keeping the neighbours'
        back-pointers consistent.
        """
        self.parent = parent
        self.previous_element = previous_element
        if previous_element is not None:
            self.previous_element.next_element = self
        self.next_element = next_element
        if self.next_element:
            self.next_element.previous_element = self
        self.next_sibling = next_sibling
        if self.next_sibling:
            self.next_sibling.previous_sibling = self
        if (not previous_sibling
            and self.parent is not None and self.parent.contents):
            # Default: the previous sibling is the parent's current last child.
            previous_sibling = self.parent.contents[-1]
        self.previous_sibling = previous_sibling
        if previous_sibling:
            self.previous_sibling.next_sibling = self
    nextSibling = _alias("next_sibling")  # BS3
    previousSibling = _alias("previous_sibling")  # BS3
def replace_with(self, replace_with):
if not self.parent:
raise ValueError(
"Cannot replace one element with another when the"
"element to be replaced is not part of a tree.")
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
if not self.parent:
raise ValueError(
"Cannot replace an element with its contents when that"
"element is not part of a tree.")
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
    def extract(self):
        """Destructively rips this element out of the tree.

        Detaches this element (and its subtree) from its parent, then
        repairs both the document-order chain and the sibling chain
        around the gap. Returns self.
        """
        if self.parent is not None:
            del self.parent.contents[self.parent.index(self)]
        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        last_child = self._last_descendant()
        next_element = last_child.next_element
        if (self.previous_element is not None and
            self.previous_element != next_element):
            self.previous_element.next_element = next_element
        if next_element is not None and next_element != self.previous_element:
            next_element.previous_element = self.previous_element
        self.previous_element = None
        last_child.next_element = None
        self.parent = None
        # Stitch the sibling chain back together around the gap.
        if (self.previous_sibling is not None
            and self.previous_sibling != self.next_sibling):
            self.previous_sibling.next_sibling = self.next_sibling
        if (self.next_sibling is not None
            and self.next_sibling != self.previous_sibling):
            self.next_sibling.previous_sibling = self.previous_sibling
        self.previous_sibling = self.next_sibling = None
        return self
    def _last_descendant(self, is_initialized=True, accept_self=True):
        "Finds the last element beneath this object to be parsed."
        # When the tree links are initialized, the last descendant is the
        # element just before our next sibling in document order.
        if is_initialized and self.next_sibling:
            last_child = self.next_sibling.previous_element
        else:
            # Otherwise walk down the rightmost spine of the subtree.
            last_child = self
            while isinstance(last_child, Tag) and last_child.contents:
                last_child = last_child.contents[-1]
        if not accept_self and last_child == self:
            last_child = None
        return last_child
    # BS3: Not part of the API!
    _lastRecursiveChild = _last_descendant
    def insert(self, position, new_child):
        """Insert *new_child* into this element's contents at *position*,
        wiring up all document-order and sibling links.

        Plain strings are promoted to NavigableString; a child already in
        a tree is extracted first (with index adjustment if it is being
        moved later within the same parent).
        """
        if new_child is self:
            raise ValueError("Cannot insert a tag into itself.")
        if (isinstance(new_child, basestring)
            and not isinstance(new_child, NavigableString)):
            new_child = NavigableString(new_child)
        # Clamp the position to the end of the current contents.
        position = min(position, len(self.contents))
        if hasattr(new_child, 'parent') and new_child.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if new_child.parent is self:
                current_index = self.index(new_child)
                if current_index < position:
                    # We're moving this element further down the list
                    # of this object's children. That means that when
                    # we extract this element, our target index will
                    # jump down one.
                    position -= 1
            new_child.extract()
        new_child.parent = self
        previous_child = None
        if position == 0:
            # Inserting at the front: previous element is this tag itself.
            new_child.previous_sibling = None
            new_child.previous_element = self
        else:
            previous_child = self.contents[position - 1]
            new_child.previous_sibling = previous_child
            new_child.previous_sibling.next_sibling = new_child
            new_child.previous_element = previous_child._last_descendant(False)
        if new_child.previous_element is not None:
            new_child.previous_element.next_element = new_child
        new_childs_last_element = new_child._last_descendant(False)
        if position >= len(self.contents):
            # Appending at the end: the next element in document order is
            # found by climbing to the nearest ancestor with a next sibling.
            new_child.next_sibling = None
            parent = self
            parents_next_sibling = None
            while parents_next_sibling is None and parent is not None:
                parents_next_sibling = parent.next_sibling
                parent = parent.parent
                if parents_next_sibling is not None:
                    # We found the element that comes next in the document.
                    break
            if parents_next_sibling is not None:
                new_childs_last_element.next_element = parents_next_sibling
            else:
                # The last element of this tag is the last element in
                # the document.
                new_childs_last_element.next_element = None
        else:
            # Inserting in the middle: link to the displaced child.
            next_child = self.contents[position]
            new_child.next_sibling = next_child
            if new_child.next_sibling is not None:
                new_child.next_sibling.previous_sibling = new_child
            new_childs_last_element.next_element = next_child
        if new_childs_last_element.next_element is not None:
            new_childs_last_element.next_element.previous_element = new_childs_last_element
        self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
    def insert_before(self, predecessor):
        """Makes the given element the immediate predecessor of this one.
        The two elements will have the same parent, and the given element
        will be immediately before this one.

        Raises ValueError when asked to insert an element before itself,
        or when this element has no parent.
        """
        if self is predecessor:
            raise ValueError("Can't insert an element before itself.")
        parent = self.parent
        if parent is None:
            raise ValueError(
                "Element has no parent, so 'before' has no meaning.")
        # Extract first so that the index won't be screwed up if they
        # are siblings.
        if isinstance(predecessor, PageElement):
            predecessor.extract()
        index = parent.index(self)
        parent.insert(index, predecessor)
    def insert_after(self, successor):
        """Makes the given element the immediate successor of this one.
        The two elements will have the same parent, and the given element
        will be immediately after this one.

        Raises ValueError when asked to insert an element after itself,
        or when this element has no parent.
        """
        if self is successor:
            raise ValueError("Can't insert an element after itself.")
        parent = self.parent
        if parent is None:
            raise ValueError(
                "Element has no parent, so 'after' has no meaning.")
        # Extract first so that the index won't be screwed up if they
        # are siblings.
        if isinstance(successor, PageElement):
            successor.extract()
        index = parent.index(self)
        parent.insert(index+1, successor)
    # Forward-direction search API. Each method delegates to _find_one /
    # _find_all over the appropriate generator; the BS3/BS2 camelCase
    # names are kept as aliases.
    def find_next(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
    findNext = find_next  # BS3
    def find_all_next(self, name=None, attrs={}, text=None, limit=None,
                      **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._find_all(name, attrs, text, limit, self.next_elements,
                              **kwargs)
    findAllNext = find_all_next  # BS3
    def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._find_one(self.find_next_siblings, name, attrs, text,
                              **kwargs)
    findNextSibling = find_next_sibling  # BS3
    def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
                           **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._find_all(name, attrs, text, limit,
                              self.next_siblings, **kwargs)
    findNextSiblings = find_next_siblings  # BS3
    fetchNextSiblings = find_next_siblings  # BS2
    # Backward-direction search API, mirroring the find_next family above.
    def find_previous(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._find_one(
            self.find_all_previous, name, attrs, text, **kwargs)
    findPrevious = find_previous  # BS3
    def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
                          **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._find_all(name, attrs, text, limit, self.previous_elements,
                              **kwargs)
    findAllPrevious = find_all_previous  # BS3
    fetchPrevious = find_all_previous  # BS2
    def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._find_one(self.find_previous_siblings, name, attrs, text,
                              **kwargs)
    findPreviousSibling = find_previous_sibling  # BS3
    def find_previous_siblings(self, name=None, attrs={}, text=None,
                               limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._find_all(name, attrs, text, limit,
                              self.previous_siblings, **kwargs)
    findPreviousSiblings = find_previous_siblings  # BS3
    fetchPreviousSiblings = find_previous_siblings  # BS2
    def find_parent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _find_one because findParents takes a different
        # set of arguments.
        r = None
        l = self.find_parents(name, attrs, 1, **kwargs)
        if l:
            r = l[0]
        return r
    findParent = find_parent  # BS3
    def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        # text is always None here: parents are Tags, never strings.
        return self._find_all(name, attrs, None, limit, self.parents,
                              **kwargs)
    findParents = find_parents  # BS3
    fetchParents = find_parents  # BS2
    @property
    def next(self):
        # Convenience alias for the next element in document order.
        return self.next_element
    @property
    def previous(self):
        # Convenience alias for the previous element in document order.
        return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
    def _find_all(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."
        # 'string' is the modern alias for the 'text' argument.
        if text is None and 'string' in kwargs:
            text = kwargs['string']
            del kwargs['string']
        if isinstance(name, SoupStrainer):
            strainer = name
        else:
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        # Fast paths that skip strainer.search entirely.
        if text is None and not limit and not attrs and not kwargs:
            if name is True or name is None:
                # Optimization to find all tags.
                result = (element for element in generator
                          if isinstance(element, Tag))
                return ResultSet(strainer, result)
            elif isinstance(name, basestring):
                # Optimization to find all tags with a given name.
                result = (element for element in generator
                          if isinstance(element, Tag)
                          and element.name == name)
                return ResultSet(strainer, result)
        # General path: run the strainer over every generated element.
        results = ResultSet(strainer)
        while True:
            try:
                i = next(generator)
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results
    #These generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    @property
    def next_elements(self):
        # All elements after this one in document order.
        i = self.next_element
        while i is not None:
            yield i
            i = i.next_element
    @property
    def next_siblings(self):
        # All later siblings under the same parent.
        i = self.next_sibling
        while i is not None:
            yield i
            i = i.next_sibling
    @property
    def previous_elements(self):
        # All elements before this one in document order.
        i = self.previous_element
        while i is not None:
            yield i
            i = i.previous_element
    @property
    def previous_siblings(self):
        # All earlier siblings under the same parent.
        i = self.previous_sibling
        while i is not None:
            yield i
            i = i.previous_sibling
    @property
    def parents(self):
        # All ancestors, nearest first.
        i = self.parent
        while i is not None:
            yield i
            i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-zA-Z0-9][-.a-zA-Z0-9:_]*$')
# /^([a-zA-Z0-9][-.a-zA-Z0-9:_]*)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---------------------------/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>[a-zA-Z0-9][-.a-zA-Z0-9:_]*)?\[(?P<attribute>[\w-]+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
space-separated stirng.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value =" ".join(value)
return value
def _tag_name_matches_and(self, function, tag_name):
if not tag_name:
return function
else:
def _match(tag):
return tag.name == tag_name and function(tag)
return _match
    def _attribute_checker(self, operator, attribute, value=''):
        """Create a function that performs a CSS selector operation.
        Takes an operator, attribute and optional value. Returns a
        function that will return True for elements that match that
        combination.

        Unknown operators fall through to a bare attribute-presence test.
        """
        if operator == '=':
            # string representation of `attribute` is equal to `value`
            return lambda el: el._attr_value_as_string(attribute) == value
        elif operator == '~':
            # space-separated list representation of `attribute`
            # contains `value`
            def _includes_value(element):
                attribute_value = element.get(attribute, [])
                if not isinstance(attribute_value, list):
                    attribute_value = attribute_value.split()
                return value in attribute_value
            return _includes_value
        elif operator == '^':
            # string representation of `attribute` starts with `value`
            return lambda el: el._attr_value_as_string(
                attribute, '').startswith(value)
        elif operator == '$':
            # string represenation of `attribute` ends with `value`
            return lambda el: el._attr_value_as_string(
                attribute, '').endswith(value)
        elif operator == '*':
            # string representation of `attribute` contains `value`
            return lambda el: value in el._attr_value_as_string(attribute, '')
        elif operator == '|':
            # string representation of `attribute` is either exactly
            # `value` or starts with `value` and then a dash.
            def _is_or_starts_with_dash(element):
                attribute_value = element._attr_value_as_string(attribute, '')
                return (attribute_value == value or attribute_value.startswith(
                    value + '-'))
            return _is_or_starts_with_dash
        else:
            # Presence test: element merely has the attribute.
            return lambda el: el.has_attr(attribute)
    # Old non-property versions of the generators, for backwards
    # compatibility with BS3.
    def nextGenerator(self):
        return self.next_elements
    def nextSiblingGenerator(self):
        return self.next_siblings
    def previousGenerator(self):
        return self.previous_elements
    def previousSiblingGenerator(self):
        return self.previous_siblings
    def parentGenerator(self):
        return self.parents
class NavigableString(unicode, PageElement):
    """A text node: a unicode string that also participates in the
    parse tree via PageElement."""
    # Text prepended/appended when the string is rendered; subclasses
    # override these (e.g. CData, Comment).
    PREFIX = ''
    SUFFIX = ''
    def __new__(cls, value):
        """Create a new NavigableString.
        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            u = unicode.__new__(cls, value)
        else:
            u = unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
        u.setup()
        return u
    def __copy__(self):
        """A copy of a NavigableString has the same contents and class
        as the original, but it is not connected to the parse tree.
        """
        return type(self)(self)
    def __getnewargs__(self):
        # Support for copy/pickle: reconstruct from the plain string.
        return (unicode(self),)
    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError(
                "'%s' object has no attribute '%s'" % (
                    self.__class__.__name__, attr))
    def output_ready(self, formatter="minimal"):
        # Entity-substitute the text and wrap it in the class markers.
        output = self.format_string(self, formatter)
        return self.PREFIX + output + self.SUFFIX
    @property
    def name(self):
        # Strings have no tag name.
        return None
    @name.setter
    def name(self, name):
        raise AttributeError("A NavigableString cannot be given a name.")
class PreformattedString(NavigableString):
    """A NavigableString not subject to the normal formatting rules.
    The string will be passed into the formatter (to trigger side effects),
    but the return value will be ignored.
    """
    def output_ready(self, formatter="minimal"):
        """CData strings are passed into the formatter.
        But the return value is ignored."""
        # Called for side effects only; the raw text is emitted verbatim.
        self.format_string(self, formatter)
        return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
    """A CDATA section."""
    PREFIX = u'<![CDATA['
    SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
    """A processing instruction, e.g. <?php ... ?>."""
    PREFIX = u'<?'
    SUFFIX = u'>'
class Comment(PreformattedString):
    """An HTML/XML comment."""
    PREFIX = u'<!--'
    SUFFIX = u'-->'
class Declaration(PreformattedString):
    """A declaration, rendered between '<!' and '!>'."""
    PREFIX = u'<!'
    # NOTE(review): the '!>' suffix looks unusual for a declaration
    # (markup declarations normally end with '>') — confirm against the
    # parser that produces these objects before changing it.
    SUFFIX = u'!>'
class Doctype(PreformattedString):
    """A '<!DOCTYPE ...>' declaration."""
    @classmethod
    def for_name_and_ids(cls, name, pub_id, system_id):
        """Build a Doctype from a root name plus optional public and
        system identifiers."""
        value = name or ''
        if pub_id is not None:
            value += ' PUBLIC "%s"' % pub_id
            # Alongside a PUBLIC id, the system id is quoted with no keyword.
            if system_id is not None:
                value += ' "%s"' % system_id
        elif system_id is not None:
            # Bug fix: this branch was previously dead code — the same
            # condition was tested by a sibling `if` at the same level,
            # so a SYSTEM-only doctype was never rendered with the
            # SYSTEM keyword. The system-id-with-public-id check must be
            # nested under the PUBLIC branch (as above).
            value += ' SYSTEM "%s"' % system_id
        return Doctype(value)
    PREFIX = u'<!DOCTYPE '
    SUFFIX = u'>\n'
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
    def __init__(self, parser=None, builder=None, name=None, namespace=None,
                 prefix=None, attrs=None, parent=None, previous=None):
        "Basic constructor."
        if parser is None:
            self.parser_class = None
        else:
            # We don't actually store the parser object: that lets extracted
            # chunks be garbage-collected.
            self.parser_class = parser.__class__
        if name is None:
            raise ValueError("No value provided for new tag's name.")
        self.name = name
        self.namespace = namespace
        self.prefix = prefix
        if attrs is None:
            attrs = {}
        elif attrs:
            if builder is not None and builder.cdata_list_attributes:
                # Split whitespace-separated attribute values (e.g. class)
                # into lists, per the builder's rules.
                attrs = builder._replace_cdata_list_attribute_values(
                    self.name, attrs)
            else:
                attrs = dict(attrs)
        else:
            attrs = dict(attrs)
        self.attrs = attrs
        self.contents = []
        # Wire this tag into the tree relative to parent/previous.
        self.setup(parent, previous)
        self.hidden = False
        # Set up any substitutions, such as the charset in a META tag.
        if builder is not None:
            builder.set_up_substitutions(self)
            self.can_be_empty_element = builder.can_be_empty_element(name)
        else:
            self.can_be_empty_element = False
    parserClass = _alias("parser_class")  # BS3
    def __copy__(self):
        """A copy of a Tag is a new Tag, unconnected to the parse tree.
        Its contents are a copy of the old Tag's contents.

        NOTE(review): this reads `self.builder` and `self.nsprefix`,
        neither of which is assigned by the visible __init__ (which
        stores `prefix`, not `nsprefix`, and does not keep the builder).
        Accessing them would fall through to Tag.__getattr__ — confirm
        these attributes actually exist before relying on __copy__.
        """
        clone = type(self)(None, self.builder, self.name, self.namespace,
                           self.nsprefix, self.attrs)
        for attr in ('can_be_empty_element', 'hidden'):
            setattr(clone, attr, getattr(self, attr))
        # Deep-copy children so the clone owns its own subtree.
        for child in self.contents:
            clone.append(child.__copy__())
        return clone
    @property
    def is_empty_element(self):
        """Is this tag an empty-element tag? (aka a self-closing tag)
        A tag that has contents is never an empty-element tag.
        A tag that has no contents may or may not be an empty-element
        tag. It depends on the builder used to create the tag. If the
        builder has a designated list of empty-element tags, then only
        a tag whose name shows up in that list is considered an
        empty-element tag.
        If the builder has no designated list of empty-element tags,
        then any tag with no contents is an empty-element tag.
        """
        # can_be_empty_element was decided at construction time.
        return len(self.contents) == 0 and self.can_be_empty_element
    isSelfClosing = is_empty_element  # BS3
    @property
    def string(self):
        """Convenience property to get the single string within this tag.
        :Return: If this tag has a single string child, return value
        is that string. If this tag has no children, or more than one
        child, return value is None. If this tag has one child tag,
        return value is the 'string' attribute of the child tag,
        recursively.
        """
        if len(self.contents) != 1:
            return None
        child = self.contents[0]
        if isinstance(child, NavigableString):
            return child
        return child.string
    @string.setter
    def string(self, string):
        # Assigning replaces ALL children with one string of the same class.
        self.clear()
        self.append(string.__class__(string))
    def _all_strings(self, strip=False, types=(NavigableString, CData)):
        """Yield all strings of certain classes, possibly stripping them.
        By default, yields only NavigableString and CData objects. So
        no comments, processing instructions, etc.

        With types=None, any NavigableString subclass is yielded. When
        strip is true, stripped-to-empty strings are skipped entirely.
        """
        for descendant in self.descendants:
            if (
                (types is None and not isinstance(descendant, NavigableString))
                or
                (types is not None and type(descendant) not in types)):
                continue
            if strip:
                descendant = descendant.strip()
                if len(descendant) == 0:
                    continue
            yield descendant
    strings = property(_all_strings)
    @property
    def stripped_strings(self):
        # Same as .strings but with whitespace stripped and blanks skipped.
        for string in self._all_strings(True):
            yield string
def get_text(self, separator=u"", strip=False,
types=(NavigableString, CData)):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(
strip, types=types)])
getText = get_text
text = property(get_text)
    def decompose(self):
        """Recursively destroys the contents of this tree."""
        self.extract()
        # Walk the detached subtree in document order, emptying each
        # node so it (and anything it referenced) can be collected.
        i = self
        while i is not None:
            next = i.next_element
            i.__dict__.clear()
            i.contents = []
            i = next
    def clear(self, decompose=False):
        """
        Extract all children. If decompose is True, decompose instead.

        Iterates over a copy of contents because extraction mutates it.
        """
        if decompose:
            for element in self.contents[:]:
                if isinstance(element, Tag):
                    element.decompose()
                else:
                    # Strings have nothing to decompose; just detach them.
                    element.extract()
        else:
            for element in self.contents[:]:
                element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
    def has_attr(self, key):
        # True if the tag carries the named attribute.
        return key in self.attrs
    def __hash__(self):
        # Hash on the rendered markup; equal renderings hash the same.
        return str(self).__hash__()
    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self.attrs[key]
    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        return iter(self.contents)
    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)
    def __contains__(self, x):
        return x in self.contents
    def __nonzero__(self):
        "A tag is non-None even if it has no contents."
        return True
    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self.attrs[key] = value
    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        # pop with default: deleting a missing attribute is a no-op.
        self.attrs.pop(key, None)
    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        find_all() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        return self.find_all(*args, **kwargs)
    def __getattr__(self, tag):
        """Unknown attribute access is treated as a child-tag search:
        soup.b is soup.find("b"), and the BS3 soup.bTag form is still
        accepted with a deprecation warning."""
        #print "Getattr %s.%s" % (self.__class__, tag)
        if len(tag) > 3 and tag.endswith('Tag'):
            # BS3: soup.aTag -> "soup.find("a")
            tag_name = tag[:-3]
            warnings.warn(
                '.%sTag is deprecated, use .find("%s") instead.' % (
                    tag_name, tag_name))
            return self.find(tag_name)
        # We special case contents to avoid recursion.
        elif not tag.startswith("__") and not tag=="contents":
            return self.find(tag)
        raise AttributeError(
            "'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
    """Returns true iff this tag has the same name, the same attributes,
    and the same contents (recursively) as the given tag."""
    if self is other:
        return True
    # Cheap checks first: a duck-typing probe plus name/attrs/length.
    if (not hasattr(other, 'name')
            or not hasattr(other, 'attrs')
            or not hasattr(other, 'contents')
            or self.name != other.name
            or self.attrs != other.attrs
            or len(self) != len(other)):
        return False
    # Lengths match, so compare children pairwise in document order.
    for mine, theirs in zip(self.contents, other.contents):
        if mine != theirs:
            return False
    return True

def __ne__(self, other):
    """Returns true iff this tag is not identical to the other tag,
    as defined in __eq__."""
    return not self.__eq__(other)
def __repr__(self, encoding="unicode-escape"):
    """Renders this tag as a string."""
    if PY3K:
        # "The return value must be a string object", i.e. Unicode
        return self.decode()
    else:
        # "The return value must be a string object", i.e. a bytestring.
        # By convention, the return value of __repr__ should also be
        # an ASCII string.
        return self.encode(encoding)

def __unicode__(self):
    # Python 2 Unicode rendering hook; delegates to decode().
    return self.decode()

def __str__(self):
    # Bytestring under Python 2, Unicode under Python 3.
    if PY3K:
        return self.decode()
    else:
        return self.encode()

if PY3K:
    # Python 3 has no str/unicode split, so all three renderings are
    # the same Unicode-producing method.
    __str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
           indent_level=None, formatter="minimal",
           errors="xmlcharrefreplace"):
    """Render this tag and its contents as a bytestring in `encoding`,
    using `errors` to handle characters the encoding can't express."""
    # Build the full Unicode rendering first, then encode it in one step.
    rendered = self.decode(indent_level, encoding, formatter)
    return rendered.encode(encoding, errors)
def _should_pretty_print(self, indent_level):
    """Should this tag be pretty-printed?  Only when an indent level is
    in effect, and never for preformatted HTML elements."""
    if indent_level is None:
        return False
    # Preformatted tags keep their whitespace, except in XML where no
    # element is considered preformatted.
    return (self.name not in HTMLAwareEntitySubstitution.preformatted_tags
            or self._is_xml)
def decode(self, indent_level=None,
           eventual_encoding=DEFAULT_OUTPUT_ENCODING,
           formatter="minimal"):
    """Returns a Unicode representation of this tag and its contents.

    :param indent_level: Each line of the rendering is indented this
       many spaces; None disables pretty-printing.
    :param eventual_encoding: The tag is destined to be
       encoded into this encoding. This method is _not_
       responsible for performing that encoding. This information
       is passed in so that it can be substituted in if the
       document contains a <META> tag that mentions the document's
       encoding.
    :param formatter: Name of (or callable implementing) the entity
       substitution strategy used on attribute values.
    """

    # First off, turn a string formatter into a function. This
    # will stop the lookup from happening over and over again.
    if not callable(formatter):
        formatter = self._formatter_for_name(formatter)

    attrs = []
    if self.attrs:
        # Attributes are rendered sorted by name so output is stable.
        for key, val in sorted(self.attrs.items()):
            if val is None:
                # Valueless (boolean) attribute, e.g. <option selected>.
                decoded = key
            else:
                if isinstance(val, list) or isinstance(val, tuple):
                    # Multi-valued attribute such as 'class'.
                    val = ' '.join(val)
                elif not isinstance(val, basestring):
                    val = unicode(val)
                elif (
                    isinstance(val, AttributeValueWithCharsetSubstitution)
                    and eventual_encoding is not None):
                    # Charset-bearing value (e.g. inside <meta>): splice
                    # in the encoding the document will end up in.
                    val = val.encode(eventual_encoding)

                text = self.format_string(val, formatter)
                decoded = (
                    unicode(key) + '='
                    + EntitySubstitution.quoted_attribute_value(text))
            attrs.append(decoded)
    close = ''
    closeTag = ''

    prefix = ''
    if self.prefix:
        prefix = self.prefix + ":"

    if self.is_empty_element:
        # Empty element: '<br/>'-style, no separate closing tag.
        close = '/'
    else:
        closeTag = '</%s%s>' % (prefix, self.name)

    pretty_print = self._should_pretty_print(indent_level)
    space = ''
    indent_space = ''
    if indent_level is not None:
        indent_space = (' ' * (indent_level - 1))
    if pretty_print:
        space = indent_space
        # Children are rendered one level deeper.
        indent_contents = indent_level + 1
    else:
        indent_contents = None
    contents = self.decode_contents(
        indent_contents, eventual_encoding, formatter)

    if self.hidden:
        # This is the 'document root' object: render only the contents,
        # with no enclosing tag.
        s = contents
    else:
        s = []
        attribute_string = ''
        if attrs:
            attribute_string = ' ' + ' '.join(attrs)
        if indent_level is not None:
            # Even if this particular tag is not pretty-printed,
            # we should indent up to the start of the tag.
            s.append(indent_space)
        s.append('<%s%s%s%s>' % (
            prefix, self.name, attribute_string, close))
        if pretty_print:
            s.append("\n")
        s.append(contents)
        # Guarantee a newline before the closing tag when pretty-printing.
        if pretty_print and contents and contents[-1] != "\n":
            s.append("\n")
        if pretty_print and closeTag:
            s.append(space)
        s.append(closeTag)
        if indent_level is not None and closeTag and self.next_sibling:
            # Even if this particular tag is not pretty-printed,
            # we're now done with the tag, and we should add a
            # newline if appropriate.
            s.append("\n")
        s = ''.join(s)
    return s
def prettify(self, encoding=None, formatter="minimal"):
    """Pretty-print this tag: returns Unicode when `encoding` is None,
    otherwise a bytestring in that encoding."""
    if encoding is not None:
        return self.encode(encoding, True, formatter=formatter)
    return self.decode(True, formatter=formatter)
def decode_contents(self, indent_level=None,
                    eventual_encoding=DEFAULT_OUTPUT_ENCODING,
                    formatter="minimal"):
    """Renders the contents of this tag as a Unicode string.

    :param indent_level: Each line of the rendering will be
       indented this many spaces.
    :param eventual_encoding: The tag is destined to be
       encoded into this encoding. This method is _not_
       responsible for performing that encoding. This information
       is passed in so that it can be substituted in if the
       document contains a <META> tag that mentions the document's
       encoding.
    :param formatter: The output formatter responsible for converting
       entities to Unicode characters.
    """
    # First off, turn a string formatter into a function. This
    # will stop the lookup from happening over and over again.
    if not callable(formatter):
        formatter = self._formatter_for_name(formatter)

    pretty_print = (indent_level is not None)
    s = []
    for c in self:
        text = None
        if isinstance(c, NavigableString):
            text = c.output_ready(formatter)
        elif isinstance(c, Tag):
            # Child tags render themselves recursively.
            s.append(c.decode(indent_level, eventual_encoding,
                              formatter))
        # Whitespace is significant inside <pre>, so never strip there.
        if text and indent_level and not self.name == 'pre':
            text = text.strip()
        if text:
            if pretty_print and not self.name == 'pre':
                s.append(" " * (indent_level - 1))
            s.append(text)
            if pretty_print and not self.name == 'pre':
                s.append("\n")
    return ''.join(s)
def encode_contents(
        self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
        formatter="minimal"):
    """Renders the contents of this tag as a bytestring.

    :param indent_level: Each line of the rendering will be
       indented this many spaces.
    :param encoding: The bytestring will be in this encoding.
    :param formatter: The output formatter responsible for converting
       entities to Unicode characters.
    """
    # Render to Unicode first, then encode the whole thing at once.
    rendered = self.decode_contents(indent_level, encoding, formatter)
    return rendered.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                   prettyPrint=False, indentLevel=0):
    """Deprecated BS3 spelling of encode_contents()."""
    # encode_contents() signals pretty-printing with a non-None indent.
    effective_indent = indentLevel if prettyPrint else None
    return self.encode_contents(
        indent_level=effective_indent, encoding=encoding)
#Soup methods

def find(self, name=None, attrs={}, recursive=True, text=None,
         **kwargs):
    """Return only the first child of this Tag matching the given
    criteria, or None if nothing matches."""
    # Delegate to find_all() with limit=1 and unwrap the result.
    results = self.find_all(name, attrs, recursive, text, 1, **kwargs)
    return results[0] if results else None
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
             limit=None, **kwargs):
    """Extracts a list of Tag objects that match the given
    criteria. You can specify the name of the Tag and any
    attributes you want the Tag to have.

    The value of a key-value pair in the 'attrs' map can be a
    string, a list of strings, a regular expression object, or a
    callable that takes a string and returns whether or not the
    string matches for some custom definition of 'matches'. The
    same is true of the tag name.
    """
    # Search the whole subtree by default; only direct children
    # when recursive=False.
    generator = self.descendants if recursive else self.children
    return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
#Generator methods
@property
def children(self):
    """An iterator over this tag's direct children, in document order."""
    # return iter() to make the purpose of the method clear
    return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
    """Yield every element below this tag, in document order."""
    if not self.contents:
        return
    # Walk the next_element chain until we pass the last node of
    # this subtree.
    stop_node = self._last_descendant().next_element
    node = self.contents[0]
    while node is not stop_node:
        yield node
        node = node.next_element
# CSS selector code

# Selector tokens that combine two simple selectors
# (child, adjacent-sibling and general-sibling).
_selector_combinators = ['>', '+', '~']
# When True, select() prints a verbose trace of its matching process.
_select_debug = False
def select_one(self, selector):
    """Perform a CSS selection operation on the current element and
    return the first match, or None if nothing matched."""
    matches = self.select(selector, limit=1)
    return matches[0] if matches else None
def select(self, selector, _candidate_generator=None, limit=None):
"""Perform a CSS selection operation on the current element."""
# Remove whitespace directly after the grouping operator ','
# then split into tokens.
tokens = re.sub(',[\s]*',',', selector).split()
current_context = [self]
if tokens[-1] in self._selector_combinators:
raise ValueError(
'Final combinator "%s" is missing an argument.' % tokens[-1])
if self._select_debug:
print 'Running CSS selector "%s"' % selector
for index, token_group in enumerate(tokens):
new_context = []
new_context_ids = set([])
# Grouping selectors, ie: p,a
grouped_tokens = token_group.split(',')
if '' in grouped_tokens:
raise ValueError('Invalid group selection syntax: %s' % token_group)
if tokens[index-1] in self._selector_combinators:
# This token was consumed by the previous combinator. Skip it.
if self._select_debug:
print ' Token was consumed by the previous combinator.'
continue
for token in grouped_tokens:
if self._select_debug:
print ' Considering token "%s"' % token
recursive_candidate_generator = None
tag_name = None
# Each operation corresponds to a checker function, a rule
# for determining whether a candidate matches the
# selector. Candidates are generated by the active
# iterator.
checker = None
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag_name, attribute, operator, value = m.groups()
checker = self._attribute_checker(operator, attribute, value)
elif '#' in token:
# ID selector
tag_name, tag_id = token.split('#', 1)
def id_matches(tag):
return tag.get('id', None) == tag_id
checker = id_matches
elif '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
classes = set(klass.split('.'))
def classes_match(candidate):
return classes.issubset(candidate.get('class', []))
checker = classes_match
elif ':' in token:
# Pseudo-class
tag_name, pseudo = token.split(':', 1)
if tag_name == '':
raise ValueError(
"A pseudo-class must be prefixed with a tag name.")
pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
found = []
if pseudo_attributes is None:
pseudo_type = pseudo
pseudo_value = None
else:
pseudo_type, pseudo_value = pseudo_attributes.groups()
if pseudo_type == 'nth-of-type':
try:
pseudo_value = int(pseudo_value)
except:
raise NotImplementedError(
'Only numeric values are currently supported for the nth-of-type pseudo-class.')
if pseudo_value < 1:
raise ValueError(
'nth-of-type pseudo-class value must be at least 1.')
class Counter(object):
def __init__(self, destination):
self.count = 0
self.destination = destination
def nth_child_of_type(self, tag):
self.count += 1
if self.count == self.destination:
return True
if self.count > self.destination:
# Stop the generator that's sending us
# these things.
raise StopIteration()
return False
checker = Counter(pseudo_value).nth_child_of_type
else:
raise NotImplementedError(
'Only the following pseudo-classes are implemented: nth-of-type.')
elif token == '*':
# Star selector -- matches everything
pass
elif token == '>':
# Run the next token as a CSS selector against the
# direct children of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.children
elif token == '~':
# Run the next token as a CSS selector against the
# siblings of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.next_siblings
elif token == '+':
# For each tag in the current context, run the next
# token as a CSS selector against the tag's next
# sibling that's a tag.
def next_tag_sibling(tag):
yield tag.find_next_sibling(True)
recursive_candidate_generator = next_tag_sibling
elif self.tag_name_re.match(token):
# Just a tag name.
tag_name = token
else:
raise ValueError(
'Unsupported or invalid CSS selector: "%s"' % token)
if recursive_candidate_generator:
# This happens when the selector looks like "> foo".
#
# The generator calls select() recursively on every
# member of the current context, passing in a different
# candidate generator and a different selector.
#
# In the case of "> foo", the candidate generator is
# one that yields a tag's direct children (">"), and
# the selector is "foo".
next_token = tokens[index+1]
def recursive_select(tag):
if self._select_debug:
print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
print '-' * 40
for i in tag.select(next_token, recursive_candidate_generator):
if self._select_debug:
print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
yield i
if self._select_debug:
print '-' * 40
_use_candidate_generator = recursive_select
elif _candidate_generator is None:
# By default, a tag's candidates are all of its
# children. If tag_name is defined, only yield tags
# with that name.
if self._select_debug:
if tag_name:
check = "[any]"
else:
check = tag_name
print ' Default candidate generator, tag name="%s"' % check
if self._select_debug:
# This is redundant with later code, but it stops
# a bunch of bogus tags from cluttering up the
# debug log.
def default_candidate_generator(tag):
for child in tag.descendants:
if not isinstance(child, Tag):
continue
if tag_name and not child.name == tag_name:
continue
yield child
_use_candidate_generator = default_candidate_generator
else:
_use_candidate_generator = lambda tag: tag.descendants
else:
_use_candidate_generator = _candidate_generator
count = 0
for tag in current_context:
if self._select_debug:
print " Running candidate generator on %s %s" % (
tag.name, repr(tag.attrs))
for candidate in _use_candidate_generator(tag):
if not isinstance(candidate, Tag):
continue
if tag_name and candidate.name != tag_name:
continue
if checker is not None:
try:
result = checker(candidate)
except StopIteration:
# The checker has decided we should no longer
# run the generator.
break
if checker is None or result:
if self._select_debug:
print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
if id(candidate) not in new_context_ids:
# If a tag matches a selector more than once,
# don't include it in the context more than once.
new_context.append(candidate)
new_context_ids.add(id(candidate))
if limit and len(new_context) >= limit:
break
elif self._select_debug:
print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
current_context = new_context
if self._select_debug:
print "Final verdict:"
for i in current_context:
print " %s %s" % (i.name, i.attrs)
return current_context
# Old names for backwards compatibility
def childGenerator(self):
    # Deprecated BS3 alias for the `children` property.
    return self.children

def recursiveChildGenerator(self):
    # Deprecated BS3 alias for the `descendants` property.
    return self.descendants

def has_key(self, key):
    """This was kind of misleading because has_key() (attributes)
    was different from __in__ (contents). has_key() is gone in
    Python 3, anyway."""
    warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
        key))
    return self.has_attr(key)
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
    """Encapsulates a number of ways of matching a markup element (tag or
    text).

    `name`, each value in `attrs`, and `text` may each be a string, a
    bytestring, a regular expression, a callable, a boolean, None, or a
    list of any of those; see _normalize_search_value() for how each is
    normalized before matching.
    """

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = self._normalize_search_value(name)
        if not isinstance(attrs, dict):
            # Treat a non-dict value for attrs as a search for the 'class'
            # attribute.
            kwargs['class'] = attrs
            attrs = None

        if 'class_' in kwargs:
            # Treat class_="foo" as a search for the 'class'
            # attribute, overriding any non-dict value for attrs.
            kwargs['class'] = kwargs['class_']
            del kwargs['class_']

        if kwargs:
            if attrs:
                # Merge explicit attrs with keyword-argument attrs;
                # copy first so the caller's dict isn't mutated.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        normalized_attrs = {}
        for key, value in attrs.items():
            normalized_attrs[key] = self._normalize_search_value(value)

        self.attrs = normalized_attrs
        self.text = self._normalize_search_value(text)

    def _normalize_search_value(self, value):
        # Leave it alone if it's a Unicode string, a callable, a
        # regular expression, a boolean, or None.
        if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
            or isinstance(value, bool) or value is None):
            return value

        # If it's a bytestring, convert it to Unicode, treating it as UTF-8.
        if isinstance(value, bytes):
            return value.decode("utf8")

        # If it's listlike, convert it into a list of strings.
        if hasattr(value, '__iter__'):
            new_value = []
            for v in value:
                if (hasattr(v, '__iter__') and not isinstance(v, bytes)
                    and not isinstance(v, unicode)):
                    # This is almost certainly the user's mistake. In the
                    # interests of avoiding infinite loops, we'll let
                    # it through as-is rather than doing a recursive call.
                    new_value.append(v)
                else:
                    new_value.append(self._normalize_search_value(v))
            return new_value

        # Otherwise, convert it into a Unicode string.
        # The unicode(str()) thing is so this will do the same thing on Python 2
        # and Python 3.
        return unicode(str(value))

    def __str__(self):
        # Text strainers render as the text; tag strainers as "name|attrs".
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def search_tag(self, markup_name=None, markup_attrs={}):
        """Check whether a tag (given either as a Tag or as a
        name/attrs pair) matches this strainer.  Returns the matched
        object, or None."""
        found = None
        markup = None
        if isinstance(markup_name, Tag):
            markup = markup_name
            # A Tag supports .get(), so it can stand in for the attr map.
            markup_attrs = markup
        call_function_with_tag_data = (
            isinstance(self.name, collections.Callable)
            and not isinstance(markup_name, Tag))

        if ((not self.name)
            or call_function_with_tag_data
            or (markup and self._matches(markup, self.name))
            or (not markup and self._matches(markup_name, self.name))):
            if call_function_with_tag_data:
                match = self.name(markup_name, markup_attrs)
            else:
                match = True
                markup_attr_map = None
                for attr, match_against in list(self.attrs.items()):
                    if not markup_attr_map:
                        if hasattr(markup_attrs, 'get'):
                            markup_attr_map = markup_attrs
                        else:
                            # A sequence of (key, value) pairs; build a dict.
                            markup_attr_map = {}
                            for k, v in markup_attrs:
                                markup_attr_map[k] = v
                    attr_value = markup_attr_map.get(attr)
                    if not self._matches(attr_value, match_against):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markup_name
        # A text constraint can still veto an otherwise-matching tag.
        if found and self.text and not self._matches(found.string, self.text):
            found = None
        return found
    searchTag = search_tag

    def search(self, markup):
        """Match this strainer against a single element, or a list of
        elements; returns the matched element or None."""
        # print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text or self.name or self.attrs:
                found = self.search_tag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isinstance(markup, basestring):
            if not self.name and not self.attrs and self._matches(markup, self.text):
                found = markup
        else:
            raise Exception(
                "I don't know how to match against a %s" % markup.__class__)
        return found

    def _matches(self, markup, match_against):
        """Core matching rule: does `markup` satisfy `match_against`
        (a string, regexp, callable, boolean, None, or list thereof)?"""
        # print u"Matching %s against %s" % (markup, match_against)
        result = False
        if isinstance(markup, list) or isinstance(markup, tuple):
            # This should only happen when searching a multi-valued attribute
            # like 'class'.
            if (isinstance(match_against, unicode)
                and ' ' in match_against):
                # A bit of a special case. If they try to match "foo
                # bar" on a multivalue attribute's value, only accept
                # the literal value "foo bar"
                #
                # XXX This is going to be pretty slow because we keep
                # splitting match_against. But it shouldn't come up
                # too often.
                return (whitespace_re.split(match_against) == markup)
            else:
                for item in markup:
                    if self._matches(item, match_against):
                        return True
                return False

        if match_against is True:
            # True matches any non-None value.
            return markup is not None

        if isinstance(match_against, collections.Callable):
            return match_against(markup)

        # Custom callables take the tag as an argument, but all
        # other ways of matching match the tag name as a string.
        if isinstance(markup, Tag):
            markup = markup.name

        # Ensure that `markup` is either a Unicode string, or None.
        markup = self._normalize_search_value(markup)

        if markup is None:
            # None matches None, False, an empty string, an empty list, and so on.
            return not match_against

        if isinstance(match_against, unicode):
            # Exact string match
            return markup == match_against

        if hasattr(match_against, 'match'):
            # Regexp match
            return match_against.search(markup)

        if hasattr(match_against, '__iter__'):
            # The markup must be an exact match against something
            # in the iterable.
            return markup in match_against
        # NOTE(review): if match_against is none of the handled types we
        # fall through here and implicitly return None (falsy) -- confirm
        # whether an explicit `return False` was intended.
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""

    def __init__(self, source, result=()):
        # Seed the list with the initial results, then remember the
        # strainer that produced them.
        list.__init__(self, result)
        self.source = source
|
tequa/ammisoft
|
refs/heads/master
|
ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/backend_bases.py
|
4
|
"""
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
:class:`ShowBase`
The base class for the Show class of each interactive backend;
the 'show' callable is then set to Show.__call__, inherited from
ShowBase.
:class:`ToolContainerBase`
The base class for the Toolbar class of each interactive backend.
:class:`StatusbarBase`
The base class for the messaging area.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from contextlib import contextmanager
import six
from six.moves import xrange
import os
import sys
import warnings
import time
import io
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
#import matplotlib.path as path
from matplotlib import rcParams
from matplotlib import is_interactive
from matplotlib import get_backend
from matplotlib._pylab_helpers import Gcf
from matplotlib import lines
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
import matplotlib.tight_bbox as tight_bbox
import matplotlib.textpath as textpath
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation, warn_deprecated
import matplotlib.backend_tools as tools
try:
    from importlib import import_module
except ImportError:
    # Fallback for Python < 2.7: a minimal import_module supporting
    # absolute imports only (no 'package' argument, no relative names).
    # Narrowed from a bare except so unrelated errors aren't swallowed.
    def import_module(name):
        __import__(name)
        return sys.modules[name]
# Probe at import time for PIL/Pillow availability; only the boolean
# flag is kept, the module itself is not.
try:
    from PIL import Image
    _has_pil = True
    del Image  # only needed for the probe
except ImportError:
    _has_pil = False
# Mapping of supported savefig file extensions to human-readable
# descriptions (e.g. for building file-dialog filters).
_default_filetypes = {
    'ps': 'Postscript',
    'eps': 'Encapsulated Postscript',
    'pdf': 'Portable Document Format',
    'pgf': 'PGF code for LaTeX',
    'png': 'Portable Network Graphics',
    'raw': 'Raw RGBA bitmap',
    'rgba': 'Raw RGBA bitmap',
    'svg': 'Scalable Vector Graphics',
    'svgz': 'Scalable Vector Graphics'
}

# Mapping of file extensions to the backend module dotted path (or, once
# resolved by get_registered_canvas_class(), the FigureCanvas class
# itself) that handles writing that format.
_default_backends = {
    'ps': 'matplotlib.backends.backend_ps',
    'eps': 'matplotlib.backends.backend_ps',
    'pdf': 'matplotlib.backends.backend_pdf',
    'pgf': 'matplotlib.backends.backend_pgf',
    'png': 'matplotlib.backends.backend_agg',
    'raw': 'matplotlib.backends.backend_agg',
    'rgba': 'matplotlib.backends.backend_agg',
    'svg': 'matplotlib.backends.backend_svg',
    'svgz': 'matplotlib.backends.backend_svg',
}
def register_backend(format, backend, description=None):
    """
    Register a backend for saving to a given file format.

    format : str
        File extension
    backend : module string or canvas class
        Backend for handling file output
    description : str, optional
        Description of the file type.  Defaults to an empty string
    """
    if description is None:
        description = ''
    # Overwrites any previous registration for this extension.
    _default_backends[format] = backend
    _default_filetypes[format] = description
def get_registered_canvas_class(format):
    """
    Return the registered default canvas for given file format.
    Handles deferred import of required backend.

    Returns None when no backend is registered for `format`.
    """
    if format not in _default_backends:
        return None
    backend_class = _default_backends[format]
    if cbook.is_string_like(backend_class):
        # Registered as a module path: import lazily and cache the
        # resolved FigureCanvas class for subsequent lookups.
        backend_class = import_module(backend_class).FigureCanvas
        _default_backends[format] = backend_class
    return backend_class
class ShowBase(object):
    """
    Simple base class to generate a show() callable in backends.

    Subclass must override mainloop() method.
    """
    def __call__(self, block=None):
        """
        Show all figures.  If *block* is not None, then
        it is a boolean that overrides all other factors
        determining whether show blocks by calling mainloop().
        The other factors are:
        it does not block if run inside ipython's "%pylab" mode
        it does not block in interactive mode.
        """
        managers = Gcf.get_all_fig_managers()
        if not managers:
            # Nothing to show.
            return

        for manager in managers:
            manager.show()

        if block is not None:
            # Explicit block argument overrides all the heuristics below.
            if block:
                self.mainloop()
                return
            else:
                return

        # Hack: determine at runtime whether we are
        # inside ipython in pylab mode.
        from matplotlib import pyplot
        try:
            ipython_pylab = not pyplot.show._needmain
            # IPython versions >= 0.10 tack the _needmain
            # attribute onto pyplot.show, and always set
            # it to False, when in %pylab mode.
            ipython_pylab = ipython_pylab and get_backend() != 'WebAgg'
            # TODO: The above is a hack to get the WebAgg backend
            # working with ipython's `%pylab` mode until proper
            # integration is implemented.
        except AttributeError:
            ipython_pylab = False

        # Leave the following as a separate step in case we
        # want to control this behavior with an rcParam.
        if ipython_pylab:
            return

        if not is_interactive() or get_backend() == 'WebAgg':
            self.mainloop()

    def mainloop(self):
        # Subclasses run the GUI event loop here; the base class is a no-op.
        pass
class RendererBase(object):
    """An abstract base class to handle drawing/rendering operations.

    The following methods must be implemented in the backend for full
    functionality (though just implementing :meth:`draw_path` alone would
    give a highly capable backend):

    * :meth:`draw_path`
    * :meth:`draw_image`
    * :meth:`draw_gouraud_triangle`

    The following methods *should* be implemented in the backend for
    optimization reasons:

    * :meth:`draw_text`
    * :meth:`draw_markers`
    * :meth:`draw_path_collection`
    * :meth:`draw_quad_mesh`
    """
def __init__(self):
    # Cached TexManager instance; starts unset, presumably created
    # lazily by a helper elsewhere -- TODO confirm.
    self._texmanager = None
    # TextToPath converter kept for text-rendering support
    # (see matplotlib.textpath).
    self._text2path = textpath.TextToPath()
def open_group(self, s, gid=None):
    """
    Open a grouping element with label *s*. If *gid* is given, use
    *gid* as the id of the group. Is only currently used by
    :mod:`~matplotlib.backends.backend_svg`.
    """
    # Default implementation is a no-op; structured backends override.
    pass

def close_group(self, s):
    """
    Close a grouping element with label *s*
    Is only currently used by :mod:`~matplotlib.backends.backend_svg`
    """
    pass

def draw_path(self, gc, path, transform, rgbFace=None):
    """
    Draws a :class:`~matplotlib.path.Path` instance using the
    given affine transform.
    """
    # Core primitive: every concrete backend must implement this.
    raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path,
                 trans, rgbFace=None):
    """
    Draw a marker at each of the vertices in *path*.  This includes
    all vertices, including control points on curves; remove those
    vertices before calling this function to avoid that behavior.

    *gc*
        the :class:`GraphicsContextBase` instance

    *marker_trans*
        is an affine transform applied to the marker.

    *trans*
        is an affine transform applied to the path.

    This fallback implementation makes one :meth:`draw_path` call per
    vertex; backends may override it to draw the marker once and
    stamp it repeatedly.
    """
    for vertices, codes in path.iter_segments(trans, simplify=False):
        if not len(vertices):
            continue
        # The vertex position is the last (x, y) pair of the segment.
        x, y = vertices[-2:]
        placement = marker_trans + transforms.Affine2D().translate(x, y)
        self.draw_path(gc, marker_path, placement, rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
                         offsets, offsetTrans, facecolors, edgecolors,
                         linewidths, linestyles, antialiaseds, urls,
                         offset_position):
    """
    Draws a collection of paths selecting drawing properties from
    the lists *facecolors*, *edgecolors*, *linewidths*,
    *linestyles* and *antialiaseds*. *offsets* is a list of
    offsets to apply to each of the paths.  The offsets in
    *offsets* are first transformed by *offsetTrans* before being
    applied.  *offset_position* may be either "screen" or "data"
    depending on the space that the offsets are in.

    This provides a fallback implementation of
    :meth:`draw_path_collection` that makes multiple calls to
    :meth:`draw_path`.  Some backends may want to override this in
    order to render each set of path data only once, and then
    reference that path multiple times with the different offsets,
    colors, styles etc.  The generator methods
    :meth:`_iter_collection_raw_paths` and
    :meth:`_iter_collection` are provided to help with (and
    standardize) the implementation across backends.  It is highly
    recommended to use those generators, so that changes to the
    behavior of :meth:`draw_path_collection` can be made globally.
    """
    # First materialize each distinct (path, transform) pair...
    path_ids = []
    for path, transform in self._iter_collection_raw_paths(
            master_transform, paths, all_transforms):
        path_ids.append((path, transforms.Affine2D(transform)))

    # ...then draw each one once per (offset, style) combination.
    for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
            gc, master_transform, all_transforms, path_ids, offsets,
            offsetTrans, facecolors, edgecolors, linewidths, linestyles,
            antialiaseds, urls, offset_position):
        path, transform = path_id
        transform = transforms.Affine2D(
            transform.get_matrix()).translate(xo, yo)
        self.draw_path(gc0, path, transform, rgbFace)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
                   coordinates, offsets, offsetTrans, facecolors,
                   antialiased, edgecolors):
    """
    This provides a fallback implementation of
    :meth:`draw_quad_mesh` that generates paths and then calls
    :meth:`draw_path_collection`.
    """
    from matplotlib.collections import QuadMesh
    paths = QuadMesh.convert_mesh_to_paths(
        meshWidth, meshHeight, coordinates)

    if edgecolors is None:
        # Edges default to the face color.
        edgecolors = facecolors
    linewidths = np.array([gc.get_linewidth()], np.float_)

    return self.draw_path_collection(
        gc, master_transform, paths, [], offsets, offsetTrans, facecolors,
        edgecolors, linewidths, [], [antialiased], [None], 'screen')
def draw_gouraud_triangle(self, gc, points, colors, transform):
    """
    Draw a Gouraud-shaded triangle.

    *points* is a 3x2 array of (x, y) points for the triangle.

    *colors* is a 3x4 array of RGBA colors for each point of the
    triangle.

    *transform* is an affine transform to apply to the points.
    """
    # Required primitive for Gouraud shading; concrete backends
    # must implement it.
    raise NotImplementedError
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
                           transform):
    """
    Draws a series of Gouraud triangles.

    *triangles_array* is a Nx3x2 array of (x, y) points for the
    triangles.

    *colors_array* is a Nx3x4 array of RGBA colors for each point of
    the triangles.

    *transform* is an affine transform to apply to the points.
    """
    # Freeze the transform once and reuse it for every triangle.
    frozen = transform.frozen()
    for triangle, triangle_colors in zip(triangles_array, colors_array):
        self.draw_gouraud_triangle(gc, triangle, triangle_colors, frozen)
def _iter_collection_raw_paths(self, master_transform, paths,
                               all_transforms):
    """
    Helper (paired with :meth:`_iter_collection`) for writing a
    space-efficient :meth:`draw_path_collection` in a backend.

    Yields every base (path, transform) combination given a master
    transform, a list of paths and a list of transforms; the
    arguments should be exactly what is passed in to
    :meth:`draw_path_collection`.

    The backend should take each yielded path and transform and
    create an object that can be referenced (reused) later.
    """
    n_paths = len(paths)
    n_transforms = len(all_transforms)
    if n_paths == 0:
        return

    # Paths and transforms are recycled modulo their lengths.
    transform = transforms.IdentityTransform()
    for i in xrange(max(n_paths, n_transforms)):
        path = paths[i % n_paths]
        if n_transforms:
            transform = Affine2D(all_transforms[i % n_transforms])
        yield path, transform + master_transform
def _iter_collection_uses_per_path(self, paths, all_transforms,
offsets, facecolors, edgecolors):
"""
Compute how many times each raw path object returned by
_iter_collection_raw_paths would be used when calling
_iter_collection. This is intended for the backend to decide
on the tradeoff between using the paths in-line and storing
them once and reusing. Rounds up in case the number of uses
is not the same for every path.
"""
Npaths = len(paths)
if Npaths == 0 or (len(facecolors) == 0 and len(edgecolors) == 0):
return 0
Npath_ids = max(Npaths, len(all_transforms))
N = max(Npath_ids, len(offsets))
return (N + Npath_ids - 1) // Npath_ids
    def _iter_collection(self, gc, master_transform, all_transforms,
                         path_ids, offsets, offsetTrans, facecolors,
                         edgecolors, linewidths, linestyles,
                         antialiaseds, urls, offset_position):
        """
        This is a helper method (along with
        :meth:`_iter_collection_raw_paths`) to make it easier to write
        a space-efficent :meth:`draw_path_collection` implementation in a
        backend.

        This method yields all of the path, offset and graphics
        context combinations to draw the path collection.  The caller
        should already have looped over the results of
        :meth:`_iter_collection_raw_paths` to draw this collection.

        The arguments should be the same as that passed into
        :meth:`draw_path_collection`, with the exception of
        *path_ids*, which is a list of arbitrary objects that the
        backend will use to reference one of the paths created in the
        :meth:`_iter_collection_raw_paths` stage.

        Each yielded result is of the form::

            xo, yo, path_id, gc, rgbFace

        where *xo*, *yo* is an offset; *path_id* is one of the elements of
        *path_ids*; *gc* is a graphics context and *rgbFace* is a color to
        use for filling the path.
        """
        Ntransforms = len(all_transforms)
        Npaths = len(path_ids)
        Noffsets = len(offsets)
        N = max(Npaths, Noffsets)
        Nfacecolors = len(facecolors)
        Nedgecolors = len(edgecolors)
        Nlinewidths = len(linewidths)
        Nlinestyles = len(linestyles)
        Naa = len(antialiaseds)
        Nurls = len(urls)
        # Nothing to draw if there are no paths, or neither fill nor
        # stroke colors were supplied.
        if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
            return
        if Noffsets:
            toffsets = offsetTrans.transform(offsets)
        # A single working graphics context is mutated per item and
        # yielded each time; callers must not hold on to it across
        # iterations.
        gc0 = self.new_gc()
        gc0.copy_properties(gc)
        if Nfacecolors == 0:
            rgbFace = None
        if Nedgecolors == 0:
            gc0.set_linewidth(0.0)
        xo, yo = 0, 0
        # Each per-item property cycles independently modulo the length
        # of its own list.
        for i in xrange(N):
            path_id = path_ids[i % Npaths]
            if Noffsets:
                xo, yo = toffsets[i % Noffsets]
                if offset_position == 'data':
                    if Ntransforms:
                        transform = (
                            Affine2D(all_transforms[i % Ntransforms]) +
                            master_transform)
                    else:
                        transform = master_transform
                    # Re-express the offset as a screen-space delta of the
                    # transformed offset relative to the transformed origin.
                    xo, yo = transform.transform_point((xo, yo))
                    xp, yp = transform.transform_point((0, 0))
                    xo = -(xp - xo)
                    yo = -(yp - yo)
            if not (np.isfinite(xo) and np.isfinite(yo)):
                # skip non-finite offsets rather than propagating NaN/inf
                continue
            if Nfacecolors:
                rgbFace = facecolors[i % Nfacecolors]
            if Nedgecolors:
                if Nlinewidths:
                    gc0.set_linewidth(linewidths[i % Nlinewidths])
                if Nlinestyles:
                    gc0.set_dashes(*linestyles[i % Nlinestyles])
                fg = edgecolors[i % Nedgecolors]
                if len(fg) == 4:
                    if fg[3] == 0.0:
                        # fully transparent edge: draw no stroke at all
                        gc0.set_linewidth(0)
                    else:
                        gc0.set_foreground(fg)
                else:
                    gc0.set_foreground(fg)
            if rgbFace is not None and len(rgbFace) == 4:
                if rgbFace[3] == 0:
                    # fully transparent face: suppress the fill
                    rgbFace = None
            gc0.set_antialiased(antialiaseds[i % Naa])
            if Nurls:
                gc0.set_url(urls[i % Nurls])
            yield xo, yo, path_id, gc0, rgbFace
        gc0.restore()
    def get_image_magnification(self):
        """
        Get the factor by which to magnify images passed to :meth:`draw_image`.
        Allows a backend to have images at a different resolution to other
        artists.

        The base implementation performs no magnification (factor 1.0).
        """
        return 1.0
    def draw_image(self, gc, x, y, im, transform=None):
        """
        Draw an RGBA image.

        *gc*
            a :class:`GraphicsContextBase` instance with clipping information.

        *x*
            the distance in physical units (i.e., dots or pixels) from the left
            hand side of the canvas.

        *y*
            the distance in physical units (i.e., dots or pixels) from the
            bottom side of the canvas.

        *im*
            An NxMx4 array of RGBA pixels (of dtype uint8).

        *transform*
            If and only if the concrete backend is written such that
            :meth:`option_scale_image` returns ``True``, an affine
            transformation *may* be passed to :meth:`draw_image`. It takes the
            form of a :class:`~matplotlib.transforms.Affine2DBase` instance.
            The translation vector of the transformation is given in physical
            units (i.e., dots or pixels). Note that the transformation does not
            override `x` and `y`, and has to be applied *before* translating
            the result by `x` and `y` (this can be accomplished by adding `x`
            and `y` to the translation vector defined by `transform`).

        Backends must override this; the base implementation only raises.
        """
        raise NotImplementedError
    def option_image_nocomposite(self):
        """
        override this method for renderers that do not necessarily always
        want to rescale and composite raster images. (like SVG, PDF, or PS)

        Returns False by default (images are composited).
        """
        return False
    def option_scale_image(self):
        """
        override this method for renderers that support arbitrary affine
        transformations in :meth:`draw_image` (most vector backends).

        Returns False by default (no affine transform support).
        """
        return False
    def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
        """
        Draw the TeX string *s* at (*x*, *y*) rotated by *angle* degrees,
        by converting it to a path with usetex mode forced on and drawing
        that path.  See :meth:`draw_text` for the meaning of the other
        parameters.
        """
        self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
    def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
        """
        Draw the text instance

        *gc*
            the :class:`GraphicsContextBase` instance

        *x*
            the x location of the text in display coords

        *y*
            the y location of the text baseline in display coords

        *s*
            the text string

        *prop*
            a :class:`matplotlib.font_manager.FontProperties` instance

        *angle*
            the rotation angle in degrees

        *ismath*
            If True, use mathtext parser. If "TeX", use *usetex* mode.

        *mtext*
            a :class:`matplotlib.text.Text` instance

        **backend implementers note**

        When you are trying to determine if you have gotten your bounding box
        right (which is what enables the text layout/alignment to work
        properly), it helps to change the line in text.py::

            if 0: bbox_artist(self, renderer)

        to if 1, and then the actual bounding box will be plotted along with
        your text.
        """
        self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
def _get_text_path_transform(self, x, y, s, prop, angle, ismath):
"""
return the text path and transform
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
text2path = self._text2path
fontsize = self.points_to_pixels(prop.get_size_in_points())
if ismath == "TeX":
verts, codes = text2path.get_text_path(prop, s, ismath=False,
usetex=True)
else:
verts, codes = text2path.get_text_path(prop, s, ismath=ismath,
usetex=False)
path = Path(verts, codes)
angle = angle / 180. * 3.141592
if self.flipy():
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, self.height - y)
else:
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, y)
return path, transform
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
"""
draw the text by converting them to paths using textpath module.
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
path, transform = self._get_text_path_transform(
x, y, s, prop, angle, ismath)
color = gc.get_rgb()
gc.set_linewidth(0.0)
self.draw_path(gc, path, transform, rgbFace=color)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
if ismath == 'TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self._text2path.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
dpi = self.points_to_pixels(72)
if ismath:
dims = self._text2path.mathtext_parser.parse(s, dpi, prop)
return dims[0:3] # return width, height, descent
flags = self._text2path._get_hinting_flag()
font = self._text2path._get_font(prop)
size = prop.get_size_in_points()
font.set_size(size, dpi)
# the width and height of unrotated string
font.set_text(s, 0.0, flags=flags)
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
    def flipy(self):
        """
        Return true if small y values correspond to the top of the
        canvas for this renderer.  Is used for drawing text
        (:mod:`matplotlib.text`) and images (:mod:`matplotlib.image`)
        only.
        """
        return True
    def get_canvas_width_height(self):
        # Placeholder size; concrete backends override with the real
        # canvas dimensions in display coords.
        'return the canvas width and height in display coords'
        return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
    def new_gc(self):
        """
        Return an instance of a :class:`GraphicsContextBase`.

        Backends override this to return their own graphics-context
        subclass.
        """
        return GraphicsContextBase()
    def points_to_pixels(self, points):
        """
        Convert points to display units

        *points*
            a float or a numpy array of float

        return points converted to pixels

        You need to override this function (unless your backend
        doesn't have a dpi, e.g., postscript or svg).  Some imaging
        systems assume some value for pixels per inch::

            points to pixels = points * pixels_per_inch/72.0 * dpi/72.0

        The base implementation is the identity (1 point == 1 pixel).
        """
        return points
    def strip_math(self, s):
        """Remove math markup from *s* via :func:`cbook.strip_math`."""
        return cbook.strip_math(s)
    def start_rasterizing(self):
        """
        Used in MixedModeRenderer. Switch to the raster renderer.

        No-op in the base class; mixed-mode-capable backends override it.
        """
        pass
    def stop_rasterizing(self):
        """
        Used in MixedModeRenderer. Switch back to the vector renderer
        and draw the contents of the raster renderer as an image on
        the vector renderer.

        No-op in the base class; mixed-mode-capable backends override it.
        """
        pass
    def start_filter(self):
        """
        Used in AggRenderer. Switch to a temporary renderer for image
        filtering effects.

        No-op in the base class; filter-capable backends override it.
        """
        pass
    def stop_filter(self, filter_func):
        """
        Used in AggRenderer. Switch back to the original renderer.
        The contents of the temporary renderer is processed with the
        *filter_func* and is drawn on the original renderer as an
        image.

        No-op in the base class; filter-capable backends override it.
        """
        pass
class GraphicsContextBase(object):
    """
    An abstract base class that provides color, line styles, etc...
    """

    def __init__(self):
        self._alpha = 1.0
        self._forced_alpha = False  # if True, _alpha overrides A from RGBA
        self._antialiased = 1  # use 0,1 not True, False for extension code
        self._capstyle = 'butt'
        self._cliprect = None
        self._clippath = None
        self._dashes = None, None
        self._joinstyle = 'round'
        self._linestyle = 'solid'
        self._linewidth = 1
        self._rgb = (0.0, 0.0, 0.0, 1.0)
        self._hatch = None
        self._hatch_color = colors.to_rgba(rcParams['hatch.color'])
        self._hatch_linewidth = rcParams['hatch.linewidth']
        self._url = None
        self._gid = None
        self._snap = None
        self._sketch = None

    def copy_properties(self, gc):
        'Copy properties from gc to self'
        self._alpha = gc._alpha
        self._forced_alpha = gc._forced_alpha
        self._antialiased = gc._antialiased
        self._capstyle = gc._capstyle
        self._cliprect = gc._cliprect
        self._clippath = gc._clippath
        self._dashes = gc._dashes
        self._joinstyle = gc._joinstyle
        self._linestyle = gc._linestyle
        self._linewidth = gc._linewidth
        self._rgb = gc._rgb
        self._hatch = gc._hatch
        self._hatch_color = gc._hatch_color
        self._hatch_linewidth = gc._hatch_linewidth
        self._url = gc._url
        self._gid = gc._gid
        self._snap = gc._snap
        self._sketch = gc._sketch

    def restore(self):
        """
        Restore the graphics context from the stack - needed only
        for backends that save graphics contexts on a stack
        """
        pass

    def get_alpha(self):
        """
        Return the alpha value used for blending - not supported on
        all backends
        """
        return self._alpha

    def get_antialiased(self):
        "Return true if the object should try to do antialiased rendering"
        return self._antialiased

    def get_capstyle(self):
        """
        Return the capstyle as a string in ('butt', 'round', 'projecting')
        """
        return self._capstyle

    def get_clip_rectangle(self):
        """
        Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox`
        instance
        """
        return self._cliprect

    def get_clip_path(self):
        """
        Return the clip path in the form (path, transform), where path
        is a :class:`~matplotlib.path.Path` instance, and transform is
        an affine transform to apply to the path before clipping.
        """
        if self._clippath is not None:
            return self._clippath.get_transformed_path_and_affine()
        return None, None

    def get_dashes(self):
        """
        Return the dash information as an offset dashlist tuple.

        The dash list is a even size list that gives the ink on, ink
        off in pixels.

        See p107 of to PostScript `BLUEBOOK
        <https://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
        for more info.

        Default value is None
        """
        return self._dashes

    def get_forced_alpha(self):
        """
        Return whether the value given by get_alpha() should be used to
        override any other alpha-channel values.
        """
        return self._forced_alpha

    def get_joinstyle(self):
        """
        Return the line join style as one of ('miter', 'round', 'bevel')
        """
        return self._joinstyle

    def get_linestyle(self, style=None):
        """
        Return the linestyle: one of ('solid', 'dashed', 'dashdot',
        'dotted').

        The *style* argument is unused; it is kept (now with a default)
        only for backward compatibility with callers that passed it.
        """
        return self._linestyle

    def get_linewidth(self):
        """
        Return the line width in points as a scalar
        """
        return self._linewidth

    def get_rgb(self):
        """
        returns a tuple of three or four floats from 0-1.
        """
        return self._rgb

    def get_url(self):
        """
        returns a url if one is set, None otherwise
        """
        return self._url

    def get_gid(self):
        """
        Return the object identifier if one is set, None otherwise.
        """
        return self._gid

    def get_snap(self):
        """
        returns the snap setting which may be:

          * True: snap vertices to the nearest pixel center

          * False: leave vertices as-is

          * None: (auto) If the path contains only rectilinear line
            segments, round to the nearest pixel center
        """
        return self._snap

    def set_alpha(self, alpha):
        """
        Set the alpha value used for blending - not supported on all backends.

        If ``alpha=None`` (the default), the alpha components of the
        foreground and fill colors will be used to set their respective
        transparencies (where applicable); otherwise, ``alpha`` will override
        them.
        """
        if alpha is not None:
            self._alpha = alpha
            self._forced_alpha = True
        else:
            self._alpha = 1.0
            self._forced_alpha = False
        # Re-apply the foreground so its alpha channel picks up the new
        # forced/unforced state.
        self.set_foreground(self._rgb, isRGBA=True)

    def set_antialiased(self, b):
        """
        True if object should be drawn with antialiased rendering
        """
        # use 0, 1 to make life easier on extension code trying to read the gc
        if b:
            self._antialiased = 1
        else:
            self._antialiased = 0

    def set_capstyle(self, cs):
        """
        Set the capstyle as a string in ('butt', 'round', 'projecting')
        """
        if cs in ('butt', 'round', 'projecting'):
            self._capstyle = cs
        else:
            raise ValueError('Unrecognized cap style.  Found %s' % cs)

    def set_clip_rectangle(self, rectangle):
        """
        Set the clip rectangle with sequence (left, bottom, width, height)
        """
        self._cliprect = rectangle

    def set_clip_path(self, path):
        """
        Set the clip path and transformation.  Path should be a
        :class:`~matplotlib.transforms.TransformedPath` instance.
        """
        if path is not None and not isinstance(path,
                                               transforms.TransformedPath):
            # NOTE: fixed missing space between "TransformedPath" and
            # "instance" in the error message.
            msg = ("Path should be a matplotlib.transforms.TransformedPath "
                   "instance.")
            raise ValueError(msg)
        self._clippath = path

    def set_dashes(self, dash_offset, dash_list):
        """
        Set the dash style for the gc.

        *dash_offset*
            is the offset (usually 0).

        *dash_list*
            specifies the on-off sequence as points.
            ``(None, None)`` specifies a solid line
        """
        if dash_list is not None:
            dl = np.asarray(dash_list)
            if np.any(dl <= 0.0):
                raise ValueError("All values in the dash list must be positive")
        self._dashes = dash_offset, dash_list

    def set_foreground(self, fg, isRGBA=False):
        """
        Set the foreground color.  fg can be a MATLAB format string, a
        html hex color string, an rgb or rgba unit tuple, or a float between 0
        and 1.  In the latter case, grayscale is used.

        If you know fg is rgba, set ``isRGBA=True`` for efficiency.
        """
        if self._forced_alpha and isRGBA:
            self._rgb = fg[:3] + (self._alpha,)
        elif self._forced_alpha:
            self._rgb = colors.to_rgba(fg, self._alpha)
        elif isRGBA:
            self._rgb = fg
        else:
            self._rgb = colors.to_rgba(fg)

    def set_graylevel(self, frac):
        """
        Set the foreground color to be a gray level with *frac*
        """
        # When removing, remember to remove all overrides in subclasses.
        msg = ("set_graylevel is deprecated for removal in 1.6; "
               "you can achieve the same result by using "
               "set_foreground((frac, frac, frac))")
        warnings.warn(msg, mplDeprecation)
        self._rgb = (frac, frac, frac, self._alpha)

    def set_joinstyle(self, js):
        """
        Set the join style to be one of ('miter', 'round', 'bevel')
        """
        if js in ('miter', 'round', 'bevel'):
            self._joinstyle = js
        else:
            raise ValueError('Unrecognized join style.  Found %s' % js)

    def set_linewidth(self, w):
        """
        Set the linewidth in points
        """
        self._linewidth = float(w)

    def set_linestyle(self, style):
        """
        Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
        'dotted'). These are defined in the rcParams
        `lines.dashed_pattern`, `lines.dashdot_pattern` and
        `lines.dotted_pattern`.  One may also specify customized dash
        styles by providing a tuple of (offset, dash pairs).
        """
        self._linestyle = style

    def set_url(self, url):
        """
        Sets the url for links in compatible backends
        """
        self._url = url

    def set_gid(self, id):
        """
        Sets the id.
        """
        self._gid = id

    def set_snap(self, snap):
        """
        Sets the snap setting which may be:

          * True: snap vertices to the nearest pixel center

          * False: leave vertices as-is

          * None: (auto) If the path contains only rectilinear line
            segments, round to the nearest pixel center
        """
        self._snap = snap

    def set_hatch(self, hatch):
        """
        Sets the hatch style for filling
        """
        self._hatch = hatch

    def get_hatch(self):
        """
        Gets the current hatch style
        """
        return self._hatch

    def get_hatch_path(self, density=6.0):
        """
        Returns a Path for the current hatch.
        """
        if self._hatch is None:
            return None
        return Path.hatch(self._hatch, density)

    def get_hatch_color(self):
        """
        Gets the color to use for hatching.
        """
        return self._hatch_color

    def set_hatch_color(self, hatch_color):
        """
        sets the color to use for hatching.
        """
        self._hatch_color = hatch_color

    def get_hatch_linewidth(self):
        """
        Gets the linewidth to use for hatching.
        """
        return self._hatch_linewidth

    def get_sketch_params(self):
        """
        Returns the sketch parameters for the artist.

        Returns
        -------
        sketch_params : tuple or `None`

            A 3-tuple with the following elements:

            * `scale`: The amplitude of the wiggle perpendicular to the
              source line.

            * `length`: The length of the wiggle along the line.

            * `randomness`: The scale factor by which the length is
              shrunken or expanded.

            May return `None` if no sketch parameters were set.
        """
        return self._sketch

    def set_sketch_params(self, scale=None, length=None, randomness=None):
        """
        Sets the sketch parameters.

        Parameters
        ----------
        scale : float, optional
            The amplitude of the wiggle perpendicular to the source
            line, in pixels.  If scale is `None`, or not provided, no
            sketch filter will be provided.

        length : float, optional
             The length of the wiggle along the line, in pixels
             (default 128.0)

        randomness : float, optional
            The scale factor by which the length is shrunken or
            expanded (default 16.0)
        """
        if scale is None:
            self._sketch = None
        else:
            self._sketch = (scale, length or 128.0, randomness or 16.0)
class TimerBase(object):
    '''
    A base class for providing timer events, useful for things animations.
    Backends need to implement a few specific methods in order to use their
    own timing mechanisms so that the timer events are integrated into their
    event loops.

    Mandatory functions that must be implemented:

        * `_timer_start`: Contains backend-specific code for starting
          the timer

        * `_timer_stop`: Contains backend-specific code for stopping
          the timer

    Optional overrides:

        * `_timer_set_single_shot`: Code for setting the timer to
          single shot operating mode, if supported by the timer
          object. If not, the `Timer` class itself will store the flag
          and the `_on_timer` method should be overridden to support
          such behavior.

        * `_timer_set_interval`: Code for setting the interval on the
          timer, if there is a method for doing so on the timer
          object.

        * `_on_timer`: This is the internal function that any timer
          object should call, which will handle the task of running
          all callbacks that have been set.

    Attributes:

        * `interval`: The time between timer events in
          milliseconds. Default is 1000 ms.

        * `single_shot`: Boolean flag indicating whether this timer
          should operate as single shot (run once and then
          stop). Defaults to `False`.

        * `callbacks`: Stores list of (func, args) tuples that will be
          called upon timer events. This list can be manipulated
          directly, or the functions `add_callback` and
          `remove_callback` can be used.
    '''
    def __init__(self, interval=None, callbacks=None):
        #Initialize empty callbacks list and setup default settings if necssary
        if callbacks is None:
            self.callbacks = []
        else:
            self.callbacks = callbacks[:]  # Create a copy

        if interval is None:
            self._interval = 1000
        else:
            self._interval = interval

        self._single = False

        # Default attribute for holding the GUI-specific timer object
        self._timer = None

    def __del__(self):
        'Need to stop timer and possibly disconnect timer.'
        self._timer_stop()

    def start(self, interval=None):
        '''
        Start the timer object. `interval` is optional and will be used
        to reset the timer interval first if provided.
        '''
        if interval is not None:
            self._set_interval(interval)
        self._timer_start()

    def stop(self):
        '''
        Stop the timer.
        '''
        self._timer_stop()

    def _timer_start(self):
        pass

    def _timer_stop(self):
        pass

    def _get_interval(self):
        return self._interval

    def _set_interval(self, interval):
        # Force to int since none of the backends actually support fractional
        # milliseconds, and some error or give warnings.
        interval = int(interval)
        self._interval = interval
        self._timer_set_interval()

    interval = property(_get_interval, _set_interval)

    def _get_single_shot(self):
        return self._single

    def _set_single_shot(self, ss=True):
        self._single = ss
        self._timer_set_single_shot()

    single_shot = property(_get_single_shot, _set_single_shot)

    def add_callback(self, func, *args, **kwargs):
        '''
        Register `func` to be called by timer when the event fires. Any
        additional arguments provided will be passed to `func`.
        '''
        self.callbacks.append((func, args, kwargs))

    def remove_callback(self, func, *args, **kwargs):
        '''
        Remove `func` from list of callbacks. `args` and `kwargs` are optional
        and used to distinguish between copies of the same function registered
        to be called with different arguments.
        '''
        if args or kwargs:
            self.callbacks.remove((func, args, kwargs))
        else:
            funcs = [c[0] for c in self.callbacks]
            if func in funcs:
                self.callbacks.pop(funcs.index(func))

    def _timer_set_interval(self):
        'Used to set interval on underlying timer object.'
        pass

    def _timer_set_single_shot(self):
        'Used to set single shot on underlying timer object.'
        pass

    def _on_timer(self):
        '''
        Runs all function that have been registered as callbacks. Functions
        can return False (or 0) if they should not be called any more. If there
        are no callbacks, the timer is automatically stopped.
        '''
        # Iterate over a snapshot: removing a finished callback from the
        # live list while looping over it would silently skip the next
        # registered callback.
        for func, args, kwargs in list(self.callbacks):
            ret = func(*args, **kwargs)
            # docstring above explains why we use `if ret == False` here,
            # instead of `if not ret`.
            if ret == False:
                self.callbacks.remove((func, args, kwargs))

        if len(self.callbacks) == 0:
            self.stop()
class Event(object):
    """
    A matplotlib event.  Attach additional attributes as defined in
    :meth:`FigureCanvasBase.mpl_connect`.  The following attributes
    are defined and shown with their default values

    *name*
        the event name

    *canvas*
        the FigureCanvas instance generating the event

    *guiEvent*
        the GUI event that triggered the matplotlib event
    """
    def __init__(self, name, canvas, guiEvent=None):
        # Record the identifying triple for this event.
        self.name, self.canvas, self.guiEvent = name, canvas, guiEvent
class IdleEvent(Event):
    """
    An event triggered by the GUI backend when it is idle -- useful
    for passive animation

    Carries no attributes beyond those of :class:`Event`.
    """
    pass
class DrawEvent(Event):
    """
    An event triggered by a draw operation on the canvas.

    In addition to the :class:`Event` attributes, the following event
    attributes are defined:

    *renderer*
        the :class:`RendererBase` instance for the draw event
    """
    def __init__(self, name, canvas, renderer):
        super(DrawEvent, self).__init__(name, canvas)
        self.renderer = renderer
class ResizeEvent(Event):
    """
    An event triggered by a canvas resize.

    In addition to the :class:`Event` attributes, the following event
    attributes are defined:

    *width*
        width of the canvas in pixels

    *height*
        height of the canvas in pixels
    """
    def __init__(self, name, canvas):
        super(ResizeEvent, self).__init__(name, canvas)
        # Snapshot the canvas size at the time of the event.
        self.width, self.height = canvas.get_width_height()
class CloseEvent(Event):
    """
    An event triggered by a figure being closed.

    Carries no attributes beyond those of :class:`Event`.
    """
    def __init__(self, name, canvas, guiEvent=None):
        super(CloseEvent, self).__init__(name, canvas, guiEvent)
class LocationEvent(Event):
    """
    An event that has a screen location

    The following additional attributes are defined and shown with
    their default values.

    In addition to the :class:`Event` attributes, the following
    event attributes are defined:

    *x*
        x position - pixels from left of canvas

    *y*
        y position - pixels from bottom of canvas

    *inaxes*
        the :class:`~matplotlib.axes.Axes` instance if mouse is over axes

    *xdata*
        x coord of mouse in data coords

    *ydata*
        y coord of mouse in data coords
    """
    x = None       # x position - pixels from left of canvas
    y = None       # y position - pixels from bottom of canvas
    inaxes = None  # the Axes instance if mouse is over axes
    xdata = None   # x coord of mouse in data coords
    ydata = None   # y coord of mouse in data coords

    # the last event that was triggered before this one
    lastevent = None

    def __init__(self, name, canvas, x, y, guiEvent=None):
        """
        *x*, *y* in figure coords, 0,0 = bottom, left
        """
        Event.__init__(self, name, canvas, guiEvent=guiEvent)
        self.x = x
        self.y = y

        if x is None or y is None:
            # cannot check if event was in axes if no x,y info
            self.inaxes = None
            self._update_enter_leave()
            return

        # Find all axes containing the mouse
        if self.canvas.mouse_grabber is None:
            axes_list = [a for a in self.canvas.figure.get_axes()
                         if a.in_axes(self)]
        else:
            axes_list = [self.canvas.mouse_grabber]

        if len(axes_list) == 0:  # None found
            self.inaxes = None
            self._update_enter_leave()
            return
        elif (len(axes_list) > 1):  # Overlap, get the highest zorder
            axes_list.sort(key=lambda x: x.zorder)
            self.inaxes = axes_list[-1]  # Use the highest zorder
        else:  # Just found one hit
            self.inaxes = axes_list[0]

        try:
            trans = self.inaxes.transData.inverted()
            xdata, ydata = trans.transform_point((x, y))
        except ValueError:
            # a non-invertible data transform leaves data coords undefined
            self.xdata = None
            self.ydata = None
        else:
            self.xdata = xdata
            self.ydata = ydata

        self._update_enter_leave()

    def _update_enter_leave(self):
        'process the figure/axes enter leave events'
        if LocationEvent.lastevent is not None:
            last = LocationEvent.lastevent
            if last.inaxes != self.inaxes:
                # process axes enter/leave events
                try:
                    if last.inaxes is not None:
                        last.canvas.callbacks.process('axes_leave_event', last)
                except Exception:
                    # See ticket 2901582.
                    # If anything goes wrong while delivering the leave
                    # event (e.g. the previous canvas is already dead), we
                    # simply want to move on and process the current event.
                    # (Narrowed from a bare ``except:`` so that
                    # KeyboardInterrupt/SystemExit are no longer swallowed.)
                    pass

                if self.inaxes is not None:
                    self.canvas.callbacks.process('axes_enter_event', self)

        else:
            # process a figure enter event
            if self.inaxes is not None:
                self.canvas.callbacks.process('axes_enter_event', self)

        LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
    """
    A mouse event ('button_press_event',
                   'button_release_event',
                   'scroll_event',
                   'motion_notify_event').

    In addition to the :class:`Event` and :class:`LocationEvent`
    attributes, the following attributes are defined:

    *button*
        button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
        for scroll events).  Note that in the nbagg backend, both the
        middle and right clicks return 3 since right clicking will bring
        up the context menu in some browsers.

    *key*
        the key depressed when the mouse event triggered (see
        :class:`KeyEvent`)

    *step*
        number of scroll steps (positive for 'up', negative for 'down')

    Example usage::

        def on_press(event):
            print('you pressed', event.button, event.xdata, event.ydata)

        cid = fig.canvas.mpl_connect('button_press_event', on_press)
    """
    x = None         # x position - pixels from left of canvas
    y = None         # y position - pixels from bottom of canvas
    button = None    # button pressed None, 1, 2, 3
    dblclick = None  # whether the event is the result of a double click
    inaxes = None    # the Axes instance if mouse is over axes
    xdata = None     # x coord of mouse in data coords
    ydata = None     # y coord of mouse in data coords
    step = None      # scroll steps for scroll events

    def __init__(self, name, canvas, x, y, button=None, key=None,
                 step=0, dblclick=False, guiEvent=None):
        """
        x, y in figure coords, 0,0 = bottom, left
        button pressed None, 1, 2, 3, 'up', 'down'
        """
        super(MouseEvent, self).__init__(name, canvas, x, y,
                                         guiEvent=guiEvent)
        self.button = button
        self.key = key
        self.step = step
        self.dblclick = dblclick

    def __str__(self):
        fmt = ("MPL MouseEvent: xy=(%d,%d) xydata=(%s,%s) button=%s "
               "dblclick=%s inaxes=%s")
        return fmt % (self.x, self.y, self.xdata, self.ydata,
                      self.button, self.dblclick, self.inaxes)
class PickEvent(Event):
    """
    a pick event, fired when the user picks a location on the canvas
    sufficiently close to an artist.

    Attrs: all the :class:`Event` attributes plus

    *mouseevent*
        the :class:`MouseEvent` that generated the pick

    *artist*
        the :class:`~matplotlib.artist.Artist` picked

    other
        extra class dependent attrs -- e.g., a
        :class:`~matplotlib.lines.Line2D` pick may define different
        extra attributes than a
        :class:`~matplotlib.collections.PatchCollection` pick event

    Example usage::

        ax.plot(np.rand(100), 'o', picker=5)  # 5 points tolerance

        def on_pick(event):
            line = event.artist
            xdata, ydata = line.get_data()
            ind = event.ind
            print('on pick line:', np.array([xdata[ind], ydata[ind]]).T)

        cid = fig.canvas.mpl_connect('pick_event', on_pick)
    """
    def __init__(self, name, canvas, mouseevent, artist,
                 guiEvent=None, **kwargs):
        super(PickEvent, self).__init__(name, canvas, guiEvent)
        self.mouseevent = mouseevent
        self.artist = artist
        # Forward any picker-specific extras as plain attributes.
        self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
    """
    A key event (key press, key release).

    Attach additional attributes as defined in
    :meth:`FigureCanvasBase.mpl_connect`.

    In addition to the :class:`Event` and :class:`LocationEvent`
    attributes, the following attributes are defined:

    *key*
        the key(s) pressed. Could be **None**, a single case sensitive ascii
        character ("g", "G", "#", etc.), a special key
        ("control", "shift", "f1", "up", etc.) or a
        combination of the above (e.g., "ctrl+alt+g", "ctrl+alt+G").

    .. note::

        Modifier keys will be prefixed to the pressed key and will be in the
        order "ctrl", "alt", "super". The exception to this rule is when the
        pressed key is itself a modifier key, therefore "ctrl+alt" and
        "alt+control" can both be valid key values.

    Example usage::

        def on_key(event):
            print('you pressed', event.key, event.xdata, event.ydata)

        cid = fig.canvas.mpl_connect('key_press_event', on_key)
    """
    def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
        super(KeyEvent, self).__init__(name, canvas, x, y,
                                       guiEvent=guiEvent)
        self.key = key
class FigureCanvasBase(object):
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event',
'close_event'
]
supports_blit = True
fixed_dpi = None
filetypes = _default_filetypes
if _has_pil:
# JPEG support
register_backend('jpg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
register_backend('jpeg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
# TIFF support
register_backend('tif', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
register_backend('tiff', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
    def __init__(self, figure):
        """Attach this canvas to *figure* and set up event plumbing."""
        # Guard against re-entrant idle draws while wiring things up.
        self._is_idle_drawing = True
        # True only while print_figure is writing to a file.
        self._is_saving = False
        figure.set_canvas(self)
        self.figure = figure
        # a dictionary from event name to a dictionary that maps cid->func
        self.callbacks = cbook.CallbackRegistry()
        self.widgetlock = widgets.LockDraw()
        self._button = None  # the button pressed
        self._key = None  # the key pressed
        self._lastx, self._lasty = None, None  # last known pointer position
        # Route button presses and scrolls into the pick machinery.
        self.button_pick_id = self.mpl_connect('button_press_event', self.pick)
        self.scroll_pick_id = self.mpl_connect('scroll_event', self.pick)
        self.mouse_grabber = None  # the axes currently grabbing mouse
        self.toolbar = None  # NavigationToolbar2 will set me
        self._is_idle_drawing = False
@contextmanager
def _idle_draw_cntx(self):
self._is_idle_drawing = True
yield
self._is_idle_drawing = False
    def is_saving(self):
        """
        Returns `True` when the renderer is in the process of saving
        to a file, rather than rendering for an on-screen buffer.
        """
        # Set/cleared by print_figure; backends may render differently
        # (e.g. skip interactive-only decorations) while saving.
        return self._is_saving
    def onRemove(self, ev):
        """
        Mouse event processor which removes the top artist
        under the cursor.  Connect this to the 'mouse_press_event'
        using::

            canvas.mpl_connect('mouse_press_event',canvas.onRemove)
        """
        # Find the top artist under the cursor
        under = self.figure.hitlist(ev)
        under.sort(key=lambda x: x.zorder)
        h = None
        if under:
            h = under[-1]
        # Try deleting that artist, or its parent if you
        # can't delete the artist
        while h:
            if h.remove():
                self.draw_idle()
                break
            # Removal failed: walk up to the artist's container (the
            # first hit artist that lists it as a child) and retry.
            parent = None
            for p in under:
                if h in p.get_children():
                    parent = p
                    break
            h = parent
    def onHilite(self, ev):
        """
        Mouse event processor which highlights the artists
        under the cursor.  Connect this to the 'motion_notify_event'
        using::

            canvas.mpl_connect('motion_notify_event',canvas.onHilite)

        .. deprecated:: 1.5
        """
        msg = ("onHilite has been deprecated in 1.5 and will be removed "
              "in 1.6. This function has not been used internally by mpl "
              "since 2007.")
        warnings.warn(msg, mplDeprecation)
        # Lazily create the map of artist -> saved original colour(s).
        if not hasattr(self, '_active'):
            self._active = dict()
        under = self.figure.hitlist(ev)
        enter = [a for a in under if a not in self._active]
        leave = [a for a in self._active if a not in under]
        # On leave restore the captured colour
        for a in leave:
            if hasattr(a, 'get_color'):
                a.set_color(self._active[a])
            elif hasattr(a, 'get_edgecolor'):
                a.set_edgecolor(self._active[a][0])
                a.set_facecolor(self._active[a][1])
            del self._active[a]
        # On enter, capture the color and repaint the artist
        # with the highlight colour.  Capturing colour has to
        # be done first in case the parent recolouring affects
        # the child.
        for a in enter:
            if hasattr(a, 'get_color'):
                self._active[a] = a.get_color()
            elif hasattr(a, 'get_edgecolor'):
                self._active[a] = (a.get_edgecolor(), a.get_facecolor())
            else:
                self._active[a] = None
        for a in enter:
            if hasattr(a, 'get_color'):
                a.set_color('red')
            elif hasattr(a, 'get_edgecolor'):
                a.set_edgecolor('red')
                a.set_facecolor('lightblue')
            else:
                self._active[a] = None
        self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
    def blit(self, bbox=None):
        """
        blit the canvas in bbox (default entire canvas)
        """
        # No-op here; backends that support blitting override this.
        pass
    def resize(self, w, h):
        """
        set the canvas size in pixels
        """
        # No-op here; GUI backends override this.
        pass
def draw_event(self, renderer):
"""
This method will be call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
This method will be call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
    def close_event(self, guiEvent=None):
        """
        This method will call all functions connected to the
        'close_event' with a :class:`CloseEvent`
        """
        s = 'close_event'
        try:
            event = CloseEvent(s, self, guiEvent=guiEvent)
            self.callbacks.process(s, event)
        except (TypeError, AttributeError):
            pass
            # Suppress the TypeError when the python session is being killed.
            # It may be that a better solution would be a mechanism to
            # disconnect all callbacks upon shutdown.
            # AttributeError occurs on OSX with qt4agg upon exiting
            # with an open window; 'callbacks' attribute no longer exists.
def key_press_event(self, key, guiEvent=None):
"""
This method will be call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
This method will be call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
    def pick_event(self, mouseevent, artist, **kwargs):
        """
        This method will be called by artists who are picked and will
        fire off :class:`PickEvent` callbacks registered listeners
        """
        s = 'pick_event'
        # The pick event reuses the gui event of the mouse event that
        # triggered the pick.
        event = PickEvent(s, self, mouseevent, artist,
                          guiEvent=mouseevent.guiEvent,
                          **kwargs)
        self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
This method will be call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
    def button_press_event(self, x, y, button, dblclick=False, guiEvent=None):
        """
        Backend derived classes should call this function on any mouse
        button press.  x,y are the canvas coords: 0,0 is lower, left.
        button and key are as defined in :class:`MouseEvent`.

        This method will call all functions connected to the
        'button_press_event' with a :class:`MouseEvent` instance.
        """
        # Remember which button is down so drag/release handlers can use it.
        self._button = button
        s = 'button_press_event'
        mouseevent = MouseEvent(s, self, x, y, button, self._key,
                                dblclick=dblclick, guiEvent=guiEvent)
        self.callbacks.process(s, mouseevent)
    def button_release_event(self, x, y, button, guiEvent=None):
        """
        Backend derived classes should call this function on any mouse
        button release.

        *x*
            the canvas coordinates where 0=left

        *y*
            the canvas coordinates where 0=bottom

        *guiEvent*
            the native UI event that generated the mpl event

        This method will call all functions connected to the
        'button_release_event' with a :class:`MouseEvent` instance.
        """
        s = 'button_release_event'
        event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
        self.callbacks.process(s, event)
        # Clear the stored button only after callbacks have run.
        self._button = None
    def motion_notify_event(self, x, y, guiEvent=None):
        """
        Backend derived classes should call this function on any
        motion-notify-event.

        *x*
            the canvas coordinates where 0=left

        *y*
            the canvas coordinates where 0=bottom

        *guiEvent*
            the native UI event that generated the mpl event

        This method will call all functions connected to the
        'motion_notify_event' with a :class:`MouseEvent` instance.
        """
        # Track the pointer so key events can carry a location.
        self._lastx, self._lasty = x, y
        s = 'motion_notify_event'
        event = MouseEvent(s, self, x, y, self._button, self._key,
                           guiEvent=guiEvent)
        self.callbacks.process(s, event)
    def leave_notify_event(self, guiEvent=None):
        """
        Backend derived classes should call this function when leaving
        canvas

        *guiEvent*
            the native UI event that generated the mpl event
        """
        # The last LocationEvent is reused as the payload of the
        # figure_leave_event, then the cached state is cleared.
        self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
        LocationEvent.lastevent = None
        self._lastx, self._lasty = None, None
    def enter_notify_event(self, guiEvent=None, xy=None):
        """
        Backend derived classes should call this function when entering
        canvas

        *guiEvent*
            the native UI event that generated the mpl event

        *xy*
            the coordinate location of the pointer when the canvas is
            entered
        """
        # Seed the last-known pointer position when the backend knows it.
        if xy is not None:
            x, y = xy
            self._lastx, self._lasty = x, y

        event = Event('figure_enter_event', self, guiEvent)
        self.callbacks.process('figure_enter_event', event)
    def idle_event(self, guiEvent=None):
        """Called when GUI is idle."""
        s = 'idle_event'
        event = IdleEvent(s, self, guiEvent=guiEvent)
        self.callbacks.process(s, event)
def grab_mouse(self, ax):
"""
Set the child axes which are currently grabbing the mouse events.
Usually called by the widgets themselves.
It is an error to call this if the mouse is already grabbed by
another axes.
"""
if self.mouse_grabber not in (None, ax):
raise RuntimeError('two different attempted to grab mouse input')
self.mouse_grabber = ax
    def release_mouse(self, ax):
        """
        Release the mouse grab held by the axes *ax*.

        Usually called by the widgets.  It is ok to call this even if
        *ax* doesn't currently hold the mouse grab.
        """
        if self.mouse_grabber is ax:
            self.mouse_grabber = None
    def draw(self, *args, **kwargs):
        """
        Render the :class:`~matplotlib.figure.Figure`
        """
        # No-op here; concrete backends implement the actual rendering.
        pass
    def draw_idle(self, *args, **kwargs):
        """
        :meth:`draw` only if idle; defaults to draw but backends can override
        """
        # The context manager sets _is_idle_drawing, so nested calls
        # triggered from within the draw become no-ops.
        if not self._is_idle_drawing:
            with self._idle_draw_cntx():
                self.draw(*args, **kwargs)
    def draw_cursor(self, event):
        """
        Draw a cursor in the event.axes if inaxes is not None.  Use
        native GUI drawing for efficiency if possible
        """
        # No-op here; GUI backends may override.
        pass
def get_width_height(self):
"""
Return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
    @classmethod
    def get_supported_filetypes(cls):
        """Return dict of savefig file formats supported by this backend"""
        return cls.filetypes
@classmethod
def get_supported_filetypes_grouped(cls):
"""Return a dict of savefig file formats supported by this backend,
where the keys are a file type name, such as 'Joint Photographic
Experts Group', and the values are a list of filename extensions used
for that filetype, such as ['jpg', 'jpeg']."""
groupings = {}
for ext, name in six.iteritems(cls.filetypes):
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
    def _get_output_canvas(self, format):
        """Return a canvas that is suitable for saving figures to a specified
        file format. If necessary, this function will switch to a registered
        backend that supports the format.
        """
        method_name = 'print_%s' % format

        # check if this canvas supports the requested format
        if hasattr(self, method_name):
            return self

        # check if there is a default canvas for the requested format
        canvas_class = get_registered_canvas_class(format)
        if canvas_class:
            return self.switch_backends(canvas_class)

        # else report error for unsupported format
        formats = sorted(self.get_supported_filetypes())
        raise ValueError('Format "%s" is not supported.\n'
                         'Supported formats: '
                         '%s.' % (format, ', '.join(formats)))
    def print_figure(self, filename, dpi=None, facecolor=None, edgecolor=None,
                     orientation='portrait', format=None, **kwargs):
        """
        Render the figure to hardcopy. Set the figure patch face and edge
        colors.  This is useful because some of the GUIs have a gray figure
        face color background and you'll probably want to override this on
        hardcopy.

        Arguments are:

        *filename*
            can also be a file object on image backends

        *orientation*
            only currently applies to PostScript printing.

        *dpi*
            the dots per inch to save the figure in; if None, use savefig.dpi

        *facecolor*
            the facecolor of the figure; if None, defaults to savefig.facecolor

        *edgecolor*
            the edgecolor of the figure; if None, defaults to savefig.edgecolor

        *orientation*
            'landscape' | 'portrait' (not supported on all backends)

        *format*
            when set, forcibly set the file format to save to

        *bbox_inches*
            Bbox in inches. Only the given portion of the figure is
            saved. If 'tight', try to figure out the tight bbox of
            the figure. If None, use savefig.bbox

        *pad_inches*
            Amount of padding around the figure when bbox_inches is
            'tight'. If None, use savefig.pad_inches

        *bbox_extra_artists*
            A list of extra artists that will be considered when the
            tight bbox is calculated.
        """
        self._is_saving = True

        if format is None:
            # get format from filename, or from backend's default filetype
            if cbook.is_string_like(filename):
                format = os.path.splitext(filename)[1][1:]
            if format is None or format == '':
                format = self.get_default_filetype()
                if cbook.is_string_like(filename):
                    filename = filename.rstrip('.') + '.' + format

        format = format.lower()

        # get canvas object and print method for format
        canvas = self._get_output_canvas(format)
        print_method = getattr(canvas, 'print_%s' % format)

        if dpi is None:
            dpi = rcParams['savefig.dpi']

        if dpi == 'figure':
            dpi = getattr(self.figure, '_original_dpi', self.figure.dpi)

        if facecolor is None:
            facecolor = rcParams['savefig.facecolor']
        if edgecolor is None:
            edgecolor = rcParams['savefig.edgecolor']

        # Remember the on-screen figure settings so they can be restored
        # after the save.
        origDPI = self.figure.dpi
        origfacecolor = self.figure.get_facecolor()
        origedgecolor = self.figure.get_edgecolor()

        self.figure.dpi = dpi
        self.figure.set_facecolor(facecolor)
        self.figure.set_edgecolor(edgecolor)

        bbox_inches = kwargs.pop("bbox_inches", None)
        if bbox_inches is None:
            bbox_inches = rcParams['savefig.bbox']

        if bbox_inches:
            # call adjust_bbox to save only the given area
            if bbox_inches == "tight":
                # when bbox_inches == "tight", it saves the figure
                # twice. The first save command is just to estimate
                # the bounding box of the figure. A stringIO object is
                # used as a temporary file object, but it causes a
                # problem for some backends (ps backend with
                # usetex=True) if they expect a filename, not a
                # file-like object. As I think it is best to change
                # the backend to support file-like object, i'm going
                # to leave it as it is. However, a better solution
                # than stringIO seems to be needed. -JJL
                #result = getattr(self, method_name)
                result = print_method(
                    io.BytesIO(),
                    dpi=dpi,
                    facecolor=facecolor,
                    edgecolor=edgecolor,
                    orientation=orientation,
                    dryrun=True,
                    **kwargs)
                renderer = self.figure._cachedRenderer
                bbox_inches = self.figure.get_tightbbox(renderer)

                bbox_artists = kwargs.pop("bbox_extra_artists", None)
                if bbox_artists is None:
                    bbox_artists = self.figure.get_default_bbox_extra_artists()

                # Grow the tight bbox to include the extra artists,
                # honouring each artist's clipping.
                bbox_filtered = []
                for a in bbox_artists:
                    bbox = a.get_window_extent(renderer)
                    if a.get_clip_on():
                        clip_box = a.get_clip_box()
                        if clip_box is not None:
                            bbox = Bbox.intersection(bbox, clip_box)
                        clip_path = a.get_clip_path()
                        if clip_path is not None and bbox is not None:
                            clip_path = clip_path.get_fully_transformed_path()
                            bbox = Bbox.intersection(bbox,
                                                     clip_path.get_extents())
                    if bbox is not None and (bbox.width != 0 or
                                             bbox.height != 0):
                        bbox_filtered.append(bbox)

                if bbox_filtered:
                    _bbox = Bbox.union(bbox_filtered)
                    trans = Affine2D().scale(1.0 / self.figure.dpi)
                    bbox_extra = TransformedBbox(_bbox, trans)
                    bbox_inches = Bbox.union([bbox_inches, bbox_extra])

                pad = kwargs.pop("pad_inches", None)
                if pad is None:
                    pad = rcParams['savefig.pad_inches']

                bbox_inches = bbox_inches.padded(pad)

            restore_bbox = tight_bbox.adjust_bbox(self.figure, bbox_inches,
                                                  canvas.fixed_dpi)

            _bbox_inches_restore = (bbox_inches, restore_bbox)
        else:
            _bbox_inches_restore = None

        try:
            #result = getattr(self, method_name)(
            result = print_method(
                filename,
                dpi=dpi,
                facecolor=facecolor,
                edgecolor=edgecolor,
                orientation=orientation,
                bbox_inches_restore=_bbox_inches_restore,
                **kwargs)
        finally:
            # Undo the temporary save-time figure settings.
            if bbox_inches and restore_bbox:
                restore_bbox()

            self.figure.dpi = origDPI
            self.figure.set_facecolor(origfacecolor)
            self.figure.set_edgecolor(origedgecolor)
            self.figure.set_canvas(self)
        self._is_saving = False
        #self.figure.canvas.draw() ## seems superfluous
        return result
    @classmethod
    def get_default_filetype(cls):
        """
        Get the default savefig file format as specified in rcParam
        ``savefig.format``. Returned string excludes period. Overridden
        in backends that only support a single file type.
        """
        return rcParams['savefig.format']
    def get_window_title(self):
        """
        Get the title text of the window containing the figure.
        Return None if there is no window (e.g., a PS backend).
        """
        # 'manager' is only set on canvases embedded in a GUI window.
        if hasattr(self, "manager"):
            return self.manager.get_window_title()
    def set_window_title(self, title):
        """
        Set the title text of the window containing the figure.  Note that
        this has no effect if there is no window (e.g., a PS backend).
        """
        if hasattr(self, "manager"):
            self.manager.set_window_title(title)
    def get_default_filename(self):
        """
        Return a string, which includes extension, suitable for use as
        a default filename.
        """
        default_basename = self.get_window_title() or 'image'
        default_basename = default_basename.replace(' ', '_')
        default_filetype = self.get_default_filetype()
        default_filename = default_basename + '.' + default_filetype

        save_dir = os.path.expanduser(rcParams.get('savefig.directory', ''))

        # ensure non-existing filename in save dir
        i = 1
        while os.path.isfile(os.path.join(save_dir, default_filename)):
            # attach numerical count to basename
            default_filename = '{0}-{1}.{2}'.format(default_basename, i, default_filetype)
            i += 1

        return default_filename
    def switch_backends(self, FigureCanvasClass):
        """
        Instantiate an instance of FigureCanvasClass

        This is used for backend switching, e.g., to instantiate a
        FigureCanvasPS from a FigureCanvasGTK.  Note, deep copying is
        not done, so any changes to one of the instances (e.g., setting
        figure size or line props), will be reflected in the other
        """
        newCanvas = FigureCanvasClass(self.figure)
        # Propagate save state so the new canvas renders in "saving" mode
        # when created mid-save.
        newCanvas._is_saving = self._is_saving
        return newCanvas
    def mpl_connect(self, s, func):
        """
        Connect event with string *s* to *func*.  The signature of *func* is::

          def func(event)

        where event is a :class:`matplotlib.backend_bases.Event`.  The
        following events are recognized

        - 'button_press_event'
        - 'button_release_event'
        - 'draw_event'
        - 'key_press_event'
        - 'key_release_event'
        - 'motion_notify_event'
        - 'pick_event'
        - 'resize_event'
        - 'scroll_event'
        - 'figure_enter_event',
        - 'figure_leave_event',
        - 'axes_enter_event',
        - 'axes_leave_event'
        - 'close_event'

        For the location events (button and key press/release), if the
        mouse is over the axes, the variable ``event.inaxes`` will be
        set to the :class:`~matplotlib.axes.Axes` the event occurs is
        over, and additionally, the variables ``event.xdata`` and
        ``event.ydata`` will be defined.  This is the mouse location
        in data coords.  See
        :class:`~matplotlib.backend_bases.KeyEvent` and
        :class:`~matplotlib.backend_bases.MouseEvent` for more info.

        Return value is a connection id that can be used with
        :meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.

        Example usage::

            def on_press(event):
                print('you pressed', event.button, event.xdata, event.ydata)

            cid = canvas.mpl_connect('button_press_event', on_press)
        """
        if s == 'idle_event':
            warn_deprecated(1.5,
                "idle_event is only implemented for the wx backend, and will "
                "be removed in matplotlib 2.1. Use the animations module "
                "instead.")

        return self.callbacks.connect(s, func)
    def mpl_disconnect(self, cid):
        """
        Disconnect callback id cid

        Example usage::

            cid = canvas.mpl_connect('button_press_event', on_press)
            #...later
            canvas.mpl_disconnect(cid)
        """
        return self.callbacks.disconnect(cid)
    def new_timer(self, *args, **kwargs):
        """
        Creates a new backend-specific subclass of
        :class:`backend_bases.Timer`. This is useful for getting periodic
        events through the backend's native event loop. Implemented only for
        backends with GUIs.

        optional arguments:

        *interval*
            Timer interval in milliseconds

        *callbacks*
            Sequence of (func, args, kwargs) where func(*args, **kwargs) will
            be executed by the timer every *interval*.
        """
        return TimerBase(*args, **kwargs)
    def flush_events(self):
        """
        Flush the GUI events for the figure. Implemented only for
        backends with GUIs.
        """
        raise NotImplementedError
    def start_event_loop(self, timeout):
        """
        Start an event loop.  This is used to start a blocking event
        loop so that interactive functions, such as ginput and
        waitforbuttonpress, can wait for events.  This should not be
        confused with the main GUI event loop, which is always running
        and has nothing to do with this.

        This is implemented only for backends with GUIs.
        """
        raise NotImplementedError
    def stop_event_loop(self):
        """
        Stop an event loop.  This is used to stop a blocking event
        loop so that interactive functions, such as ginput and
        waitforbuttonpress, can wait for events.

        This is implemented only for backends with GUIs.
        """
        raise NotImplementedError
def start_event_loop_default(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
such, it throws a deprecated warning.
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
str = "Using default event loop until function specific"
str += " to this GUI is implemented"
warnings.warn(str, mplDeprecation)
if timeout <= 0:
timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter * timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
    def stop_event_loop_default(self):
        """
        Stop an event loop.  This is used to stop a blocking event
        loop so that interactive functions, such as ginput and
        waitforbuttonpress, can wait for events.
        """
        # Clearing this flag terminates the loop in
        # start_event_loop_default on its next iteration.
        self._looping = False
def key_press_handler(event, canvas, toolbar=None):
    """
    Implement the default mpl key bindings for the canvas and toolbar
    described at :ref:`key-event-handling`

    *event*
        a :class:`KeyEvent` instance
    *canvas*
        a :class:`FigureCanvasBase` instance
    *toolbar*
        a :class:`NavigationToolbar2` instance
    """
    # these bindings happen whether you are over an axes or not

    if event.key is None:
        return

    # Load key-mappings from your matplotlibrc file.
    fullscreen_keys = rcParams['keymap.fullscreen']
    home_keys = rcParams['keymap.home']
    back_keys = rcParams['keymap.back']
    forward_keys = rcParams['keymap.forward']
    pan_keys = rcParams['keymap.pan']
    zoom_keys = rcParams['keymap.zoom']
    save_keys = rcParams['keymap.save']
    quit_keys = rcParams['keymap.quit']
    grid_keys = rcParams['keymap.grid']
    toggle_yscale_keys = rcParams['keymap.yscale']
    toggle_xscale_keys = rcParams['keymap.xscale']
    all = rcParams['keymap.all_axes']

    # toggle fullscreen mode ('f', 'ctrl + f')
    if event.key in fullscreen_keys:
        try:
            canvas.manager.full_screen_toggle()
        except AttributeError:
            pass

    # quit the figure (default key 'ctrl+w')
    if event.key in quit_keys:
        Gcf.destroy_fig(canvas.figure)

    if toolbar is not None:
        # home or reset mnemonic  (default key 'h', 'home' and 'r')
        if event.key in home_keys:
            toolbar.home()
        # forward / backward keys to enable left handed quick navigation
        # (default key for backward: 'left', 'backspace' and 'c')
        elif event.key in back_keys:
            toolbar.back()
        # (default key for forward: 'right' and 'v')
        elif event.key in forward_keys:
            toolbar.forward()
        # pan mnemonic (default key 'p')
        elif event.key in pan_keys:
            toolbar.pan()
            toolbar._set_cursor(event)
        # zoom mnemonic (default key 'o')
        elif event.key in zoom_keys:
            toolbar.zoom()
            toolbar._set_cursor(event)
        # saving current figure (default key 's')
        elif event.key in save_keys:
            toolbar.save_figure()

    if event.inaxes is None:
        return

    # these bindings require the mouse to be over an axes to trigger
    # switching on/off a grid in current axes (default key 'g')
    if event.key in grid_keys:
        event.inaxes.grid()
        canvas.draw()
    # toggle scaling of y-axes between 'log and 'linear' (default key 'l')
    elif event.key in toggle_yscale_keys:
        ax = event.inaxes
        scale = ax.get_yscale()
        if scale == 'log':
            ax.set_yscale('linear')
            ax.figure.canvas.draw()
        elif scale == 'linear':
            try:
                ax.set_yscale('log')
            except ValueError as exc:
                warnings.warn(str(exc))
                ax.set_yscale('linear')
            ax.figure.canvas.draw()
    # toggle scaling of x-axes between 'log and 'linear' (default key 'k')
    elif event.key in toggle_xscale_keys:
        ax = event.inaxes
        scalex = ax.get_xscale()
        if scalex == 'log':
            ax.set_xscale('linear')
            ax.figure.canvas.draw()
        elif scalex == 'linear':
            try:
                ax.set_xscale('log')
            # BUGFIX: the exception must be bound with 'as exc' -- the
            # original handler referenced an undefined name `exc` and so
            # raised a NameError instead of emitting the warning.
            except ValueError as exc:
                warnings.warn(str(exc))
                ax.set_xscale('linear')
            ax.figure.canvas.draw()
    elif (event.key.isdigit() and event.key != '0') or event.key in all:
        # keys in list 'all' enables all axes (default key 'a'),
        # otherwise if key is a number only enable this particular axes
        # if it was the axes, where the event was raised
        if not (event.key in all):
            n = int(event.key) - 1
        for i, a in enumerate(canvas.figure.get_axes()):
            # consider axes, in which the event was raised
            # FIXME: Why only this axes?
            if event.x is not None and event.y is not None \
                    and a.in_axes(event):
                if event.key in all:
                    a.set_navigate(True)
                else:
                    a.set_navigate(i == n)
class NonGuiException(Exception):
    """Raised by FigureManagerBase.show when no GUI window can be shown."""
    pass
class FigureManagerBase(object):
    """
    Helper class for pyplot mode, wraps everything up into a neat bundle

    Public attibutes:

    *canvas*
        A :class:`FigureCanvasBase` instance

    *num*
        The figure number
    """
    def __init__(self, canvas, num):
        self.canvas = canvas
        canvas.manager = self  # store a pointer to parent
        self.num = num

        # The toolmanager toolbar installs its own key handling, so only
        # hook up the classic key_press_handler otherwise.
        if rcParams['toolbar'] != 'toolmanager':
            self.key_press_handler_id = self.canvas.mpl_connect(
                'key_press_event',
                self.key_press)
        else:
            self.key_press_handler_id = None
        """
        The returned id from connecting the default key handler via
        :meth:`FigureCanvasBase.mpl_connnect`.

        To disable default key press handling::

            manager, canvas = figure.canvas.manager, figure.canvas
            canvas.mpl_disconnect(manager.key_press_handler_id)
        """

    def show(self):
        """
        For GUI backends, show the figure window and redraw.
        For non-GUI backends, raise an exception to be caught
        by :meth:`~matplotlib.figure.Figure.show`, for an
        optional warning.
        """
        raise NonGuiException()

    def destroy(self):
        # No-op here; GUI backends tear down their window.
        pass

    def full_screen_toggle(self):
        # No-op here; GUI backends override.
        pass

    def resize(self, w, h):
        """For gui backends, resize the window (in pixels)."""
        pass

    def key_press(self, event):
        """
        Implement the default mpl key bindings defined at
        :ref:`key-event-handling`
        """
        if rcParams['toolbar'] != 'toolmanager':
            key_press_handler(event, self.canvas, self.canvas.toolbar)

    def show_popup(self, msg):
        """
        Display message in a popup -- GUI only
        """
        pass

    def get_window_title(self):
        """
        Get the title text of the window containing the figure.
        Return None for non-GUI backends (e.g., a PS backend).
        """
        return 'image'

    def set_window_title(self, title):
        """
        Set the title text of the window containing the figure.  Note that
        this has no effect for non-GUI backends (e.g., a PS backend).
        """
        pass
cursors = tools.cursors  # cursor enum re-exported for NavigationToolbar2.set_cursor
class NavigationToolbar2(object):
    """
    Base class for the navigation cursor, version 2

    backends must implement a canvas that handles connections for
    'button_press_event' and 'button_release_event'.  See
    :meth:`FigureCanvasBase.mpl_connect` for more information


    They must also define

      :meth:`save_figure`
         save the current figure

      :meth:`set_cursor`
         if you want the pointer icon to change

      :meth:`_init_toolbar`
         create your toolbar widget

      :meth:`draw_rubberband` (optional)
         draw the zoom to rect "rubberband" rectangle

      :meth:`press`  (optional)
         whenever a mouse button is pressed, you'll be notified with
         the event

      :meth:`release` (optional)
         whenever a mouse button is released, you'll be notified with
         the event

      :meth:`dynamic_update` (optional)
         dynamically update the window while navigating

      :meth:`set_message` (optional)
         display message

      :meth:`set_history_buttons` (optional)
         you can change the history back / forward buttons to
         indicate disabled / enabled state.

    That's it, we'll do the rest!
    """
    # list of toolitems to add to the toolbar, format is:
    # (
    #   text, # the text of the button (often not visible to users)
    #   tooltip_text, # the tooltip shown on hover (where possible)
    #   image_file, # name of the image for the button (without the extension)
    #   name_of_method, # name of the method in NavigationToolbar2 to call
    # )
    toolitems = (
        ('Home', 'Reset original view', 'home', 'home'),
        ('Back', 'Back to  previous view', 'back', 'back'),
        ('Forward', 'Forward to next view', 'forward', 'forward'),
        (None, None, None, None),
        ('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
        ('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
        ('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
        (None, None, None, None),
        ('Save', 'Save the figure', 'filesave', 'save_figure'),
      )
    def __init__(self, canvas):
        """Wire the toolbar to *canvas* and initialise navigation state."""
        self.canvas = canvas
        canvas.toolbar = self
        # a dict from axes index to a list of view limits
        self._views = cbook.Stack()
        self._positions = cbook.Stack()  # stack of subplot positions
        self._xypress = None  # the location and axis info at the time
                              # of the press
        self._idPress = None
        self._idRelease = None
        self._active = None
        self._lastCursor = None
        self._init_toolbar()
        self._idDrag = self.canvas.mpl_connect(
            'motion_notify_event', self.mouse_move)

        self._ids_zoom = []
        self._zoom_mode = None

        self._button_pressed = None  # determined by the button pressed
                                     # at start

        self.mode = ''  # a mode string for the status bar
        self.set_history_buttons()
    def set_message(self, s):
        """Display a message on toolbar or in status bar"""
        # No-op here; GUI backends override.
        pass
def back(self, *args):
"""move back up the view lim stack"""
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
    def dynamic_update(self):
        # Optional hook: dynamically update the window while navigating.
        pass
    def draw_rubberband(self, event, x0, y0, x1, y1):
        """Draw a rectangle rubberband to indicate zoom limits"""
        pass
    def remove_rubberband(self):
        """Remove the rubberband"""
        pass
def forward(self, *args):
"""Move forward in the view lim stack"""
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
"""Restore the original view"""
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
    def _init_toolbar(self):
        """
        This is where you actually build the GUI widgets (called by
        __init__).  The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
        ``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
        across backends (there are ppm versions in CVS also).

        You just need to set the callbacks

        home         : self.home
        back         : self.back
        forward      : self.forward
        hand         : self.pan
        zoom_to_rect : self.zoom
        filesave     : self.save_figure

        You only need to define the last one - the others are in the base
        class implementation.
        """
        raise NotImplementedError
    def _set_cursor(self, event):
        """Pick the pointer shape for *event* based on the active tool.

        POINTER outside axes or with no tool active; SELECT_REGION while
        zooming; MOVE while panning.  The backend's set_cursor is only
        called when the shape actually changes.
        """
        if not event.inaxes or not self._active:
            if self._lastCursor != cursors.POINTER:
                self.set_cursor(cursors.POINTER)
                self._lastCursor = cursors.POINTER
        else:
            if self._active == 'ZOOM':
                if self._lastCursor != cursors.SELECT_REGION:
                    self.set_cursor(cursors.SELECT_REGION)
                    self._lastCursor = cursors.SELECT_REGION
            elif (self._active == 'PAN' and
                  self._lastCursor != cursors.MOVE):
                self.set_cursor(cursors.MOVE)
                self._lastCursor = cursors.MOVE
    def mouse_move(self, event):
        """Update the cursor shape and the status-bar coordinate readout."""
        self._set_cursor(event)

        if event.inaxes and event.inaxes.get_navigate():

            try:
                s = event.inaxes.format_coord(event.xdata, event.ydata)
            except (ValueError, OverflowError):
                # Coordinates could not be formatted; leave the message as-is.
                pass
            else:
                # Append cursor data of the top-most mouseover artist, if any.
                artists = [a for a in event.inaxes.mouseover_set
                           if a.contains(event) and a.get_visible()]

                if artists:
                    a = max(artists, key=lambda x: x.zorder)
                    if a is not event.inaxes.patch:
                        data = a.get_cursor_data(event)
                        if data is not None:
                            s += ' [%s]' % a.format_cursor_data(data)

                if len(self.mode):
                    self.set_message('%s, %s' % (self.mode, s))
                else:
                    self.set_message(s)
        else:
            self.set_message(self.mode)
    def pan(self, *args):
        """Activate the pan/zoom tool. pan with left button, zoom with right"""
        # set the pointer icon and button press funcs to the
        #  appropriate callbacks
        # Acts as a toggle: a second call deactivates the tool.
        if self._active == 'PAN':
            self._active = None
        else:
            self._active = 'PAN'
        if self._idPress is not None:
            self._idPress = self.canvas.mpl_disconnect(self._idPress)
            self.mode = ''

        if self._idRelease is not None:
            self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
            self.mode = ''

        if self._active:
            self._idPress = self.canvas.mpl_connect(
                'button_press_event', self.press_pan)
            self._idRelease = self.canvas.mpl_connect(
                'button_release_event', self.release_pan)
            self.mode = 'pan/zoom'
            # Take the widget lock so pick events are suppressed while panning.
            self.canvas.widgetlock(self)
        else:
            self.canvas.widgetlock.release(self)

        for a in self.canvas.figure.get_axes():
            a.set_navigate_mode(self._active)

        self.set_message(self.mode)
    def press(self, event):
        """Called whenver a mouse button is pressed."""
        # Hook for backend subclasses; intentionally a no-op here.
        pass
    def press_pan(self, event):
        """the press mouse button in pan/zoom mode callback"""
        # Only the left (1) and right (3) buttons start a pan/zoom drag.
        if event.button == 1:
            self._button_pressed = 1
        elif event.button == 3:
            self._button_pressed = 3
        else:
            self._button_pressed = None
            return
        x, y = event.x, event.y
        # push the current view to define home if stack is empty
        if self._views.empty():
            self.push_current()
        self._xypress = []
        for i, a in enumerate(self.canvas.figure.get_axes()):
            # Only pan axes that contain the press and allow navigation.
            if (x is not None and y is not None and a.in_axes(event) and
                    a.get_navigate() and a.can_pan()):
                a.start_pan(x, y, event.button)
                self._xypress.append((a, i))
                # Swap the idle motion handler for the pan-drag handler.
                self.canvas.mpl_disconnect(self._idDrag)
                self._idDrag = self.canvas.mpl_connect('motion_notify_event',
                                                       self.drag_pan)
        self.press(event)
    def press_zoom(self, event):
        """the press mouse button in zoom to rect mode callback"""
        # If we're already in the middle of a zoom, pressing another
        # button works to "cancel"
        if self._ids_zoom != []:
            for zoom_id in self._ids_zoom:
                self.canvas.mpl_disconnect(zoom_id)
            self.release(event)
            self.draw()
            self._xypress = None
            self._button_pressed = None
            self._ids_zoom = []
            return
        # Left button (1) zooms in, right button (3) zooms out.
        if event.button == 1:
            self._button_pressed = 1
        elif event.button == 3:
            self._button_pressed = 3
        else:
            self._button_pressed = None
            return
        x, y = event.x, event.y
        # push the current view to define home if stack is empty
        if self._views.empty():
            self.push_current()
        self._xypress = []
        for i, a in enumerate(self.canvas.figure.get_axes()):
            if (x is not None and y is not None and a.in_axes(event) and
                    a.get_navigate() and a.can_zoom()):
                # Remember the anchor point and the pre-zoom view per axes.
                self._xypress.append((x, y, a, i, a._get_view()))
        # Track the drag and the x/y key modifiers for the whole gesture.
        id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)
        id2 = self.canvas.mpl_connect('key_press_event',
                                      self._switch_on_zoom_mode)
        id3 = self.canvas.mpl_connect('key_release_event',
                                      self._switch_off_zoom_mode)
        self._ids_zoom = id1, id2, id3
        # 'x' or 'y' held at press time constrains the zoom to that axis.
        self._zoom_mode = event.key
        self.press(event)
    def _switch_on_zoom_mode(self, event):
        # Key pressed mid-drag: constrain the zoom to the pressed axis key.
        self._zoom_mode = event.key
        self.mouse_move(event)
    def _switch_off_zoom_mode(self, event):
        # Key released mid-drag: back to unconstrained rectangle zoom.
        self._zoom_mode = None
        self.mouse_move(event)
def push_current(self):
"""push the current view limits and position onto the stack"""
views = []
pos = []
for a in self.canvas.figure.get_axes():
views.append(a._get_view())
# Store both the original and modified positions
pos.append((
a.get_position(True).frozen(),
a.get_position().frozen()))
self._views.push(views)
self._positions.push(pos)
self.set_history_buttons()
    def release(self, event):
        """this will be called whenever mouse button is released"""
        # Hook for backend subclasses; intentionally a no-op here.
        pass
    def release_pan(self, event):
        """the release mouse button callback in pan/zoom mode"""
        if self._button_pressed is None:
            return
        # Restore the idle motion handler that press_pan swapped out.
        self.canvas.mpl_disconnect(self._idDrag)
        self._idDrag = self.canvas.mpl_connect(
            'motion_notify_event', self.mouse_move)
        for a, ind in self._xypress:
            a.end_pan()
        if not self._xypress:
            return
        self._xypress = []
        self._button_pressed = None
        # Record the post-pan view so back/forward navigation can reach it.
        self.push_current()
        self.release(event)
        self.draw()
    def drag_pan(self, event):
        """the drag callback in pan/zoom mode"""
        for a, ind in self._xypress:
            #safer to use the recorded button at the press than current button:
            #multiple button can get pressed during motion...
            a.drag_pan(self._button_pressed, event.key, event.x, event.y)
        self.dynamic_update()
    def drag_zoom(self, event):
        """the drag callback in zoom mode"""
        if self._xypress:
            x, y = event.x, event.y
            lastx, lasty, a, ind, view = self._xypress[0]
            # adjust x, last, y, last
            # Clamp the rubberband corners to the axes bounding box.
            x1, y1, x2, y2 = a.bbox.extents
            x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)
            y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)
            # Axis-constrained modes span the full extent of the other axis.
            if self._zoom_mode == "x":
                x1, y1, x2, y2 = a.bbox.extents
                y, lasty = y1, y2
            elif self._zoom_mode == "y":
                x1, y1, x2, y2 = a.bbox.extents
                x, lastx = x1, x2
            self.draw_rubberband(event, x, y, lastx, lasty)
    def release_zoom(self, event):
        """the release mouse button callback in zoom to rect mode"""
        # Tear down the drag/key handlers installed by press_zoom.
        for zoom_id in self._ids_zoom:
            self.canvas.mpl_disconnect(zoom_id)
        self._ids_zoom = []
        self.remove_rubberband()
        if not self._xypress:
            return
        last_a = []
        for cur_xypress in self._xypress:
            x, y = event.x, event.y
            lastx, lasty, a, ind, view = cur_xypress
            # ignore singular clicks - 5 pixels is a threshold
            # allows the user to "cancel" a zoom action
            # by zooming by less than 5 pixels
            if ((abs(x - lastx) < 5 and self._zoom_mode != "y") or
                    (abs(y - lasty) < 5 and self._zoom_mode != "x")):
                self._xypress = None
                self.release(event)
                self.draw()
                return
            # detect twinx,y axes and avoid double zooming
            twinx, twiny = False, False
            if last_a:
                for la in last_a:
                    if a.get_shared_x_axes().joined(a, la):
                        twinx = True
                    if a.get_shared_y_axes().joined(a, la):
                        twiny = True
            last_a.append(a)
            # Left button (1) zooms in, right button (3) zooms out.
            if self._button_pressed == 1:
                direction = 'in'
            elif self._button_pressed == 3:
                direction = 'out'
            else:
                continue
            a._set_view_from_bbox((lastx, lasty, x, y), direction,
                                  self._zoom_mode, twinx, twiny)
        self.draw()
        self._xypress = None
        self._button_pressed = None
        self._zoom_mode = None
        # Record the post-zoom view in the navigation history.
        self.push_current()
        self.release(event)
def draw(self):
"""Redraw the canvases, update the locators"""
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw_idle()
    def _update_view(self):
        """Update the viewlim and position from the view and
        position stack for each axes
        """
        # Calling the stack objects returns the current (top) element,
        # or None when the stack is empty.
        views = self._views()
        if views is None:
            return
        pos = self._positions()
        if pos is None:
            return
        for i, a in enumerate(self.canvas.figure.get_axes()):
            a._set_view(views[i])
            # Restore both the original and modified positions
            a.set_position(pos[i][0], 'original')
            a.set_position(pos[i][1], 'active')
        self.canvas.draw_idle()
    def save_figure(self, *args):
        """Save the current figure"""
        # Backends must implement their own file-save dialog.
        raise NotImplementedError
    def set_cursor(self, cursor):
        """
        Set the current cursor to one of the :class:`Cursors`
        enums values
        """
        # Backend-specific subclasses override this; the base is a no-op.
        pass
    def update(self):
        """Reset the axes stack"""
        # Drop all navigation history and refresh the back/forward buttons.
        self._views.clear()
        self._positions.clear()
        self.set_history_buttons()
    def zoom(self, *args):
        """Activate zoom to rect mode"""
        # Toggle: a second press deactivates the tool.
        if self._active == 'ZOOM':
            self._active = None
        else:
            self._active = 'ZOOM'
        # Always disconnect whatever press/release handlers are installed
        # (they may belong to the pan tool) before wiring up new ones.
        if self._idPress is not None:
            self._idPress = self.canvas.mpl_disconnect(self._idPress)
            self.mode = ''
        if self._idRelease is not None:
            self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
            self.mode = ''
        if self._active:
            self._idPress = self.canvas.mpl_connect('button_press_event',
                                                    self.press_zoom)
            self._idRelease = self.canvas.mpl_connect('button_release_event',
                                                      self.release_zoom)
            self.mode = 'zoom rect'
            # Take the widget lock so other widgets ignore events meanwhile.
            self.canvas.widgetlock(self)
        else:
            self.canvas.widgetlock.release(self)
        for a in self.canvas.figure.get_axes():
            a.set_navigate_mode(self._active)
        self.set_message(self.mode)
    def set_history_buttons(self):
        """Enable or disable back/forward button"""
        # Backend-specific subclasses override this; the base is a no-op.
        pass
class ToolContainerBase(object):
    """
    Base class for all tool containers, e.g. toolbars.
    Attributes
    ----------
    toolmanager : `ToolManager` object that holds the tools that
        this `ToolContainer` wants to communicate with.
    """
    def __init__(self, toolmanager):
        # Listen for tool removal so the container can drop its widget.
        self.toolmanager = toolmanager
        self.toolmanager.toolmanager_connect('tool_removed_event',
                                             self._remove_tool_cbk)
    def _tool_toggled_cbk(self, event):
        """
        Captures the 'tool_trigger_[name]'
        This only gets used for toggled tools
        """
        self.toggle_toolitem(event.tool.name, event.tool.toggled)
    def add_tool(self, tool, group, position=-1):
        """
        Adds a tool to this container
        Parameters
        ----------
        tool : tool_like
            The tool to add, see `ToolManager.get_tool`.
        group : str
            The name of the group to add this tool to.
        position : int (optional)
            The position within the group to place this tool.  Defaults to end.
        """
        tool = self.toolmanager.get_tool(tool)
        image = self._get_image_filename(tool.image)
        # A tool with a 'toggled' attribute gets a toggle button and a
        # subscription keeping the button state in sync with the tool.
        toggle = getattr(tool, 'toggled', None) is not None
        self.add_toolitem(tool.name, group, position,
                          image, tool.description, toggle)
        if toggle:
            self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name,
                                                 self._tool_toggled_cbk)
    def _remove_tool_cbk(self, event):
        """Captures the 'tool_removed_event' signal and removes the tool"""
        self.remove_toolitem(event.tool.name)
    def _get_image_filename(self, image):
        """Find the image based on its name"""
        # TODO: better search for images, they are not always in the
        # datapath
        basedir = os.path.join(rcParams['datapath'], 'images')
        if image is not None:
            fname = os.path.join(basedir, image)
        else:
            fname = None
        return fname
    def trigger_tool(self, name):
        """
        Trigger the tool
        Parameters
        ----------
        name : String
            Name(id) of the tool triggered from within the container
        """
        self.toolmanager.trigger_tool(name, sender=self)
    def add_toolitem(self, name, group, position, image, description, toggle):
        """
        Add a toolitem to the container
        This method must get implemented per backend
        The callback associated with the button click event,
        must be **EXACTLY** `self.trigger_tool(name)`
        Parameters
        ----------
        name : string
            Name of the tool to add, this gets used as the tool's ID and as the
            default label of the buttons
        group : String
            Name of the group that this tool belongs to
        position : Int
            Position of the tool within its group, if -1 it goes at the End
        image_file : String
            Filename of the image for the button or `None`
        description : String
            Description of the tool, used for the tooltips
        toggle : Bool
            * `True` : The button is a toggle (change the pressed/unpressed
              state between consecutive clicks)
            * `False` : The button is a normal button (returns to unpressed
              state after release)
        """
        raise NotImplementedError
    def toggle_toolitem(self, name, toggled):
        """
        Toggle the toolitem without firing event
        Parameters
        ----------
        name : String
            Id of the tool to toggle
        toggled : bool
            Whether to set this tool as toggled or not.
        """
        raise NotImplementedError
    def remove_toolitem(self, name):
        """
        Remove a toolitem from the `ToolContainer`
        This method must get implemented per backend
        Called when `ToolManager` emits a `tool_removed_event`
        Parameters
        ----------
        name : string
            Name of the tool to remove
        """
        raise NotImplementedError
class StatusbarBase(object):
    """Base class for the statusbar"""
    def __init__(self, toolmanager):
        # Display every 'tool_message_event' emitted by the ToolManager.
        self.toolmanager = toolmanager
        self.toolmanager.toolmanager_connect('tool_message_event',
                                             self._message_cbk)
    def _message_cbk(self, event):
        """Captures the 'tool_message_event' and set the message"""
        self.set_message(event.message)
    def set_message(self, s):
        """
        Display a message on toolbar or in status bar
        Parameters
        ----------
        s : str
            Message text
        """
        # Backend-specific subclasses override this; the base is a no-op.
        pass
|
ctuning/ck
|
refs/heads/master
|
incubator/connectme/testing/connectme/cli.py
|
1
|
import os
###########################################################################
def run(argv=None):
    """
    Run CM from command line.
    Args:
        argv (str | list): CM input; taken from sys.argv when falsy.
    Returns:
        Dictionary:
            return (int): return code == 0 if no error
                                          >0 if error
            (error) (str): error string if return>0
            data from a given action
    """
    cm_input = {}
    con = False
    # No explicit input: read the real command line and enable console mode.
    if not argv:
        import sys
        argv = sys.argv[1:]
        con = True
    parsed = parse(argv)
    args = parsed['args']
    options = parsed['options']
    extra = parsed['extra']
    # Useful if input is string
    argv = parsed['argv']
    # Explicit help request, or nothing at all on the command line.
    if len(args) == 0 and ('h' in options or 'help' in options
                           or len(options) == 0):
        print_help()
        exit(0)
    # Aggregate everything into one input dict.
    cm_input.update(options)
    cm_input['extra_cmd'] = extra
    # Positional arguments map to module/action/data; the rest go to 'args'.
    for key, value in zip(('module', 'action', 'data'), args):
        cm_input[key] = value
    if len(args) > 3:
        cm_input['args'] = args[3:]
    # Access CM
    from connectme import CM
    cm = CM(con=con)
    r = cm.init()
    if r['return'] > 0:
        return r
    return cm.access(cm_input, argv)
###########################################################################
def print_help():
    """
    Print command line help.
    """
    print('usage: cm [module (data class)] [action] [data] [arguments] [options]')
###########################################################################
def parse(cmd):
    """
    Parse a CM command line.
    Args:
        cmd (str | list): arguments as a string or a pre-split argv list.
    Returns:
        Dictionary::
            args (list)    : positional arguments
            options (dict) : options (``-k``/``--k``/``k=v`` forms)
            extra (str)    : everything after a bare ``--``
            argv (list)    : the argv list actually parsed
    """
    argv = cmd
    # A string input is tokenized shell-style first.
    if type(cmd) == str:
        import shlex
        argv = shlex.split(cmd)
    args = []
    options = {}
    extra = ''
    for index, token in enumerate(argv):
        eq_pos = token.find('=')
        if token == '--':
            # Everything after a bare '--' is opaque extra text.
            extra = ' '.join(argv[index + 1:])
            break
        elif token.startswith('@'):
            # '@file' merges options from a JSON/YAML file; a missing
            # file is silently ignored (matches original behavior).
            file_name = token[1:]
            if os.path.isfile(file_name):
                from connectme import io
                r = io.load_json_or_yaml(file_name)
                if r['return'] > 0:
                    return r
                options.update(r['data'])
        elif eq_pos > 0 or token.startswith('-'):
            # Option: bare flags become True, 'k=v' keeps the value;
            # leading '-' / '--' prefixes are stripped from the key.
            value = True
            key = token
            if eq_pos > 0:
                value = token[eq_pos + 1:]
                key = token[:eq_pos]
            if key.startswith('--'):
                key = key[2:]
            elif key.startswith('-'):
                key = key[1:]
            options[key] = value
        else:
            args.append(token)
    return {'return': 0, 'args': args, 'options': options, 'extra': extra, 'argv': argv}
###########################################################################
if __name__ == "__main__":
    # Exit with CM's return code so shell callers can detect failures.
    r = run()
    exit(r['return'])
|
barma1309/Kalista
|
refs/heads/master
|
.virtualenvs/Kalista/lib/python3.4/site-packages/django/conf/locale/fr/__init__.py
|
12133432
| |
NikolaYolov/invenio_backup
|
refs/heads/master
|
modules/websubmit/lib/functions/__init__.py
|
12133432
| |
drukhil/frappe
|
refs/heads/master
|
frappe/email/doctype/newsletter/__init__.py
|
12133432
| |
kmoocdev/edx-platform
|
refs/heads/kmooc.rc0
|
common/djangoapps/track/__init__.py
|
12133432
| |
shikhardb/scikit-learn
|
refs/heads/master
|
sklearn/semi_supervised/tests/__init__.py
|
12133432
| |
greggian/TapdIn
|
refs/heads/master
|
django/utils/stopwords.py
|
13
|
# Performance note: I benchmarked this code using a set instead of
# a list for the stopwords and was surprised to find that the list
# performed /better/ than the set - maybe because it's only a small
# list.
stopwords = '''
i
a
an
are
as
at
be
by
for
from
how
in
is
it
of
on
or
that
the
this
to
was
what
when
where
'''.split()


def strip_stopwords(sentence):
    "Removes stopwords - also normalizes whitespace"
    # Case-insensitive filter; joining the survivors with single spaces
    # collapses any run of whitespace from the input.
    kept = [word for word in sentence.split()
            if word.lower() not in stopwords]
    return u' '.join(kept)
|
verdurin/bcbio-nextgen
|
refs/heads/master
|
bcbio/structural/manta.py
|
3
|
"""Structural variant detection with the Manta caller from Illumina.
https://github.com/Illumina/manta
"""
import os
import sys
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
from bcbio.variation import vcfutils
from bcbio.provenance import do
def run(items):
    """Perform detection of structural variations with Manta.

    Returns the input items with a manta entry appended to each item's
    list of structural-variant calls.
    """
    paired = vcfutils.get_paired(items)
    base_data = paired.tumor_data if paired else items[0]
    work_dir = _sv_workdir(base_data)
    workflow_file = _prep_config(items, paired, work_dir)
    variant_file = _run_workflow(items, paired, workflow_file, work_dir)
    sample_file = _select_sample(items, paired, variant_file, work_dir)
    out = []
    for data in items:
        # Every sample records the same per-run output file.
        data.setdefault("sv", []).append({"variantcaller": "manta",
                                          "vrn_file": sample_file})
        out.append(data)
    return out
def _run_workflow(items, paired, workflow_file, work_dir):
    """Run manta analysis inside prepared workflow directory.

    Returns the path of the VCF manta writes: somaticSV for tumor/normal
    pairs, diploidSV otherwise.
    """
    data = paired.tumor_data if paired else items[0]
    out_file = os.path.join(work_dir, "results", "variants",
                            "somaticSV.vcf.gz" if paired and paired.normal_bam else "diploidSV.vcf.gz")
    if not utils.file_exists(out_file):
        # NOTE(review): dd.get_num_cores(data) is placed in the argv list
        # unconverted — presumably an int; confirm do.run tolerates it.
        cmd = [sys.executable, workflow_file, "-m", "local", "-j", dd.get_num_cores(data),
               "--quiet"]
        do.run(cmd, "Run manta SV analysis")
    return out_file
def _select_sample(items, paired, variant_file, work_dir):
    """Fix VCF to have the correct sample name and select tumor samples from paired analyses.

    Rewrites manta's placeholder sample column (TUMOR / SAMPLE) to the real
    sample name, subsets to that sample, then bgzips and indexes the result.
    """
    sample_name = paired.tumor_name if paired else dd.get_sample_name(items[0])
    out_file = os.path.join(work_dir, "%s-%s.vcf.gz" % (utils.splitext_plus(os.path.basename(variant_file))[0],
                                                        sample_name))
    if not utils.file_uptodate(out_file, variant_file):
        with file_transaction(items[0], out_file) as tx_out_file:
            # Shell pipeline; placeholders are filled via format(**locals()).
            cmd = "zcat {variant_file} | "
            if paired and paired.normal_bam:
                cmd += "sed 's/\tTUMOR/\t{sample_name}/' | bcftools view -s {sample_name}"
            else:
                cmd += "sed 's/\tSAMPLE/\t{sample_name}/' | bcftools view -s {sample_name}"
            cmd += " | bgzip -c > {tx_out_file}"
            do.run(cmd.format(**locals()), "Run manta SV analysis")
    return vcfutils.bgzip_and_index(out_file, items[0]["config"])
def _prep_config(items, paired, work_dir):
    """Run initial configuration, generating a run directory for Manta.

    Returns the path to the generated runWorkflow.py driver script.
    """
    assert utils.which("configManta.py"), "Could not find installed configManta.py"
    out_file = os.path.join(work_dir, "runWorkflow.py")
    if not utils.file_exists(out_file):
        cmd = [sys.executable, utils.which("configManta.py")]
        if paired:
            # Tumor/normal pair when a normal BAM exists; otherwise manta
            # treats the tumor-only BAM as its 'normal' input.
            if paired.normal_bam:
                cmd += ["--normalBam=%s" % paired.normal_bam, "--tumorBam=%s" % paired.tumor_bam]
            else:
                cmd += ["--normalBam=%s" % paired.tumor_bam]
        else:
            assert len(items) == 1, "Expect a single item if non-paired for manta: %s" % \
                ([dd.get_sample_name(d) for d in items])
            cmd += ["--normalBam=%s" % dd.get_align_bam(items[0])]
        data = paired.tumor_data if paired else items[0]
        cmd += ["--referenceFasta=%s" % dd.get_ref_file(data), "--runDir=%s" % work_dir]
        # Anything narrower than whole-genome coverage runs in exome mode.
        if dd.get_coverage_interval(data) not in ["genome"]:
            cmd += ["--exome"]
        do.run(cmd, "Configure manta SV analysis")
    return out_file
def _sv_workdir(data):
    """Return (creating it if needed) the per-sample manta work directory."""
    base = os.path.join(data["dirs"]["work"], "structural",
                        dd.get_sample_name(data), "manta")
    return utils.safe_makedir(base)
|
talbrecht/pism_pik06
|
refs/heads/stable0.6
|
doc/site-packages/pybtex/kpathsea.py
|
5
|
# Copyright (c) 2007, 2008, 2009, 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from subprocess import Popen, PIPE
def kpsewhich(filename):
    """Locate *filename* via the TeX ``kpsewhich`` tool.

    Returns the path reported on stdout (trailing whitespace stripped),
    or None implicitly when kpsewhich exits non-zero.
    """
    process = Popen(['kpsewhich', filename], stdout=PIPE, stderr=PIPE)
    stdout, _stderr = process.communicate()
    if process.returncode == 0:
        return stdout.rstrip()
|
sjsucohort6/openstack
|
refs/heads/master
|
python/venv/lib/python2.7/site-packages/cinderclient/v2/limits.py
|
8
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cinderclient import base
class Limits(base.Resource):
    """A collection of RateLimit and AbsoluteLimit objects."""
    def __repr__(self):
        return "<Limits>"
    @property
    def absolute(self):
        # Yield one AbsoluteLimit per (name, value) entry in the
        # 'absolute' section of the raw API payload.
        for (name, value) in list(self._info['absolute'].items()):
            yield AbsoluteLimit(name, value)
    @property
    def rate(self):
        # Flatten the nested 'rate' section: each group shares a uri/regex
        # and contains several per-verb limit entries.
        for group in self._info['rate']:
            uri = group['uri']
            regex = group['regex']
            for rate in group['limit']:
                yield RateLimit(rate['verb'], uri, regex, rate['value'],
                                rate['remaining'], rate['unit'],
                                rate['next-available'])
class RateLimit(object):
    """Data model that represents a flattened view of a single rate limit."""

    def __init__(self, verb, uri, regex, value, remain,
                 unit, next_available):
        # verb: HTTP method the limit applies to; remain/value: current
        # and maximum allowance per 'unit' of time.
        self.verb = verb
        self.uri = uri
        self.regex = regex
        self.value = value
        self.remain = remain
        self.unit = unit
        self.next_available = next_available

    def __eq__(self, other):
        # Compare all attributes at once via tuples.
        mine = (self.uri, self.regex, self.value, self.verb,
                self.remain, self.unit, self.next_available)
        theirs = (other.uri, other.regex, other.value, other.verb,
                  other.remain, other.unit, other.next_available)
        return mine == theirs

    def __repr__(self):
        return "<RateLimit: method=%s uri=%s>" % (self.verb, self.uri)
class AbsoluteLimit(object):
    """Data model that represents a single absolute limit.

    Attributes:
        name: identifier of the limit (e.g. maxTotalVolumes).
        value: the limit's numeric value as returned by the API.
    """

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __eq__(self, other):
        # Robustness fix: comparing against a non-AbsoluteLimit previously
        # raised AttributeError; defer to the other operand instead so
        # `limit == something_else` simply evaluates False.
        if not isinstance(other, AbsoluteLimit):
            return NotImplemented
        return self.value == other.value and self.name == other.name

    def __repr__(self):
        return "<AbsoluteLimit: name=%s>" % (self.name)
class LimitsManager(base.Manager):
    """Manager object used to interact with limits resource."""
    resource_class = Limits
    def get(self):
        """Get a specific extension.
        :rtype: :class:`Limits`
        """
        # GET /limits, deserialized into a Limits resource.
        return self._get("/limits", "limits")
|
wh6b/study-neo4j-vs-orientdb
|
refs/heads/master
|
src/performances/workload_04/cherche_en_masse.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import os
from py2neo import Graph
import pyorient
import random
import re
import sys
import threading
import time
from common import affiche_temps
class Entite(object):
    """Mimic a Neo4j response row of the form
    {'id': object identifier, 'node': the object itself}."""
    def __init__(self, _id, _node):
        self.id, self.node = _id, _node
class Cherche(object):
    """Base benchmark client: runs the query suite and logs timings to CSV.

    Subclasses must set nb_personnes, nb_habitations, nb_noeuds, nb_aretes
    and filename before calling this __init__, and provide mk_requests() /
    execute_une_requete() / close().
    """
    def __init__(self,):
        # Human-readable labels for each benchmark query (keys must match
        # the self.requetes dict built by subclasses).
        self.recherches = {
            'AMISAMISAMISAMISPLUSJEUNEDESEXEDIFF': 'Ses amis et les amis de ses amis jusqu\'au 4e niveau de sexe different et du meme age ou plus jeune',
            'MEMENUMEROPLUSAGES': 'Personnes plus agées habitant le meme numero que lui',
            'MEMERUEMEILLEURSALAIRE': 'Personnes de sa rue qui ont un meilleur salaire que lui, trié par les plus hauts salaires',
            'MEMETRAVAILPLUSJEUNEMEMESEXEAUTREVILLEMEILLEURSALAIRE': 'Personnes travaillant dans le meme domain, plus jeune et ayant un salaire plus elevé de meme sexe dans une autre ville, trié par les plus hauts salaires',
            'LIENPLUSCOURT2PERSONNES': 'Lien social le plus court entre 2 personnes (sur 10 niveaux maximum)',
            # Algorithm not available in OrientDB, so no comparison possible:
            # 'TOUSLIENSPLUSCOURTS2PERSONNESMAX4NIVEAU':'Tous les liens socials plus court entre 2 personnes sur 4 niveaux maximum',
            'SUMSALAIREFAMILLEADULTES': 'La somme des salaires de la famille des plus de 20 ans vivant dans le meme logement',
        }
        if self.nb_personnes == 0 or self.nb_habitations == 0:
            print("La base ne doit pas etre vide d'habitants (Personne habitant dans un Numero")
            print("Remplir la base avec les workload precedents")
            exit(1)
        # Write the CSV header only when the log file does not exist yet.
        if not os.path.isfile(self.filename):
            self.ecrit("STATUT;REQUETE;DUREE(MS);DUREE(S MS);NB_TROUVE;NB_NOEUDS;NB_ARETES")
    def ecrit(self, _ligne):
        """Append one line to the CSV log file and echo it to stdout."""
        # Context manager guarantees the file handle is closed; the py2-only
        # `print _ligne` statement is replaced by the py2/py3 call form.
        with open(self.filename, 'a') as f:
            f.write("%s\n" % _ligne)
        print(_ligne)
    def cherche(self, _num_cli):
        """Execute the whole query suite once, in random order, logging
        status/duration/result-count for each query."""
        # list(...) makes the shuffle work on Python 3 as well, where
        # dict.items() returns a non-shuffleable view.
        desordre = list(self.recherches.items())
        random.shuffle(desordre)
        for recherche, label in desordre:
            larequete = self.requetes[recherche]
            print(" EXECUTE%s --> %s" % (_num_cli, larequete))
            t1 = datetime.datetime.now()
            try:
                qte_resultat = self.execute_une_requete(larequete)
                statut = "OK"
            except Exception as e:
                # NOTE(review): the '%s' placeholder below is printed
                # literally — no format args are supplied; confirm the
                # intended message.
                print("WARNING;%s; Erreur lors de l'execution de la requete.")
                print("%s" % (e))
                statut = "KO"
                qte_resultat = -1
            t2 = datetime.datetime.now()
            print(" DUREE%s --> %s" % (_num_cli, affiche_temps(recherche, t2 - t1)))
            self.ecrit("%s;%s;%s;%s;%s" % (statut, affiche_temps(recherche, t2 - t1), qte_resultat, self.nb_noeuds, self.nb_aretes))
class ChercheOrientDB(Cherche):
    """OrientDB implementation of the Cherche benchmark client."""
    def __init__(self,):
        # Connection settings come from the environment.
        self.client = pyorient.OrientDB(os.environ['ORIENTDBSERV'], int(os.environ['ORIENTDBPORT']))
        self.session_id = self.client.connect( os.environ['ORIENTDBUSER'], os.environ['ORIENTDBPASS'] )
        self.client.db_open( os.environ['ORIENTDBBASE'], os.environ['ORIENTDBUSER'], os.environ['ORIENTDBPASS'] )
        # Graph statistics used for random picks and CSV reporting.
        self.nb_noeuds = self.client.command( "select count(*) from V")[0].count
        self.nb_aretes = self.client.command( "select count(*) from E")[0].count
        self.clusterpersonneid = self.client.command( "select from Personne limit 1")[0]._rid.split(':')[0]
        self.clusterhabitatid = self.client.command( "select from Numero limit 1")[0]._rid.split(':')[0]
        self.nb_personnes = self.client.data_cluster_count([int(self.clusterpersonneid[1:])])
        self.nb_habitations = self.client.data_cluster_count([int(self.clusterhabitatid[1:])])
        self.last_personne = Entite("#%s:0" % self.clusterpersonneid, {})
        self.last_Habitation = Entite("#%s:0" % self.clusterhabitatid, {})
        self.filename = "%s/workload_04/compteurs.workload_04.orientdb.csv" % os.environ['BENCH_HOME']
        super(ChercheOrientDB, self).__init__()
    def mk_requests(self,):
        """Build the benchmark SQL queries around randomly picked records."""
        # py2-only bare `print` statements replaced by the py2/py3 call form.
        print(self.get_random_personne())
        print( "TESTDEVARIABLES => %s %s %s" % (self.get_random_personne().id, self.last_personne.node.sexe, self.last_personne.node.age) )
        self.requetes = {
            'AMISAMISAMISAMISPLUSJEUNEDESEXEDIFF': 'select from (SELECT expand( set(both("AMI").both("AMI").both("AMI").both("AMI")) ) FROM %s ) where sexe <> "%s" and %s >= age' % (self.get_random_personne().id, self.last_personne.node.sexe, self.last_personne.node.age),
            'MEMENUMEROPLUSAGES': 'select from Personne where out("HABITE").no in (select out("HABITE").no from %s) and age >= %s' % (self.get_random_personne().id, self.last_personne.node.age),
            'MEMERUEMEILLEURSALAIRE': 'select from Personne where out("HABITE").in("COMPORTE") in (select out("HABITE").in("COMPORTE") from %s) and salaire > %s order by salaire desc' % (self.get_random_personne().id, self.last_personne.node.salaire),
            'MEMETRAVAILPLUSJEUNEMEMESEXEAUTREVILLEMEILLEURSALAIRE': 'select *, out("HABITE").in("COMPORTE").in("ORGANISE").libcom from Personne where out("HABITE").in("COMPORTE").in("ORGANISE") not in (select out("HABITE").in("COMPORTE").in("ORGANISE") from %s) and activite in (select activite from %s) and sexe in (select sexe from %s) and age <> %s and salaire > %s order by salaire desc' % (self.get_random_personne().id, self.last_personne.id, self.last_personne.id, self.last_personne.node.age, self.last_personne.node.salaire),
            'LIENPLUSCOURT2PERSONNES': 'select shortestPath(%s, %s, "BOTH")' % (self.get_random_personne().id, self.get_random_personne().id),
            # 'TOUSLIENSPLUSCOURTS2PERSONNESMAX4NIVEAU': algorithm not available in OrientDB.
            'SUMSALAIREFAMILLEADULTES': 'select max(salaire) from Personne where out("HABITE") in (select out("HABITE") from %s) and nom = "%s"' % (self.get_random_personne().id, self.last_personne.node.nom),
        }
    def execute_une_requete(self, _req):
        """Run one query and return the number of rows found."""
        r = self.client.command(_req)
        #print " RESULTAT => %d entrees" % len(r)
        return len(r)
    def get_random_habitation(self,):
        """Return a random habitation (Numero) record.

        Note: fragile if Numero records get deleted (sparse cluster ids).
        """
        # BUG FIX: the loop previously tested `p`, which was never
        # reassigned, so an empty first result looped forever; loop on
        # `habitat` itself, mirroring get_random_personne().
        habitat = []
        while len(habitat) == 0:
            habitat = self.client.command( "select from %s:%s" %(self.clusterhabitatid, random.randint(0,self.nb_habitations) ) )
        self.last_habitation = Entite(habitat[0]._rid, habitat[0])
        return Entite(habitat[0]._rid, habitat[0])
    def get_random_personne(self,):
        """Return a random already-created Personne record."""
        p = []
        while len(p) == 0:
            p = self.client.command( "select from %s:%s" % (self.clusterpersonneid, random.randint(0,self.nb_personnes)) )
        self.last_personne = Entite(p[0]._rid, p[0])
        return Entite(p[0]._rid, p[0])
    def close(self):
        """Release the database session."""
        self.client.db_close()
class ChercheNeo4J(Cherche):
    '''Neo4j implementation of the Cherche benchmark client.'''
    def __init__(self,):
        self.graph = Graph(os.environ["NEO4JDBSTR"])
        # Graph statistics used for random picks and CSV reporting.
        self.nb_habitations = self.graph.cypher.execute("match (n:Numero) return count(n) as count")[0].count
        self.nb_personnes = self.graph.cypher.execute("match (p:Personne) return count(p) as count")[0].count
        self.nb_noeuds = self.graph.cypher.execute( "match (n) return count(n) as count")[0].count
        self.nb_aretes = self.graph.cypher.execute( "match ()-[r]->() return count(r) as count")[0].count
        self.filename = "%s/workload_04/compteurs.workload_04.neo4j.csv" % os.environ['BENCH_HOME']
        super(ChercheNeo4J, self).__init__()
    def mk_requests(self,):
        '''Build the benchmark Cypher queries around randomly picked nodes.'''
        self.requetes = {
            'AMISAMISAMISAMISPLUSJEUNEDESEXEDIFF':
                'match (n:Personne)-[:AMI*..4]-(m:Personne) where id(n) = %d and n.age >= m.age and n.sexe <> m.sexe return n,m' % (self.get_random_personne().id),
            'MEMENUMEROPLUSAGES':
                'match (p1:Personne)-[:HABITE]-(r1:Numero) ,(p2:Personne)-[:HABITE]-(r2:Numero) where id(p1) = %d and p1.age < p2.age and id(p1)<>id(p2) and r1.no = r2.no return p1,r1,p2,r2' % (self.get_random_personne().id),
            'MEMERUEMEILLEURSALAIRE': 'match (p:Personne)-[:HABITE]->(n:Numero)<-[:COMPORTE]-(v:Voie)-[:COMPORTE]->(nn:Numero)<-[:HABITE]-(pp:Personne) where id(p) = %d and p.salaire < pp.salaire return p,n,v,nn,pp ORDER BY pp.salaire DESC' % (self.get_random_personne().id),
            'MEMETRAVAILPLUSJEUNEMEMESEXEAUTREVILLEMEILLEURSALAIRE': 'start p=node(%d) match (p)-[:HABITE]->(n:Numero)<-[:COMPORTE]-(v:Voie)<-[:ORGANISE]-(c:Commune),(pp:Personne)-[:HABITE]->(nn:Numero)<-[:COMPORTE]-(vv:Voie)<-[:ORGANISE]-(cc:Commune) where p.sexe = pp.sexe and p.activite = pp.activite and c.libcom <> cc.libcom return p,pp,v,vv,c,cc order by pp.salaire DESC' % (self.get_random_personne().id),
            # shortestPath/allShortestPaths queries cannot return the path
            # object itself because of py2neo issue #400; the path is still
            # computed, which is all the timing benchmark needs.
            'LIENPLUSCOURT2PERSONNES':'start p1=node(%d) , p2=node(%d) match p=shortestPath((p1)-[*..10]-(p2)) return p1' % (self.get_random_personne().id, self.get_random_personne().id),
            'TOUSLIENSPLUSCOURTS2PERSONNESMAX4NIVEAU':'start p1=node(%d) , p2=node(%d) match p=allShortestPaths((p1)-[*..4]-(p2)) return p1' % (self.get_random_personne().id, self.get_random_personne().id),
            'SUMSALAIREFAMILLEADULTES':'start p=node(%d) match (n:Numero)<-[:HABITE]-(p)-[:FAMILLE]-(pp:Personne)-[:HABITE]->(nn:Numero) where n = nn and toInt(pp.age) > 20 return sum(toInt(pp.salaire))+toInt(p.salaire)' % (self.get_random_personne().id),
        }
    def execute_une_requete(self, _req):
        '''Run one query.

        NOTE(review): unlike the OrientDB variant this returns None, so the
        caller logs NB_TROUVE as None — confirm whether a result count
        should be returned here.'''
        #req = self.graph.cypher.execute(_req)
        ## Run inside a transaction so shortestPath-style functions work.
        tx = self.graph.cypher.begin()
        tx.append(_req)
        tx.process()
    def get_random_habitation(self,):
        '''Return a random empty habitation (Numero) — its only relationship
        is the link from the Numero to its street.'''
        habitat, = self.graph.cypher.execute( "match (n:Numero) where not ()-[:HABITE]->(n) return id(n) as id,n as node SKIP %s LIMIT 1" %( random.randint(0,self.nb_habitations) ) )
        return habitat
    def get_random_personne(self,):
        '''Return a random already-created Personne.
        The query below sporadically returns an empty result, hence the loop.'''
        p =[]
        while len(p) == 0: p = self.graph.cypher.execute( "match (n:Personne) return id(n) as id,n as node SKIP %s LIMIT 1" %( random.randint(0,self.nb_personnes) ) )
        return p[0]
    def close(self):
        # py2neo's Graph needs no explicit teardown.
        pass
def usage():
    # Print CLI usage and abort with a non-zero exit status.
    print("usage: %s orientdb | neo4j" % sys.argv[0] )
    exit(1)
def executer(_occurence, _cherche):
    """Build the query set, run one full search pass and log its duration.

    _occurence -- integer label of this client run (used in the log line)
    _cherche   -- a Cherche* backend instance (ChercheOrientDB / ChercheNeo4J)
    """
    _cherche.mk_requests()
    t1=datetime.datetime.now()
    _cherche.cherche(_occurence)
    _cherche.close()
    # affiche_temps is defined elsewhere in this file — presumably formats
    # the label plus elapsed time; TODO confirm.
    print affiche_temps("FINCLIENT%s" % _occurence, datetime.datetime.now() - t1)
if __name__ == "__main__":
    ## Number of query-sequence rounds
    NB_BOUCLE = 1000
    ## Number of concurrent clients executing the queries in each round
    NB_CONCURRENT = 5
    if len(sys.argv) != 2 or not (sys.argv[1] in ("orientdb","neo4j")) :
        usage()
    ## One batch of NB_CONCURRENT client threads per round
    # NOTE(review): the inner loop reuses the name thread_num and shadows the
    # outer round counter; iteration is unaffected but the name is misleading.
    for thread_num in range(0,NB_BOUCLE):
        for thread_num in range(0,NB_CONCURRENT):
            print ("DEBUTCLIENT%s" % thread_num)
            if sys.argv[1] == "orientdb":
                cherche = ChercheOrientDB()
            elif sys.argv[1] == "neo4j":
                cherche = ChercheNeo4J()
            try:
                thread = threading.Thread(target = executer, args = (thread_num,cherche,))
                thread.start()
            # NOTE(review): bare except hides the actual failure reason.
            except:
                print "Probleme lancement du thread client."
        # Wait for all client threads of this round to finish.
        # <_MainThread(MainThread, started 139670512949056)> stays in the count,
        # hence the "> 1" threshold.
        while threading.activeCount() > 1:
            print "Threads en cours : %s" % (threading.activeCount()-1)
            time.sleep(3)
    '''
    ## Par boucle non concurrente si pas plusieurs coeurs
    if sys.argv[1] == "orientdb":
        cherche = ChercheOrientDB()
    elif sys.argv[1] == "neo4j":
        cherche = ChercheNeo4J()
    for thread_num in range(0,NB_BOUCLE):
        executer(thread_num,cherche)
    cherche.close()
    '''
|
maxamillion/ansible-modules-core
|
refs/heads/devel
|
cloud/rackspace/rax_clb_nodes.py
|
51
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_clb_nodes
short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer
description:
- Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
version_added: "1.4"
options:
address:
required: false
description:
- IP address or domain name of the node
condition:
required: false
choices:
- enabled
- disabled
- draining
description:
- Condition for the node, which determines its role within the load
balancer
load_balancer_id:
required: true
type: integer
description:
- Load balancer id
node_id:
required: false
type: integer
description:
- Node id
port:
required: false
type: integer
description:
- Port number of the load balanced service on the node
state:
required: false
default: "present"
choices:
- present
- absent
description:
- Indicate desired state of the node
type:
required: false
choices:
- primary
- secondary
description:
- Type of node
wait:
required: false
default: "no"
choices:
- "yes"
- "no"
description:
- Wait for the load balancer to become active before returning
wait_timeout:
required: false
type: integer
default: 30
description:
- How long to wait before giving up and returning an error
weight:
required: false
description:
- Weight of node
author: "Lukasz Kawczynski (@neuroid)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
# Add a new node to the load balancer
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
address: 10.2.2.3
port: 80
condition: enabled
type: primary
wait: yes
credentials: /path/to/credentials
# Drain connections from a node
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
node_id: 410
condition: draining
wait: yes
credentials: /path/to/credentials
# Remove a node from the load balancer
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
node_id: 410
state: absent
wait: yes
credentials: /path/to/credentials
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def _activate_virtualenv(path):
    """Activate the virtualenv rooted at *path* inside the current process.

    Executes the virtualenv-provided bin/activate_this.py hook via the
    Python 2 execfile builtin. Raises IOError if the hook is missing
    (handled by the caller in main()).
    """
    path = os.path.expanduser(path)
    activate_this = os.path.join(path, 'bin', 'activate_this.py')
    execfile(activate_this, dict(__file__=activate_this))
def _get_node(lb, node_id=None, address=None, port=None):
"""Return a matching node"""
for node in getattr(lb, 'nodes', []):
match_list = []
if node_id is not None:
match_list.append(getattr(node, 'id', None) == node_id)
if address is not None:
match_list.append(getattr(node, 'address', None) == address)
if port is not None:
match_list.append(getattr(node, 'port', None) == port)
if match_list and all(match_list):
return node
return None
def main():
    """Ansible module entry point.

    Creates, updates or deletes a single node on a Rackspace Cloud Load
    Balancer according to the module parameters, then exits via
    module.exit_json/fail_json.
    """
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            address=dict(),
            condition=dict(choices=['enabled', 'disabled', 'draining']),
            load_balancer_id=dict(required=True, type='int'),
            node_id=dict(type='int'),
            port=dict(type='int'),
            state=dict(default='present', choices=['present', 'absent']),
            type=dict(choices=['primary', 'secondary']),
            virtualenv=dict(),
            wait=dict(default=False, type='bool'),
            wait_timeout=dict(default=30, type='int'),
            weight=dict(type='int'),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    address = module.params['address']
    # pyrax expects condition/type upper-cased; None stays None.
    condition = (module.params['condition'] and
                 module.params['condition'].upper())
    load_balancer_id = module.params['load_balancer_id']
    node_id = module.params['node_id']
    port = module.params['port']
    state = module.params['state']
    typ = module.params['type'] and module.params['type'].upper()
    virtualenv = module.params['virtualenv']
    wait = module.params['wait']
    # A falsy timeout is coerced to 1 so wait_until always gets >= 1 attempt.
    wait_timeout = module.params['wait_timeout'] or 1
    weight = module.params['weight']

    if virtualenv:
        try:
            _activate_virtualenv(virtualenv)
        except IOError as e:
            module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
                virtualenv, e))

    setup_rax_module(module, pyrax)

    if not pyrax.cloud_loadbalancers:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    try:
        lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
    except pyrax.exc.PyraxException as e:
        module.fail_json(msg='%s' % e.message)

    # Look up an existing node by id and/or address+port.
    node = _get_node(lb, node_id, address, port)

    result = rax_clb_node_to_dict(node)

    if state == 'absent':
        if not node:  # Removing a non-existent node
            module.exit_json(changed=False, state=state)
        try:
            lb.delete_node(node)
            result = {}
        except pyrax.exc.NotFound:
            module.exit_json(changed=False, state=state)
        except pyrax.exc.PyraxException as e:
            module.fail_json(msg='%s' % e.message)
    else:  # present
        if not node:
            if node_id:  # Updating a non-existent node
                msg = 'Node %d not found' % node_id
                if lb.nodes:
                    msg += (' (available nodes: %s)' %
                            ', '.join([str(x.id) for x in lb.nodes]))
                module.fail_json(msg=msg)
            else:  # Creating a new node
                try:
                    node = pyrax.cloudloadbalancers.Node(
                        address=address, port=port, condition=condition,
                        weight=weight, type=typ)
                    resp, body = lb.add_nodes([node])
                    result.update(body['nodes'][0])
                except pyrax.exc.PyraxException as e:
                    module.fail_json(msg='%s' % e.message)
        else:  # Updating an existing node
            mutable = {
                'condition': condition,
                'type': typ,
                'weight': weight,
            }
            # Drop attributes that are unset or already hold the wanted value.
            # NOTE(review): popping while iterating .items() is safe on
            # Python 2 (items() returns a list) but would raise on Python 3.
            for name, value in mutable.items():
                if value is None or value == getattr(node, name):
                    mutable.pop(name)

            if not mutable:
                module.exit_json(changed=False, state=state, node=result)

            try:
                # The diff has to be set explicitly to update node's weight and
                # type; this should probably be fixed in pyrax
                lb.update_node(node, diff=mutable)
                result.update(mutable)
            except pyrax.exc.PyraxException as e:
                module.fail_json(msg='%s' % e.message)

    if wait:
        pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
                               attempts=wait_timeout)
        if lb.status != 'ACTIVE':
            module.fail_json(
                msg='Load balancer not active after %ds (current status: %s)' %
                    (wait_timeout, lb.status.lower()))

    kwargs = {'node': result} if result else {}
    module.exit_json(changed=True, state=state, **kwargs)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()
|
spisneha25/django
|
refs/heads/master
|
tests/middleware/extra_urls.py
|
487
|
from django.conf.urls import url
from . import views
# Patterns covering slash-handling edge cases: no trailing slash, a trailing
# slash, and a path containing '#' that needs URL-quoting. Presumably used
# as a custom URLconf by middleware tests — TODO confirm against the tests
# that reference these routes.
urlpatterns = [
    url(r'^customurlconf/noslash$', views.empty_view),
    url(r'^customurlconf/slash/$', views.empty_view),
    url(r'^customurlconf/needsquoting#/$', views.empty_view),
]
|
benjohnston24/partyCoin-interface
|
refs/heads/master
|
funds_tracker/__init__.py
|
12133432
| |
morpheby/levelup-by
|
refs/heads/master
|
lms/djangoapps/lms_migration/__init__.py
|
12133432
| |
wasit7/tutorials
|
refs/heads/master
|
django/django_generic_view/cp/myapp/__init__.py
|
12133432
| |
opelr/boardgames
|
refs/heads/master
|
boardgames/__init__.py
|
12133432
| |
clincher/phuketstash
|
refs/heads/master
|
users/__init__.py
|
12133432
| |
alesdotio/Spirit
|
refs/heads/master
|
spirit/core/templatetags/__init__.py
|
12133432
| |
roth1002/thefuck
|
refs/heads/master
|
thefuck/specific/__init__.py
|
12133432
| |
macressler/mplayer2
|
refs/heads/master
|
TOOLS/vdpau_functions.py
|
6
|
#!/usr/bin/env python3
# Generate vdpau_template.c
functions = """
# get_error_string should be first, because the function lookup loop should
# have it available to print errors for other functions
get_error_string
bitmap_surface_create
bitmap_surface_destroy
bitmap_surface_put_bits_native
bitmap_surface_query_capabilities
decoder_create
decoder_destroy
decoder_render
device_destroy
generate_csc_matrix GenerateCSCMatrix # CSC completely capitalized
output_surface_create
output_surface_destroy
output_surface_get_bits_native
output_surface_put_bits_indexed
output_surface_put_bits_native
output_surface_render_bitmap_surface
output_surface_render_output_surface
preemption_callback_register
presentation_queue_block_until_surface_idle
presentation_queue_create
presentation_queue_destroy
presentation_queue_display
presentation_queue_get_time
presentation_queue_query_surface_status
presentation_queue_target_create_x11
presentation_queue_target_destroy
video_mixer_create
video_mixer_destroy
video_mixer_query_feature_support
video_mixer_render
video_mixer_set_attribute_values
video_mixer_set_feature_enables
video_surface_create
video_surface_destroy
video_surface_put_bits_y_cb_cr
"""
print("""
/* List the VDPAU functions used by MPlayer.
* Generated by vdpau_functions.py.
* First argument on each line is the VDPAU function type name,
* second macro name needed to get function address,
* third name MPlayer uses for the function.
*/
""")
# Emit one VDP_FUNCTION(...) line per entry in the `functions` table above.
for line in functions.splitlines():
    tokens = line.split('#')[0].strip().split()
    if not tokens:
        # Blank or comment-only line.
        continue
    if len(tokens) == 1:
        mp_name = tokens[0]
        # Derive the VDPAU CamelCase name from the snake_case MPlayer name.
        vdpau_name = ''.join(seg.capitalize() for seg in mp_name.split('_'))
    else:
        # An explicit VDPAU name overrides the derived one.
        mp_name, vdpau_name = tokens
    print('VDP_FUNCTION(Vdp%s, VDP_FUNC_ID_%s, %s)' % (vdpau_name,
                                                       mp_name.upper(),
                                                       mp_name))
|
wanghaven/nupic
|
refs/heads/master
|
src/nupic/datafiles/extra/gym/raw/makeDataset.py
|
27
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2010-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unify the various Gym CSV files to a single coherent CSV file
The Gym dataset has two file types:
1. Hourly attendance data per gym
2. KW consumption in 15 minutes intervals
The createDataset() function merges the two file types and creates
a single CSV file with hourly data. Each record contains the following fields:
Gym name, Date, Hour, # Attendees, KW consumption
"""
import os
import sys
import fileinput
import glob
import operator
import datetime
from nupic.data.file import File
months = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
class Record(object):
    """One hourly observation for a single gym."""
    def __init__(self):
        self.club = ''            # gym name
        self.date = None          # NOTE(review): addRecord sets .timestamp instead; appears unused
        self.time = 0             # NOTE(review): superseded by .timestamp; appears unused
        self.KW = 0               # NOTE(review): appears unused in this module
        self.attendeeCount = 0    # attendees during this hour
        self.consumption = 0      # KW consumption during this hour
class Club(object):
    """Attendance and consumption data for one gym, keyed by (date, hour)."""
    def __init__(self, name):
        self.name = name
        # Maps ((yyyy, mm, dd), hour) -> Record
        self.records = {}

    def processAttendance(self, f):
        """Consume this club's attendance lines from the CSV iterator *f*.

        Expects two header lines, then one line per date; an all-comma line
        terminates the club's section (followed by a 'Club Totals:' line and
        another all-comma line, both skipped).
        """
        # Skip first two
        line = f.next()
        assert line == ',,,,,,,,,,,,,,,,,,,\n'
        line = f.next()
        assert line == 'Date Of Swipe, < 6 am,6-7 am,7-8 am,8-9 am,9-10 am,10-11 am,11-12 am,12-1 pm,1-2 pm,2-3 pm,3-4 pm,4-5 pm,5-6 pm,6-7 pm,7-8 pm,8-9 pm,9-10 pm,> 10 pm,Totals\n'
        for i, line in enumerate(f):
            # Check whether we're done with this club
            if line == ',,,,,,,,,,,,,,,,,,,\n':
                # skip next two lines
                line = f.next()
                assert line.startswith('Club Totals:')
                line = f.next()
                assert line == ',,,,,,,,,,,,,,,,,,,\n'
                return
            else:
                self.addRecord(line)

    def addRecord(self, line):
        """Parse one 'dd-Mon-yy,<18 hourly counts>,total' line into 24 hourly
        Records (padding the uncovered hours with zero attendance)."""
        fields = line.split(',')
        assert len(fields) == 20
        date = fields[0].split('-')
        # Convert day to 'dd'
        dd = int(date[0])
        mm = months.index(date[1]) + 1
        # The dataset only covers September and October.
        assert mm in (9, 10)
        # Convert year from 'yy' to 'yyyy'
        yyyy = 2000 + int(date[2])
        date = (yyyy, mm, dd)
        # Add 0 for hours without attendants (<12AM-4AM and 11PM)
        # NOTE(review): the CSV fields are still strings here, so
        # attendeeCount mixes int 0 (padding) with str values.
        attendance = [0] * 5 + fields[1:19] + [0]
        assert len(attendance) == 24
        # Create a record for each hour of the day.
        for i, a in enumerate(attendance):
            r = Record()
            r.club = self.name
            r.timestamp = datetime.datetime(yyyy, mm, dd, i)
            #r.time = i
            r.attendeeCount = a
            self.records[(date, i)] = r

    def updateRecord(self, date, t, consumption):
        """Attach an hourly *consumption* figure to the record at
        (*date*, hour *t*). *date* arrives as 'dd/mm/yyyy', optionally
        followed by a time part, which is discarded."""
        # Get rid of time and AM/PM if needed
        date = date.split()[0]
        # Convert to (yyyy, mmm, dd)
        date = date.split('/')
        # Convert day to 'dd'
        dd = int(date[0])
        # Convert month index to month name
        mm = int(date[1])
        yyyy = int(date[2])
        # Locate record
        key = ((yyyy, mm, dd), t)
        if not key in self.records:
            print self.name, 'is missing attendance data for', key
        else:
            r = self.records[key]
            r.consumption = consumption
def processClubAttendance(f, clubs):
    """Process the attendance data of one club.

    If the club already exists in the dict update its data; if it is new,
    create a Club object and add it to *clubs*. The per-line parsing is then
    delegated to Club.processAttendance. When the file ends the f.next()
    call raises StopIteration, and returning False tells the caller there
    are no more clubs to process.
    """
    try:
        # Skip as many empty lines as necessary (file format inconsistent)
        line = f.next()
        while line == ',,,,,,,,,,,,,,,,,,,\n':
            line = f.next()
        # The first non-empty line should have the name as the first field
        name = line.split(',')[0]
        # Create a new club object if needed
        if name not in clubs:
            clubs[name] = Club(name)
        # Get the named club
        c = clubs[name]
        c.processAttendance(f)
        return True
    except StopIteration:
        return False
def processClubConsumption(f, clubs):
    """Merge consumption data from iterator *f* into the Club objects.

    - Skip the header line
    - Read 4 records (15-minute intervals) at a time, aggregating them into
      one hourly consumption figure
    - Parse each line: running count, club, date, consumption
    - Call club.updateRecord() with the hourly total

    NOTE(review): this function always returns None — it drains the whole
    input internally and exits via the StopIteration handler, so the
    caller's `while processClubConsumption(...)` loop runs only one pass.
    """
    try:
        # Skip header line
        line = f.next()
        assert line.endswith('" ","SITE_LOCATION_NAME","TIMESTAMP","TOTAL_KWH"\n')
        valid_times = range(24)
        t = 0 # used to track time
        club = None
        clubName = None
        lastDate = None  # NOTE(review): never used below
        while True:
            assert t in valid_times
            consumption = 0
            # Four 15-minute lines make up one hour.
            for x in range(4):
                # Read the line and get rid of the newline character
                line = f.next()[:-1]
                fields = line.split(',')
                assert len(fields) == 4
                for i, field in enumerate(fields):
                    # Strip the redundant double quotes
                    assert field[0] == '"' and field[-1] == '"'
                    fields[i] = field[1:-1]
                # Ignoring field 0, which is just a running count
                # Get the club name
                name = fields[1]
                # Hack to fix inconsistent club names like: "Melbourne CBD - Melbourne Central" vs. "Melbourne Central"
                partialNames = ('Melbourne Central', 'North Sydney', 'Park St', 'Pitt St')
                for pn in partialNames:
                    if pn in name:
                        name = pn
                # Locate the club if needed (maybe )
                if name != clubName:
                    clubName = name
                    club = clubs[name]
                # Split the date (time is counted using the t variable)
                tokens = fields[2].split()
                # Verify that t == 0 and consumption == 0 when there is no time in the file
                if len(tokens) == 1:
                    assert consumption == 0 and t == 0
                # The first (and sometimes only) token is the date
                date = tokens[0]
                # Aggregate the consumption
                consumption += float(fields[3])
            # Update the Club object after aggregating the consumption of 4 lines
            club.updateRecord(date, t, consumption)
            # Increment time
            t += 1
            t %= 24
    except StopIteration:
        return
def processAttendanceFiles():
    """Parse every Attendance*.csv in the working directory into a dict
    mapping club name -> Club."""
    attendance_files = glob.glob('Attendance*.csv')
    stream = fileinput.input(files=attendance_files)
    clubs = {}
    # processClubAttendance returns False once the input is exhausted.
    keep_going = True
    while keep_going:
        keep_going = processClubAttendance(stream, clubs)
    return clubs
def processConsumptionFiles(clubs):
    """Merge consumption data from all_group*detail.csv files into *clubs*
    and return the same dict.

    NOTE(review): processClubConsumption always returns None (it drains the
    whole input internally), so this while loop executes exactly one pass —
    unlike the attendance counterpart it is not actually iterating.
    """
    files = glob.glob('all_group*detail.csv')
    f = fileinput.input(files=files)
    # Process the input files and create a dictionary of Club objects
    while processClubConsumption(f, clubs):
        pass
    return clubs
def makeDataset():
    """Merge attendance and consumption data and write gym.csv.

    Each output row is [gym name, timestamp, attendee count, consumption],
    written per club in ((yyyy, mm, dd), hour) order via nupic's File.
    """
    clubs = processAttendanceFiles()
    clubs = processConsumptionFiles(clubs)

    fields = [('gym', 'string', 'S'),
              ('timestamp', 'datetime', 'T'),
              ('attendeeCount', 'int', ''),
              ('consumption', 'float', ''),
             ]
    with File('gym.csv', fields) as f:
        ## write header
        #f.write('Gym Name,Date,Time,Attendee Count,Consumption (KWH)\n')
        for c in clubs.values():
            # Sort each club's records by their ((yyyy, mm, dd), hour) key.
            for k, r in sorted(c.records.iteritems(), key=operator.itemgetter(0)):
                #dd = r.date[2]
                #mm = r.date[1]
                #yyyy = r.date[0]
                #line = ','.join(str(x) for x in
                #                (c.name, '%d-%s-%d' % (dd, mmm, yyyy), r.time, r.attendeeCount, r.consumption))
                #f.write(line + '\n')
                f.write([r.club, r.timestamp, r.attendeeCount, r.consumption])
if __name__=='__main__':
makeDataset()
print 'Done.'
|
rgmining/ria
|
refs/heads/master
|
tests/test_suite.py
|
1
|
#
# test_suite.py
#
# Copyright (c) 2016-2017 Junpei Kawamoto
#
# This file is part of rgmining-ria.
#
# rgmining-ria is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rgmining-ria is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rgmining-ria. If not, see <http://www.gnu.org/licenses/>.
#
"""Test suite.
"""
from __future__ import absolute_import, print_function
import importlib
import sys
import unittest
TESTS = (
"tests.bipartite_test",
"tests.bipartite_sum_test",
"tests.credibility_test",
"tests.one_test",
)
"""Collection of test modules."""
def suite():
    """Build and return a TestSuite aggregating every module named in TESTS.
    """
    loader = unittest.TestLoader()
    combined = unittest.TestSuite()
    combined.addTests(
        loader.loadTestsFromModule(importlib.import_module(name))
        for name in TESTS)
    return combined
def main():
    """Run the suite and return a status code.

    Returns:
        0 if all tests passed, 1 on failure, -1 if interrupted.
    """
    try:
        result = unittest.TextTestRunner(verbosity=2).run(suite())
    except KeyboardInterrupt:
        print("Test canceled.")
        return -1
    return 0 if result.wasSuccessful() else 1
if __name__ == "__main__":
sys.exit(main())
|
ChristineLaMuse/mozillians
|
refs/heads/master
|
vendor-local/lib/python/unidecode/x080.py
|
252
|
data = (
'Yao ', # 0x00
'Lao ', # 0x01
'[?] ', # 0x02
'Kao ', # 0x03
'Mao ', # 0x04
'Zhe ', # 0x05
'Qi ', # 0x06
'Gou ', # 0x07
'Gou ', # 0x08
'Gou ', # 0x09
'Die ', # 0x0a
'Die ', # 0x0b
'Er ', # 0x0c
'Shua ', # 0x0d
'Ruan ', # 0x0e
'Er ', # 0x0f
'Nai ', # 0x10
'Zhuan ', # 0x11
'Lei ', # 0x12
'Ting ', # 0x13
'Zi ', # 0x14
'Geng ', # 0x15
'Chao ', # 0x16
'Hao ', # 0x17
'Yun ', # 0x18
'Pa ', # 0x19
'Pi ', # 0x1a
'Chi ', # 0x1b
'Si ', # 0x1c
'Chu ', # 0x1d
'Jia ', # 0x1e
'Ju ', # 0x1f
'He ', # 0x20
'Chu ', # 0x21
'Lao ', # 0x22
'Lun ', # 0x23
'Ji ', # 0x24
'Tang ', # 0x25
'Ou ', # 0x26
'Lou ', # 0x27
'Nou ', # 0x28
'Gou ', # 0x29
'Pang ', # 0x2a
'Ze ', # 0x2b
'Lou ', # 0x2c
'Ji ', # 0x2d
'Lao ', # 0x2e
'Huo ', # 0x2f
'You ', # 0x30
'Mo ', # 0x31
'Huai ', # 0x32
'Er ', # 0x33
'Zhe ', # 0x34
'Ting ', # 0x35
'Ye ', # 0x36
'Da ', # 0x37
'Song ', # 0x38
'Qin ', # 0x39
'Yun ', # 0x3a
'Chi ', # 0x3b
'Dan ', # 0x3c
'Dan ', # 0x3d
'Hong ', # 0x3e
'Geng ', # 0x3f
'Zhi ', # 0x40
'[?] ', # 0x41
'Nie ', # 0x42
'Dan ', # 0x43
'Zhen ', # 0x44
'Che ', # 0x45
'Ling ', # 0x46
'Zheng ', # 0x47
'You ', # 0x48
'Wa ', # 0x49
'Liao ', # 0x4a
'Long ', # 0x4b
'Zhi ', # 0x4c
'Ning ', # 0x4d
'Tiao ', # 0x4e
'Er ', # 0x4f
'Ya ', # 0x50
'Die ', # 0x51
'Gua ', # 0x52
'[?] ', # 0x53
'Lian ', # 0x54
'Hao ', # 0x55
'Sheng ', # 0x56
'Lie ', # 0x57
'Pin ', # 0x58
'Jing ', # 0x59
'Ju ', # 0x5a
'Bi ', # 0x5b
'Di ', # 0x5c
'Guo ', # 0x5d
'Wen ', # 0x5e
'Xu ', # 0x5f
'Ping ', # 0x60
'Cong ', # 0x61
'Shikato ', # 0x62
'[?] ', # 0x63
'Ting ', # 0x64
'Yu ', # 0x65
'Cong ', # 0x66
'Kui ', # 0x67
'Tsuraneru ', # 0x68
'Kui ', # 0x69
'Cong ', # 0x6a
'Lian ', # 0x6b
'Weng ', # 0x6c
'Kui ', # 0x6d
'Lian ', # 0x6e
'Lian ', # 0x6f
'Cong ', # 0x70
'Ao ', # 0x71
'Sheng ', # 0x72
'Song ', # 0x73
'Ting ', # 0x74
'Kui ', # 0x75
'Nie ', # 0x76
'Zhi ', # 0x77
'Dan ', # 0x78
'Ning ', # 0x79
'Qie ', # 0x7a
'Ji ', # 0x7b
'Ting ', # 0x7c
'Ting ', # 0x7d
'Long ', # 0x7e
'Yu ', # 0x7f
'Yu ', # 0x80
'Zhao ', # 0x81
'Si ', # 0x82
'Su ', # 0x83
'Yi ', # 0x84
'Su ', # 0x85
'Si ', # 0x86
'Zhao ', # 0x87
'Zhao ', # 0x88
'Rou ', # 0x89
'Yi ', # 0x8a
'Le ', # 0x8b
'Ji ', # 0x8c
'Qiu ', # 0x8d
'Ken ', # 0x8e
'Cao ', # 0x8f
'Ge ', # 0x90
'Di ', # 0x91
'Huan ', # 0x92
'Huang ', # 0x93
'Yi ', # 0x94
'Ren ', # 0x95
'Xiao ', # 0x96
'Ru ', # 0x97
'Zhou ', # 0x98
'Yuan ', # 0x99
'Du ', # 0x9a
'Gang ', # 0x9b
'Rong ', # 0x9c
'Gan ', # 0x9d
'Cha ', # 0x9e
'Wo ', # 0x9f
'Chang ', # 0xa0
'Gu ', # 0xa1
'Zhi ', # 0xa2
'Han ', # 0xa3
'Fu ', # 0xa4
'Fei ', # 0xa5
'Fen ', # 0xa6
'Pei ', # 0xa7
'Pang ', # 0xa8
'Jian ', # 0xa9
'Fang ', # 0xaa
'Zhun ', # 0xab
'You ', # 0xac
'Na ', # 0xad
'Hang ', # 0xae
'Ken ', # 0xaf
'Ran ', # 0xb0
'Gong ', # 0xb1
'Yu ', # 0xb2
'Wen ', # 0xb3
'Yao ', # 0xb4
'Jin ', # 0xb5
'Pi ', # 0xb6
'Qian ', # 0xb7
'Xi ', # 0xb8
'Xi ', # 0xb9
'Fei ', # 0xba
'Ken ', # 0xbb
'Jing ', # 0xbc
'Tai ', # 0xbd
'Shen ', # 0xbe
'Zhong ', # 0xbf
'Zhang ', # 0xc0
'Xie ', # 0xc1
'Shen ', # 0xc2
'Wei ', # 0xc3
'Zhou ', # 0xc4
'Die ', # 0xc5
'Dan ', # 0xc6
'Fei ', # 0xc7
'Ba ', # 0xc8
'Bo ', # 0xc9
'Qu ', # 0xca
'Tian ', # 0xcb
'Bei ', # 0xcc
'Gua ', # 0xcd
'Tai ', # 0xce
'Zi ', # 0xcf
'Ku ', # 0xd0
'Zhi ', # 0xd1
'Ni ', # 0xd2
'Ping ', # 0xd3
'Zi ', # 0xd4
'Fu ', # 0xd5
'Pang ', # 0xd6
'Zhen ', # 0xd7
'Xian ', # 0xd8
'Zuo ', # 0xd9
'Pei ', # 0xda
'Jia ', # 0xdb
'Sheng ', # 0xdc
'Zhi ', # 0xdd
'Bao ', # 0xde
'Mu ', # 0xdf
'Qu ', # 0xe0
'Hu ', # 0xe1
'Ke ', # 0xe2
'Yi ', # 0xe3
'Yin ', # 0xe4
'Xu ', # 0xe5
'Yang ', # 0xe6
'Long ', # 0xe7
'Dong ', # 0xe8
'Ka ', # 0xe9
'Lu ', # 0xea
'Jing ', # 0xeb
'Nu ', # 0xec
'Yan ', # 0xed
'Pang ', # 0xee
'Kua ', # 0xef
'Yi ', # 0xf0
'Guang ', # 0xf1
'Gai ', # 0xf2
'Ge ', # 0xf3
'Dong ', # 0xf4
'Zhi ', # 0xf5
'Xiao ', # 0xf6
'Xiong ', # 0xf7
'Xiong ', # 0xf8
'Er ', # 0xf9
'E ', # 0xfa
'Xing ', # 0xfb
'Pian ', # 0xfc
'Neng ', # 0xfd
'Zi ', # 0xfe
'Gui ', # 0xff
)
|
frank10704/DF_GCS_W
|
refs/heads/master
|
MissionPlanner-master/packages/IronPython.StdLib.2.7.5-beta1/content/Lib/wsgiref/__init__.py
|
71
|
"""wsgiref -- a WSGI (PEP 333) Reference Library
Current Contents:
* util -- Miscellaneous useful functions and wrappers
* headers -- Manage response headers
* handlers -- base classes for server/gateway implementations
* simple_server -- a simple BaseHTTPServer that supports WSGI
* validate -- validation wrapper that sits between an app and a server
to detect errors in either
To-Do:
* cgi_gateway -- Run WSGI apps under CGI (pending a deployment standard)
* cgi_wrapper -- Run CGI apps under WSGI
* router -- a simple middleware component that handles URL traversal
"""
|
RafaelRMachado/qtwebkit
|
refs/heads/dev
|
Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
|
124
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.buildbot import BuildBot, Builder, Build
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
class BuilderTest(unittest.TestCase):
    # Exercises Builder's regression-window search over fully mocked builds.

    def _mock_test_result(self, testname):
        # Any failure type serves; text mismatch is just a convenient one.
        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])

    def _install_fetch_build(self, failure):
        # Replace the builder's _fetch_build with a stub: build N has
        # revision N + 1000, is green below build 4, and fails the test
        # names returned by failure(N).
        def _mock_fetch_build(build_number):
            build = Build(
                builder=self.builder,
                build_number=build_number,
                revision=build_number + 1000,
                is_green=build_number < 4
            )
            results = [self._mock_test_result(testname) for testname in failure(build_number)]
            layout_test_results = LayoutTestResults(results)
            def mock_layout_test_results():
                return layout_test_results
            build.layout_test_results = mock_layout_test_results
            return build
        self.builder._fetch_build = _mock_fetch_build

    def setUp(self):
        self.buildbot = BuildBot()
        self.builder = Builder(u"Test Builder \u2661", self.buildbot)
        self._install_fetch_build(lambda build_number: ["test1", "test2"])

    def test_latest_layout_test_results(self):
        self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults([self._mock_test_result(testname) for testname in ["test1", "test2"]])
        self.builder.accumulated_results_url = lambda: "http://dummy_url.org"
        self.assertTrue(self.builder.latest_layout_test_results())

    def test_find_regression_window(self):
        # Builds 0-3 are green, so the break falls between revisions 1003/1004.
        regression_window = self.builder.find_regression_window(self.builder.build(10))
        self.assertEqual(regression_window.build_before_failure().revision(), 1003)
        self.assertEqual(regression_window.failing_build().revision(), 1004)
        # With a look-back limit the search stops before reaching a green build.
        regression_window = self.builder.find_regression_window(self.builder.build(10), look_back_limit=2)
        self.assertIsNone(regression_window.build_before_failure())
        self.assertEqual(regression_window.failing_build().revision(), 1008)

    def test_none_build(self):
        # When builds can't be fetched, the window has neither endpoint.
        self.builder._fetch_build = lambda build_number: None
        regression_window = self.builder.find_regression_window(self.builder.build(10))
        self.assertIsNone(regression_window.build_before_failure())
        self.assertIsNone(regression_window.failing_build())

    def test_flaky_tests(self):
        # Alternating single failures per build.
        self._install_fetch_build(lambda build_number: ["test1"] if build_number % 2 else ["test2"])
        regression_window = self.builder.find_regression_window(self.builder.build(10))
        self.assertEqual(regression_window.build_before_failure().revision(), 1009)
        self.assertEqual(regression_window.failing_build().revision(), 1010)

    def test_failure_and_flaky(self):
        self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number % 2 else ["test2"])
        regression_window = self.builder.find_regression_window(self.builder.build(10))
        self.assertEqual(regression_window.build_before_failure().revision(), 1003)
        self.assertEqual(regression_window.failing_build().revision(), 1004)

    def test_no_results(self):
        # NOTE(review): identical to test_failure_and_flaky above — despite
        # its name this does not exercise a missing-results scenario.
        self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number % 2 else ["test2"])
        regression_window = self.builder.find_regression_window(self.builder.build(10))
        self.assertEqual(regression_window.build_before_failure().revision(), 1003)
        self.assertEqual(regression_window.failing_build().revision(), 1004)

    def test_failure_after_flaky(self):
        self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number > 6 else ["test3"])
        regression_window = self.builder.find_regression_window(self.builder.build(10))
        self.assertEqual(regression_window.build_before_failure().revision(), 1006)
        self.assertEqual(regression_window.failing_build().revision(), 1007)

    def test_find_blameworthy_regression_window(self):
        self.assertEqual(self.builder.find_blameworthy_regression_window(10).revisions(), [1004])
        self.assertIsNone(self.builder.find_blameworthy_regression_window(10, look_back_limit=2))
        # Flakey test avoidance requires at least 2 red builds:
        self.assertIsNone(self.builder.find_blameworthy_regression_window(4))
        self.assertEqual(self.builder.find_blameworthy_regression_window(4, avoid_flakey_tests=False).revisions(), [1004])
        # Green builder:
        self.assertIsNone(self.builder.find_blameworthy_regression_window(3))

    def test_build_caching(self):
        # The same build number should yield an equal Build object (cached).
        self.assertEqual(self.builder.build(10), self.builder.build(10))

    def test_build_and_revision_for_filename(self):
        expectations = {
            "r47483 (1)/" : (47483, 1),
            "r47483 (1).zip" : (47483, 1),
            "random junk": None,
        }
        for filename, revision_and_build in expectations.items():
            self.assertEqual(self.builder._revision_and_build_for_filename(filename), revision_and_build)

    def test_file_info_list_to_revision_to_build_list(self):
        file_info_list = [
            {"filename": "r47483 (1)/"},
            {"filename": "r47483 (1).zip"},
            {"filename": "random junk"},
        ]
        builds_and_revisions_list = [(47483, 1), (47483, 1)]
        self.assertEqual(self.builder._file_info_list_to_revision_to_build_list(file_info_list), builds_and_revisions_list)

    def test_fetch_build(self):
        buildbot = BuildBot()
        builder = Builder(u"Test Builder \u2661", buildbot)

        def mock_fetch_build_dictionary(self, build_number):
            build_dictionary = {
                "sourceStamp": {
                    "revision": None, # revision=None means a trunk build started from the force-build button on the builder page.
                },
                "number": int(build_number),
                # Intentionally missing the 'results' key, meaning it's a "pass" build.
            }
            return build_dictionary
        buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
        self.assertIsNotNone(builder._fetch_build(1))
class BuildTest(unittest.TestCase):
    """Unit tests for the Build class."""

    def test_layout_test_results(self):
        """layout_test_results() should return None when the results file
        cannot be fetched."""
        buildbot = BuildBot()
        builder = Builder(u"Foo Builder (test)", buildbot)
        # Simulate a failed fetch of the results file.
        builder._fetch_file_from_results = lambda results_url, file_name: None
        build = Build(builder, None, None, None)
        # Test that layout_test_results() returns None if the fetch fails.
        self.assertIsNone(build.layout_test_results())
class BuildBotTest(unittest.TestCase):
    """Unit tests for BuildBot: one-box status parsing, builder/build URL
    construction, results-directory parsing, and green-revision search."""

    # Captured one-box status HTML covering the four builder states the
    # parser must handle: success, no-build-yet, failure, and slave-lost.
    _example_one_box_status = '''
<table>
<tr>
<td class="box"><a href="builders/Windows%20Debug%20%28Tests%29">Windows Debug (Tests)</a></td>
<td align="center" class="LastBuild box success"><a href="builders/Windows%20Debug%20%28Tests%29/builds/3693">47380</a><br />build<br />successful</td>
<td align="center" class="Activity building">building<br />ETA in<br />~ 14 mins<br />at 13:40</td>
<tr>
<td class="box"><a href="builders/SnowLeopard%20Intel%20Release">SnowLeopard Intel Release</a></td>
<td class="LastBuild box" >no build</td>
<td align="center" class="Activity building">building<br />< 1 min</td>
<tr>
<td class="box"><a href="builders/Qt%20Linux%20Release">Qt Linux Release</a></td>
<td align="center" class="LastBuild box failure"><a href="builders/Qt%20Linux%20Release/builds/654">47383</a><br />failed<br />compile-webkit</td>
<td align="center" class="Activity idle">idle<br />3 pending</td>
<tr>
<td class="box"><a href="builders/Qt%20Windows%2032-bit%20Debug">Qt Windows 32-bit Debug</a></td>
<td align="center" class="LastBuild box failure"><a href="builders/Qt%20Windows%2032-bit%20Debug/builds/2090">60563</a><br />failed<br />failed<br />slave<br />lost</td>
<td align="center" class="Activity building">building<br />ETA in<br />~ 5 mins<br />at 08:25</td>
</table>
'''

    # Expected parse result for each <tr> above, in the same order.
    _expected_example_one_box_parsings = [
        {
            'is_green': True,
            'build_number' : 3693,
            'name': u'Windows Debug (Tests)',
            'built_revision': 47380,
            'activity': 'building',
            'pending_builds': 0,
        },
        {
            'is_green': False,
            'build_number' : None,
            'name': u'SnowLeopard Intel Release',
            'built_revision': None,
            'activity': 'building',
            'pending_builds': 0,
        },
        {
            'is_green': False,
            'build_number' : 654,
            'name': u'Qt Linux Release',
            'built_revision': 47383,
            'activity': 'idle',
            'pending_builds': 3,
        },
        {
            'is_green': True,
            'build_number' : 2090,
            'name': u'Qt Windows 32-bit Debug',
            'built_revision': 60563,
            'activity': 'building',
            'pending_builds': 0,
        },
    ]

    def test_status_parsing(self):
        """Each status-table row should parse into exactly the expected
        builder dictionary (no missing and no extra keys)."""
        buildbot = BuildBot()

        soup = BeautifulSoup(self._example_one_box_status)
        status_table = soup.find("table")
        input_rows = status_table.findAll('tr')

        for x in range(len(input_rows)):
            status_row = input_rows[x]
            expected_parsing = self._expected_example_one_box_parsings[x]

            builder = buildbot._parse_builder_status_from_row(status_row)

            # Make sure we aren't parsing more or less than we expect
            self.assertEqual(builder.keys(), expected_parsing.keys())

            for key, expected_value in expected_parsing.items():
                self.assertEqual(builder[key], expected_value, ("Builder %d parse failure for key: %s: Actual='%s' Expected='%s'" % (x, key, builder[key], expected_value)))

    def test_builder_with_name(self):
        """Check URL construction for a builder and walk a build chain
        (current and previous build) using a mocked build dictionary."""
        buildbot = BuildBot()

        builder = buildbot.builder_with_name("Test Builder")
        self.assertEqual(builder.name(), "Test Builder")
        self.assertEqual(builder.url(), "http://build.webkit.org/builders/Test%20Builder")
        self.assertEqual(builder.url_encoded_name(), "Test%20Builder")
        self.assertEqual(builder.results_url(), "http://build.webkit.org/results/Test%20Builder")

        # Override _fetch_build_dictionary function to not touch the network.
        def mock_fetch_build_dictionary(self, build_number):
            build_dictionary = {
                "sourceStamp": {
                    "revision" : 2 * build_number,
                },
                "number" : int(build_number),
                "results" : build_number % 2,  # 0 means pass
            }
            return build_dictionary
        buildbot._fetch_build_dictionary = mock_fetch_build_dictionary

        build = builder.build(10)
        self.assertEqual(build.builder(), builder)
        self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/10")
        self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r20%20%2810%29")
        self.assertEqual(build.revision(), 20)
        self.assertTrue(build.is_green())

        build = build.previous_build()
        self.assertEqual(build.builder(), builder)
        self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/9")
        self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r18%20%289%29")
        self.assertEqual(build.revision(), 18)
        self.assertFalse(build.is_green())

        self.assertIsNone(builder.build(None))

    # Captured twisted web directory-listing HTML with one directory entry
    # and one zip-file entry.
    _example_directory_listing = '''
<h1>Directory listing for /results/SnowLeopard Intel Leaks/</h1>
<table>
<tr class="alt">
<th>Filename</th>
<th>Size</th>
<th>Content type</th>
<th>Content encoding</th>
</tr>
<tr class="directory ">
<td><a href="r47483%20%281%29/"><b>r47483 (1)/</b></a></td>
<td><b></b></td>
<td><b>[Directory]</b></td>
<td><b></b></td>
</tr>
<tr class="file alt">
<td><a href="r47484%20%282%29.zip">r47484 (2).zip</a></td>
<td>89K</td>
<td>[application/zip]</td>
<td></td>
</tr>
'''

    # Expected parse of the listing above, one dict per data row.
    _expected_files = [
        {
            "filename" : "r47483 (1)/",
            "size" : "",
            "type" : "[Directory]",
            "encoding" : "",
        },
        {
            "filename" : "r47484 (2).zip",
            "size" : "89K",
            "type" : "[application/zip]",
            "encoding" : "",
        },
    ]

    def test_parse_build_to_revision_map(self):
        """_parse_twisted_directory_listing() should extract a file-info dict
        for every data row in a twisted directory listing."""
        buildbot = BuildBot()
        files = buildbot._parse_twisted_directory_listing(self._example_directory_listing)
        self.assertEqual(self._expected_files, files)

    # Captured "Recent Builds" builder page. Note the rows deliberately mix
    # CSS class and cell text (e.g. class="success" with text "failure") to
    # pin down which signal the parser trusts.
    _fake_builder_page = '''
<body>
<div class="content">
<h1>Some Builder</h1>
<p>(<a href="../waterfall?show=Some Builder">view in waterfall</a>)</p>
<div class="column">
<h2>Recent Builds:</h2>
<table class="info">
<tr>
<th>Time</th>
<th>Revision</th>
<th>Result</th> <th>Build #</th>
<th>Info</th>
</tr>
<tr class="alt">
<td>Jan 10 15:49</td>
<td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
<td class="success">failure</td> <td><a href=".../37604">#37604</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="">
<td>Jan 10 15:32</td>
<td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
<td class="success">failure</td> <td><a href=".../37603">#37603</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="alt">
<td>Jan 10 15:18</td>
<td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
<td class="success">success</td> <td><a href=".../37602">#37602</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="">
<td>Jan 10 14:51</td>
<td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
<td class="failure">failure</td> <td><a href=".../37601">#37601</a></td>
<td class="left">Failed compile-webkit</td>
</tr>
</table>
</body>'''

    # A builder page with no successful build at all, including a "retry"
    # row with an unknown ("??") revision that must be skipped.
    _fake_builder_page_without_success = '''
<body>
<table>
<tr class="alt">
<td>Jan 10 15:49</td>
<td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="">
<td>Jan 10 15:32</td>
<td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="alt">
<td>Jan 10 15:18</td>
<td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="">
<td>Jan 10 11:58</td>
<td><span class="revision" title="Revision ??"><a href="http://trac.webkit.org/changeset/%3F%3F">??</a></span></td>
<td class="retry">retry</td>
</tr>
<tr class="">
<td>Jan 10 14:51</td>
<td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
<td class="failure">failure</td>
</tr>
</table>
</body>'''

    def test_revisions_for_builder(self):
        """_revisions_for_builder() should yield (revision, is_green) pairs
        in page order, dropping rows without a numeric revision."""
        buildbot = BuildBot()
        buildbot._fetch_builder_page = lambda builder: builder.page

        builder_with_success = Builder('Some builder', None)
        builder_with_success.page = self._fake_builder_page
        self.assertEqual(buildbot._revisions_for_builder(builder_with_success), [(104643, False), (104636, False), (104635, True), (104633, False)])

        builder_without_success = Builder('Some builder', None)
        builder_without_success.page = self._fake_builder_page_without_success
        self.assertEqual(buildbot._revisions_for_builder(builder_without_success), [(104643, False), (104636, False), (104635, False), (104633, False)])

    def test_find_green_revision(self):
        """_find_green_revision() must pick the newest revision known (or
        safely inferable) to be green on every builder, or None."""
        buildbot = BuildBot()
        # All builders green at r1 only.
        self.assertEqual(buildbot._find_green_revision({
            'Builder 1': [(1, True), (3, True)],
            'Builder 2': [(1, True), (3, False)],
            'Builder 3': [(1, True), (3, True)],
        }), 1)
        # All builders green at r3.
        self.assertEqual(buildbot._find_green_revision({
            'Builder 1': [(1, False), (3, True)],
            'Builder 2': [(1, True), (3, True)],
            'Builder 3': [(1, True), (3, True)],
        }), 3)
        # No revision green (or implied green) everywhere.
        self.assertEqual(buildbot._find_green_revision({
            'Builder 1': [(1, True), (2, True)],
            'Builder 2': [(1, False), (2, True), (3, True)],
            'Builder 3': [(1, True), (3, True)],
        }), None)
        self.assertEqual(buildbot._find_green_revision({
            'Builder 1': [(1, True), (2, True)],
            'Builder 2': [(1, True), (2, True), (3, True)],
            'Builder 3': [(1, True), (3, True)],
        }), 2)
        self.assertEqual(buildbot._find_green_revision({
            'Builder 1': [(1, False), (2, True)],
            'Builder 2': [(1, True), (3, True)],
            'Builder 3': [(1, True), (3, True)],
        }), None)
        # Unbuilt revisions between green builds count as green.
        self.assertEqual(buildbot._find_green_revision({
            'Builder 1': [(1, True), (3, True)],
            'Builder 2': [(1, False), (2, True), (3, True), (4, True)],
            'Builder 3': [(2, True), (4, True)],
        }), 3)
        self.assertEqual(buildbot._find_green_revision({
            'Builder 1': [(1, True), (3, True)],
            'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
            'Builder 3': [(2, True), (4, True)],
        }), None)
        self.assertEqual(buildbot._find_green_revision({
            'Builder 1': [(1, True), (3, True)],
            'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
            'Builder 3': [(2, True), (3, True), (4, True)],
        }), 3)
        # A builder with no builds blocks any answer.
        self.assertEqual(buildbot._find_green_revision({
            'Builder 1': [(1, True), (2, True)],
            'Builder 2': [],
            'Builder 3': [(1, True), (2, True)],
        }), None)
        self.assertEqual(buildbot._find_green_revision({
            'Builder 1': [(1, True), (3, False), (5, True), (10, True), (12, False)],
            'Builder 2': [(1, True), (3, False), (7, True), (9, True), (12, False)],
            'Builder 3': [(1, True), (3, True), (7, True), (11, False), (12, True)],
        }), 7)

    # Stand-in for Builder._fetch_build: only build 5 is the "correct" one.
    def _fetch_build(self, build_number):
        if build_number == 5:
            return "correct build"
        return "wrong build"

    # Stand-in for Builder._fetch_revision_to_build_map.
    def _fetch_revision_to_build_map(self):
        return {'r5': 5, 'r2': 2, 'r3': 3}

    def test_latest_cached_build(self):
        """latest_cached_build() should pick the highest build number from
        the revision-to-build map (5 here) and fetch that build."""
        b = Builder('builder', BuildBot())
        b._fetch_build = self._fetch_build
        b._fetch_revision_to_build_map = self._fetch_revision_to_build_map
        self.assertEqual("correct build", b.latest_cached_build())

    # Stand-in for Build.results_url used by test_results_zip_url below.
    def results_url(self):
        return "some-url"

    def test_results_zip_url(self):
        """results_zip_url() should simply append ".zip" to results_url()."""
        b = Build(None, 123, 123, False)
        b.results_url = self.results_url
        self.assertEqual("some-url.zip", b.results_zip_url())
|
coloringchaos/three.js
|
refs/heads/master
|
utils/exporters/blender/addons/io_three/exporter/material.py
|
124
|
from .. import constants, logger
from . import base_classes, utilities, api
class Material(base_classes.BaseNode):
    """Class that wraps material nodes.

    On construction it parses the Blender material ``node`` into the
    three.js attribute dictionary (this class is dict-like via BaseNode),
    including phong-only attributes and texture maps when enabled.
    """

    def __init__(self, node, parent):
        logger.debug("Material().__init__(%s)", node)
        base_classes.BaseNode.__init__(self, node, parent,
                                       constants.MATERIAL)

        # Attributes shared by all supported shader types.
        self._common_attributes()
        if self[constants.TYPE] == constants.THREE_PHONG:
            self._phong_attributes()

        # Texture maps are only exported when the MAPS option is set.
        textures = self.parent.options.get(constants.MAPS)
        if textures:
            self._update_maps()

    def _common_attributes(self):
        """Parse the common material attributes."""
        logger.debug('Material()._common_attributes()')
        # Map Blender shader names to three.js material type constants.
        dispatch = {
            constants.PHONG: constants.THREE_PHONG,
            constants.LAMBERT: constants.THREE_LAMBERT,
            constants.BASIC: constants.THREE_BASIC
        }
        shader_type = api.material.type(self.node)
        self[constants.TYPE] = dispatch[shader_type]

        # Colours are stored as packed integers (0xRRGGBB).
        diffuse = api.material.diffuse_color(self.node)
        self[constants.COLOR] = utilities.rgb2int(diffuse)

        # Basic materials have no lighting response, so no ambient/emissive.
        if self[constants.TYPE] != constants.THREE_BASIC:
            ambient = api.material.ambient_color(self.node)
            self[constants.AMBIENT] = utilities.rgb2int(ambient)

            emissive = api.material.emissive_color(self.node)
            self[constants.EMISSIVE] = utilities.rgb2int(emissive)

        vertex_color = api.material.use_vertex_colors(self.node)
        self[constants.VERTEX_COLORS] = vertex_color

        self[constants.BLENDING] = api.material.blending(self.node)
        self[constants.DEPTH_TEST] = api.material.depth_test(self.node)
        self[constants.DEPTH_WRITE] = api.material.depth_write(self.node)

    def _phong_attributes(self):
        """Parse phong specific attributes (specular colour and shininess)."""
        logger.debug("Material()._phong_attributes()")
        specular = api.material.specular_color(self.node)
        self[constants.SPECULAR] = utilities.rgb2int(specular)
        self[constants.SHININESS] = api.material.specular_coef(self.node)

    def _update_maps(self):
        """Parses maps/textures and updates the textures array
        with any new nodes found.

        Each found map is registered with the scene's texture registry and
        referenced here by its UUID.
        """
        logger.debug("Material()._update_maps()")

        # Maps available on every material type.
        mapping = (
            (api.material.diffuse_map, constants.MAP),
            (api.material.specular_map, constants.SPECULAR_MAP),
            (api.material.light_map, constants.LIGHT_MAP)
        )

        for func, key in mapping:
            map_node = func(self.node)
            if map_node:
                logger.info('Found map node %s for %s', map_node, key)
                tex_inst = self.scene.texture(map_node.name)
                self[key] = tex_inst[constants.UUID]

        # Bump/normal maps (and their scale factors) are phong-only.
        if self[constants.TYPE] == constants.THREE_PHONG:
            mapping = (
                (api.material.bump_map, constants.BUMP_MAP,
                 constants.BUMP_SCALE, api.material.bump_scale),
                (api.material.normal_map, constants.NORMAL_MAP,
                 constants.NORMAL_SCALE, api.material.normal_scale)
            )

            for func, map_key, scale_key, scale_func in mapping:
                map_node = func(self.node)
                if not map_node:
                    continue
                logger.info("Found map node %s for %s", map_node, map_key)
                tex_inst = self.scene.texture(map_node.name)
                self[map_key] = tex_inst[constants.UUID]
                self[scale_key] = scale_func(self.node)
|
yewang15215/django
|
refs/heads/master
|
django/http/multipartparser.py
|
4
|
"""
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import binascii
import cgi
import sys
from django.conf import settings
from django.core.exceptions import (
RequestDataTooBig, SuspiciousMultipartForm, TooManyFieldsSent,
)
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.text import unescape_entities
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
    """Raised when the multipart/form-data request body cannot be parsed."""
    pass
class InputStreamExhausted(Exception):
    """
    No more reads are allowed from this device.

    Raised by ChunkIter/BoundaryIter when the underlying input stream has
    been fully consumed.
    """
    pass
# Part-type markers returned by parse_boundary_stream() to classify each
# multipart section.
RAW = "raw"
FILE = "file"
FIELD = "field"

# base64.b64decode raises TypeError on Python 2 but binascii.Error on Python 3.
_BASE64_DECODE_ERROR = TypeError if six.PY2 else binascii.Error
class MultiPartParser(object):
    """
    A rfc2388 multipart/form-data parser.

    ``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """

    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.

        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the
            uploaded data.
        :encoding:
            The encoding with which to treat the incoming data.

        Raises MultiPartParserError for a non-multipart content type, a
        missing/invalid boundary, or a negative Content-Length.
        """
        # Content-Type should contain multipart and the boundary information.
        content_type = META.get('CONTENT_TYPE', '')
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' % content_type)

        # Parse the header to get the boundary to split the parts.
        ctypes, opts = parse_header(content_type.encode('ascii'))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)

        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get('CONTENT_LENGTH', 0))
        except (ValueError, TypeError):
            content_length = 0

        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" % content_length)

        if isinstance(boundary, six.text_type):
            boundary = boundary.encode('ascii')
        self._boundary = boundary
        self._input_data = input_data

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        self._chunk_size = min([2 ** 31 - 4] + possible_sizes)

        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers

    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Return a tuple containing the POST and FILES dictionary, respectively.

        Enforces DATA_UPLOAD_MAX_NUMBER_FIELDS and DATA_UPLOAD_MAX_MEMORY_SIZE
        while streaming, and dispatches file data to the upload handlers.
        """
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict(encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(
                self._input_data,
                self._meta,
                self._content_length,
                self._boundary,
                encoding,
            )
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict(mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        # Number of bytes that have been read.
        num_bytes_read = 0
        # To count the number of keys in the request.
        num_post_keys = 0
        # To limit the amount of data read from the request.
        read_size = None

        try:
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    # A part without a usable Content-Disposition name is
                    # silently skipped.
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
                    num_post_keys += 1
                    if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None and
                            settings.DATA_UPLOAD_MAX_NUMBER_FIELDS < num_post_keys):
                        raise TooManyFieldsSent(
                            'The number of GET/POST parameters exceeded '
                            'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
                        )

                    # Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
                    if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
                        read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read

                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read(size=read_size)
                        num_bytes_read += len(raw_data)
                        try:
                            data = base64.b64decode(raw_data)
                        except _BASE64_DECODE_ERROR:
                            # Undecodable base64 is kept as the raw bytes.
                            data = raw_data
                    else:
                        data = field_stream.read(size=read_size)
                        num_bytes_read += len(data)

                    # Add two here to make the check consistent with the
                    # x-www-form-urlencoded check that includes '&='.
                    num_bytes_read += len(field_name) + 2
                    if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
                            num_bytes_read > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                        raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')

                    self._post.appendlist(field_name, force_text(data, encoding, errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if file_name:
                        file_name = force_text(file_name, encoding, errors='replace')
                        file_name = self.IE_sanitize(unescape_entities(file_name))
                    if not file_name:
                        continue

                    content_type, content_type_extra = meta_data.get('content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(
                                    field_name, file_name, content_type,
                                    content_length, charset, content_type_extra,
                                )
                            except StopFutureHandlers:
                                # A handler claimed exclusive ownership of
                                # this file; don't offer it to later handlers.
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.
                                stripped_chunk = b"".join(chunk.split())

                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 - remaining)
                                    stripped_chunk += b"".join(over_chunk.split())
                                    remaining = len(stripped_chunk) % 4

                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    msg = "Could not decode base64 data: %r" % e
                                    six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk, counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # Don't continue if the chunk received by
                                    # the handler is None.
                                    break

                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                break

        return self._post, self._files

    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signaling that takes place when a file is complete.

        The first handler that returns a file object wins; its object is
        stored in the FILES dict under the original field name.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(force_text(old_field_name, self._encoding, errors='replace'), file_obj)
                break

    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        return filename and filename[filename.rfind("\\") + 1:].strip()

    def _close_files(self):
        # Free up all file handles.
        # FIXME: this currently assumes that upload handlers store the file as 'file'
        # We should document that... (Maybe add handler.free_file to complement new_file)
        for handler in self._upload_handlers:
            if hasattr(handler, 'file'):
                handler.file.close()
class LazyStream(six.Iterator):
    """
    The LazyStream wrapper allows one to get and "unget" bytes from a stream.

    Given a producer object (an iterator that yields bytestrings), the
    LazyStream object will support iteration, reading, and keeping a "look-back"
    variable in case you need to "unget" some bytes.
    """

    def __init__(self, producer, length=None):
        """
        Every LazyStream must have a producer when instantiated.

        A producer is an iterable that returns a string each time it
        is called.
        """
        self._producer = producer
        self._empty = False
        # Bytes pushed back by unget(); served before the producer.
        self._leftover = b''
        self.length = length
        self.position = 0
        self._remaining = length
        # Tracks recent unget sizes to detect parser loops (see below).
        self._unget_history = []

    def tell(self):
        """Return the current read position (bytes consumed minus ungets)."""
        return self.position

    def read(self, size=None):
        """Read up to ``size`` bytes (the whole remainder if size is None),
        respecting the stream's remaining-length bound."""
        def parts():
            remaining = self._remaining if size is None else size
            # do the whole thing in one shot if no limit was provided.
            if remaining is None:
                yield b''.join(self)
                return

            # otherwise do some bookkeeping to return exactly enough
            # of the stream and stashing any extra content we get from
            # the producer
            while remaining != 0:
                assert remaining > 0, 'remaining bytes to read should never go negative'

                try:
                    chunk = next(self)
                except StopIteration:
                    return
                else:
                    emitting = chunk[:remaining]
                    # Push back whatever we over-read past the limit.
                    self.unget(chunk[remaining:])
                    remaining -= len(emitting)
                    yield emitting

        out = b''.join(parts())
        return out

    def __next__(self):
        """
        Used when the exact number of bytes to read is unimportant.

        This procedure just returns whatever is chunk is conveniently returned
        from the iterator instead. Useful to avoid unnecessary bookkeeping if
        performance is an issue.
        """
        if self._leftover:
            output = self._leftover
            self._leftover = b''
        else:
            output = next(self._producer)
            # A successful forward read resets the unget-loop detector.
            self._unget_history = []
        self.position += len(output)
        return output

    def close(self):
        """
        Used to invalidate/disable this lazy stream.

        Replaces the producer with an empty list. Any leftover bytes that have
        already been read will still be reported upon read() and/or next().
        """
        self._producer = []

    def __iter__(self):
        return self

    def unget(self, bytes):
        """
        Places bytes back onto the front of the lazy stream.

        Future calls to read() will return those bytes first. The
        stream position and thus tell() will be rewound.
        """
        if not bytes:
            return
        self._update_unget_history(len(bytes))
        self.position -= len(bytes)
        self._leftover = b''.join([bytes, self._leftover])

    def _update_unget_history(self, num_bytes):
        """
        Updates the unget history as a sanity check to see if we've pushed
        back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're mostly likely in an
        infinite loop of some sort. This is usually caused by a
        maliciously-malformed MIME request.
        """
        self._unget_history = [num_bytes] + self._unget_history[:49]
        number_equal = len([
            current_number for current_number in self._unget_history
            if current_number == num_bytes
        ])

        if number_equal > 40:
            raise SuspiciousMultipartForm(
                "The multipart parser got stuck, which shouldn't happen with"
                " normal uploaded files. Check for malicious upload activity;"
                " if there is none, report this to the Django developers."
            )
class ChunkIter(six.Iterator):
    """
    An iterable that will yield chunks of data. Given a file-like object as the
    constructor, this object will yield chunks of read operations from that
    object.
    """

    def __init__(self, flo, chunk_size=64 * 1024):
        self.flo = flo
        self.chunk_size = chunk_size

    def __next__(self):
        try:
            data = self.flo.read(self.chunk_size)
        except InputStreamExhausted:
            # Treat an exhausted underlying stream the same as EOF.
            raise StopIteration()
        if data:
            return data
        else:
            # An empty read means end of file.
            raise StopIteration()

    def __iter__(self):
        return self
class InterBoundaryIter(six.Iterator):
    """
    A Producer that will iterate over boundaries.

    Each item yielded is a LazyStream covering one multipart section, i.e.
    everything up to (but excluding) the next boundary.
    """

    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return LazyStream(BoundaryIter(self._stream, self._boundary))
        except InputStreamExhausted:
            # BoundaryIter raises this when the underlying stream is empty,
            # which ends the iteration over sections.
            raise StopIteration()
class BoundaryIter(six.Iterator):
    """
    A Producer that is sensitive to boundaries.

    Will happily yield bytes until a boundary is found. Will yield the bytes
    before the boundary, throw away the boundary bytes themselves, and push the
    post-boundary bytes back on the stream.

    The future calls to next() after locating the boundary will raise a
    StopIteration exception.
    """

    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary
        self._done = False
        # rollback an additional six bytes because the format is like
        # this: CRLF<boundary>[--CRLF]
        self._rollback = len(boundary) + 6

        # Try to use mx fast string search if available. Otherwise
        # use Python find. Wrap the latter for consistency.
        unused_char = self._stream.read(1)
        if not unused_char:
            raise InputStreamExhausted()
        self._stream.unget(unused_char)

    def __iter__(self):
        return self

    def __next__(self):
        if self._done:
            raise StopIteration()

        stream = self._stream
        rollback = self._rollback

        bytes_read = 0
        chunks = []
        # Accumulate a little more than the rollback window so a boundary
        # straddling chunk edges is still found.
        for bytes in stream:
            bytes_read += len(bytes)
            chunks.append(bytes)
            if bytes_read > rollback:
                break
            if not bytes:
                break
        else:
            # The stream ran dry: this is the final chunk of the section.
            self._done = True

        if not chunks:
            raise StopIteration()

        chunk = b''.join(chunks)
        boundary = self._find_boundary(chunk, len(chunk) < self._rollback)

        if boundary:
            end, next = boundary
            # Everything after the boundary belongs to the next section.
            stream.unget(chunk[next:])
            self._done = True
            return chunk[:end]
        else:
            # make sure we don't treat a partial boundary (and
            # its separators) as data
            if not chunk[:-rollback]:  # and len(chunk) >= (len(self._boundary) + 6):
                # There's nothing left, we should just return and mark as done.
                self._done = True
                return chunk
            else:
                stream.unget(chunk[-rollback:])
                return chunk[:-rollback]

    def _find_boundary(self, data, eof=False):
        """
        Finds a multipart boundary in data.

        Should no boundary exist in the data None is returned instead. Otherwise
        a tuple containing the indices of the following are returned:

         * the end of current encapsulation
         * the start of the next encapsulation
        """
        index = data.find(self._boundary)
        if index < 0:
            return None
        else:
            end = index
            next = index + len(self._boundary)
            # backup over CRLF
            last = max(0, end - 1)
            if data[last:last + 1] == b'\n':
                end -= 1
            last = max(0, end - 1)
            if data[last:last + 1] == b'\r':
                end -= 1
            return end, next
def exhaust(stream_or_iterable):
    """Drain *stream_or_iterable* completely, discarding every chunk.

    Accepts either an iterable or a file-like stream; a non-iterable
    argument is wrapped in a ChunkIter so its contents can be read out.
    Raise a MultiPartParserError if the argument is not a stream or an
    iterable.
    """
    try:
        source = iter(stream_or_iterable)
    except TypeError:
        # Not directly iterable -- assume a file-like object and consume
        # it in 16K reads instead.
        source = ChunkIter(stream_or_iterable, 16384)

    if source is None:
        raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')

    # Pull every item and throw it away.
    for _ in source:
        pass
def parse_boundary_stream(stream, max_header_size):
    """
    Parses one and exactly one stream that encapsulates a boundary.

    Returns a (part_type, headers_dict, stream) triple where part_type is
    RAW (no parsable header), FIELD (form field) or FILE (file upload).
    """
    # Stream at beginning of header, look for end of header
    # and parse it if found. The header must fit within one
    # chunk.
    chunk = stream.read(max_header_size)

    # 'find' returns the top of these four bytes, so we'll
    # need to munch them later to prevent them from polluting
    # the payload.
    header_end = chunk.find(b'\r\n\r\n')

    def _parse_header(line):
        # Split one raw header line into (name, (value, params)).
        main_value_pair, params = parse_header(line)
        try:
            name, value = main_value_pair.split(':', 1)
        except ValueError:
            raise ValueError("Invalid header: %r" % line)
        return name, (value, params)

    if header_end == -1:
        # we find no header, so we just mark this fact and pass on
        # the stream verbatim
        stream.unget(chunk)
        return (RAW, {}, stream)

    header = chunk[:header_end]

    # here we place any excess chunk back onto the stream, as
    # well as throwing away the CRLFCRLF bytes from above.
    stream.unget(chunk[header_end + 4:])

    TYPE = RAW
    outdict = {}

    # Eliminate blank lines
    for line in header.split(b'\r\n'):
        # This terminology ("main value" and "dictionary of
        # parameters") is from the Python docs.
        try:
            name, (value, params) = _parse_header(line)
        except ValueError:
            continue

        if name == 'content-disposition':
            TYPE = FIELD
            # A filename parameter upgrades the part to a file upload.
            if params.get('filename'):
                TYPE = FILE

        outdict[name] = value, params

    if TYPE == RAW:
        stream.unget(chunk)

    return (TYPE, outdict, stream)
class Parser(object):
    """Iterates over the sections of a multipart body, yielding one
    (part_type, headers, stream) triple per boundary-delimited part."""

    def __init__(self, stream, boundary):
        self._stream = stream
        # Boundaries appear in the body prefixed with two dashes.
        self._separator = b'--' + boundary

    def __iter__(self):
        boundarystream = InterBoundaryIter(self._stream, self._separator)
        for sub_stream in boundarystream:
            # Iterate over each part
            yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
    """
    Parse the header into a key-value.
    Input (line): bytes, output: unicode for key/name, bytes for value which
    will be decoded later.
    """
    # Prefix with b';' so the header's main value is parsed with the same
    # splitter as its parameters; it comes back as the first list element.
    plist = _parse_header_params(b';' + line)
    key = plist.pop(0).lower().decode('ascii')
    pdict = {}
    for p in plist:
        i = p.find(b'=')
        if i >= 0:
            has_encoding = False
            name = p[:i].strip().lower().decode('ascii')
            if name.endswith('*'):
                # Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
                # http://tools.ietf.org/html/rfc2231#section-4
                name = name[:-1]
                if p.count(b"'") == 2:
                    # Exactly two apostrophes means the encoding'lang'value
                    # triplet is present and can be split below.
                    has_encoding = True
            value = p[i + 1:].strip()
            if has_encoding:
                encoding, lang, value = value.split(b"'")
                # Percent-decoding differs between Python versions: on 3.x
                # unquote() takes the encoding directly, on 2.x the decoded
                # bytes are re-interpreted afterwards.
                if six.PY3:
                    value = unquote(value.decode(), encoding=encoding.decode())
                else:
                    value = unquote(value).decode(encoding)
            if len(value) >= 2 and value[:1] == value[-1:] == b'"':
                # Strip surrounding quotes and unescape backslash sequences.
                value = value[1:-1]
                value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
            pdict[name] = value
    return key, pdict
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
|
leeseuljeong/leeseulstack_neutron
|
refs/heads/master
|
neutron/plugins/ml2/drivers/arista/__init__.py
|
12133432
| |
mozilla/ChangeDetector
|
refs/heads/dev
|
analysis/__init__.py
|
12133432
| |
sjh/python
|
refs/heads/master
|
whoscall_fast_query.py
|
1
|
#!/usr/bin/env python3
# _*_ coding: utf-8 _*_
from __future__ import print_function
import requests
import sys
from bs4 import BeautifulSoup
API_URL = "https://number.whoscall.com/zh-TW/tw/"
def query_whoscall(phone_number):
    """Query the whoscall database for *phone_number* via HTTP GET.

    Prints the owner information scraped from the whoscall page and
    returns True. Network errors from requests propagate to the caller.
    """
    response = requests.get("{}{}".format(API_URL, phone_number))
    if response.status_code == 200:
        document = response.content
        soup = BeautifulSoup(document, 'html.parser')
        title_string = soup.title.string.strip().split(' ')
        # Defaults protect against page layouts other than the two known
        # title formats; previously these names could be left unbound and
        # the print below raised NameError.
        owner_name = "無資料"
        owner_phone = phone_number
        owner_city = "無資料"
        if len(title_string) == 12:
            owner_name = title_string[0]
            owner_phone = title_string[2]
            owner_city = title_string[6]
        elif len(title_string) == 14:
            owner_phone = title_string[4]
            owner_city = title_string[8]
        print("\n電話號碼: {}\n擁有者: {}\n擁有者所在城市: {}".format(owner_phone, owner_name, owner_city))
        # Walk the fixed DOM structure of the result page down to the
        # number-information container. NOTE(review): these hard-coded
        # child indexes break whenever whoscall changes its markup.
        site_container = soup.body.div.contents[3]
        site_main_container = site_container.contents[1]
        ndp_container = site_main_container.contents[1]
        number_info = ndp_container.contents[3]
        if number_info.p is None:
            pass
        elif "這個號碼還沒有被回報" == number_info.p.string.strip():
            print("這個號碼還沒有被回報")
        else:
            owner_name = number_info.h1.string.strip()
            number_info_ohours_addr = number_info.contents[5]
            all_spans = number_info_ohours_addr.findAll("span")
            if all_spans:
                business_hour = all_spans[1].span.string
            else:
                business_hour = None
            try:
                if all_spans:
                    address = all_spans[-1].string
                else:
                    address = None
            except AttributeError:
                address = u""
            if business_hour:
                print("營業狀況: {}".format(business_hour))
            if address:
                print("地址: {}".format(address))
    return True
def test_query_whoscall():
    """Smoke-test query_whoscall() against three known phone numbers."""
    for number in ("0227208889", "0286651720", "0286651719"):
        assert query_whoscall(number) is True
if __name__ == "__main__":
    # The original CLI loop is kept below as an inert string literal; it was
    # disabled when whoscall added ReCaptcha. Restore it if scraping becomes
    # possible again.
    """
    if len(sys.argv) >= 2:
        for number in sys.argv[1:]:
            query_whoscall(number)
    """
    print("Whoscall web page has implemented Google ReCaptcha and cannot be used for command \
line interface now.")
|
afandria/sky_engine
|
refs/heads/master
|
sky/engine/bindings/scripts/idl_types.py
|
14
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""IDL type handling.
Classes:
IdlTypeBase
IdlType
IdlUnionType
IdlArrayOrSequenceType
IdlArrayType
IdlSequenceType
IdlNullableType
"""
from collections import defaultdict
################################################################################
# IDL types
################################################################################
# Sets of IDL type names, used by the Idl*Type classes below to classify a
# parsed type (integer / numeric / primitive / basic / string).
INTEGER_TYPES = frozenset([
    # http://www.w3.org/TR/WebIDL/#dfn-integer-type
    'byte',
    'octet',
    'short',
    'unsigned short',
    # int and unsigned are not IDL types
    'long',
    'unsigned long',
    'long long',
    'unsigned long long',
])
NUMERIC_TYPES = (INTEGER_TYPES | frozenset([
    # http://www.w3.org/TR/WebIDL/#dfn-numeric-type
    'float',
    'unrestricted float',
    'double',
    'unrestricted double',
]))
# http://www.w3.org/TR/WebIDL/#dfn-primitive-type
PRIMITIVE_TYPES = (frozenset(['boolean']) | NUMERIC_TYPES)
BASIC_TYPES = (PRIMITIVE_TYPES | frozenset([
    # Built-in, non-composite, non-object data types
    # http://heycam.github.io/webidl/#idl-types
    'DOMString',
    'ByteString',
    'Date',
    # http://heycam.github.io/webidl/#es-type-mapping
    'void',
    # http://encoding.spec.whatwg.org/#type-scalarvaluestring
    'ScalarValueString',
]))
# Maps an IDL type to its CamelCase "type name" used in generated identifiers.
TYPE_NAMES = {
    # http://heycam.github.io/webidl/#dfn-type-name
    'any': 'Any',
    'boolean': 'Boolean',
    'byte': 'Byte',
    'octet': 'Octet',
    'short': 'Short',
    'unsigned short': 'UnsignedShort',
    'long': 'Long',
    'unsigned long': 'UnsignedLong',
    'long long': 'LongLong',
    'unsigned long long': 'UnsignedLongLong',
    'float': 'Float',
    'unrestricted float': 'UnrestrictedFloat',
    'double': 'Double',
    'unrestricted double': 'UnrestrictedDouble',
    'DOMString': 'String',
    'ByteString': 'ByteString',
    'ScalarValueString': 'ScalarValueString',
    'object': 'Object',
    'Date': 'Date',
}
# NOTE: these are *type names* (TYPE_NAMES values), not raw IDL type strings.
STRING_TYPES = frozenset([
    # http://heycam.github.io/webidl/#es-interface-call (step 10.11)
    # (Interface object [[Call]] method's string types.)
    'String',
    'ByteString',
    'ScalarValueString',
])
################################################################################
# Inheritance
################################################################################
# Maps an interface name to the list of its ancestor interface names.
ancestors = defaultdict(list)


def inherits_interface(interface_name, ancestor_name):
    """Return True if *interface_name* is *ancestor_name* or inherits it."""
    if interface_name == ancestor_name:
        return True
    return ancestor_name in ancestors[interface_name]


def set_ancestors(new_ancestors):
    """Merge *new_ancestors* (name -> ancestor name list) into the table."""
    ancestors.update(new_ancestors)
class IdlTypeBase(object):
    """Common base for IdlType, IdlUnionType, IdlArrayOrSequenceType and IdlNullableType."""

    def __getattr__(self, name):
        # Unknown attributes read as None instead of raising (analogous to
        # how Jinja treats undefined variables), so templates and callers
        # can probe flags like is_nullable without defining them everywhere.
        return None

    def __str__(self):
        raise NotImplementedError(
            '__str__() should be defined in subclasses')

    def resolve_typedefs(self, typedefs):
        raise NotImplementedError(
            'resolve_typedefs should be defined in subclasses')
################################################################################
# IdlType
################################################################################
class IdlType(IdlTypeBase):
    """A simple (non-union, non-array/sequence, non-nullable) IDL type.

    Classification properties consult the module-level type-name sets and the
    class-level registries below, which are shared across all instances and
    populated via the set_* classmethods during IDL definition loading.
    """
    # FIXME: incorporate Nullable, etc.
    # to support types like short?[] vs. short[]?, instead of treating these
    # as orthogonal properties (via flags).
    callback_functions = set()
    callback_interfaces = set()
    dictionaries = set()
    enums = {}  # name -> values
    def __init__(self, base_type, is_unrestricted=False):
        super(IdlType, self).__init__()
        if is_unrestricted:
            # WebIDL spells this as e.g. 'unrestricted float'.
            self.base_type = 'unrestricted %s' % base_type
        else:
            self.base_type = base_type
    def __str__(self):
        return self.base_type
    @property
    def is_basic_type(self):
        return self.base_type in BASIC_TYPES
    @property
    def is_callback_function(self):
        return self.base_type in IdlType.callback_functions
    @property
    def is_callback_interface(self):
        return self.base_type in IdlType.callback_interfaces
    @property
    def is_enum(self):
        # FIXME: add an IdlEnumType class and a resolve_enums step at end of
        # IdlDefinitions constructor
        return self.name in IdlType.enums
    @property
    def enum_values(self):
        # Raises KeyError if this type is not a registered enum.
        return IdlType.enums[self.name]
    @property
    def is_integer_type(self):
        return self.base_type in INTEGER_TYPES
    @property
    def is_numeric_type(self):
        return self.base_type in NUMERIC_TYPES
    @property
    def is_primitive_type(self):
        return self.base_type in PRIMITIVE_TYPES
    @property
    def is_interface_type(self):
        # Anything that is not another type is an interface type.
        # http://www.w3.org/TR/WebIDL/#idl-types
        # http://www.w3.org/TR/WebIDL/#idl-interface
        # In C++ these are RefPtr or PassRefPtr types.
        return not(self.is_basic_type or
                   self.is_callback_function or
                   self.is_enum or
                   self.name == 'Any' or
                   self.name == 'Object' or
                   self.name == 'Promise')  # Promise will be basic in future
    @property
    def is_string_type(self):
        return self.name in STRING_TYPES
    @property
    def may_raise_exception_on_conversion(self):
        # Integer conversion can throw on out-of-range/non-finite values;
        # ByteString/ScalarValueString conversion can throw on bad code units.
        return (self.is_integer_type or
                self.name in ('ByteString', 'ScalarValueString'))
    @property
    def is_union_type(self):
        return isinstance(self, IdlUnionType)
    @property
    def name(self):
        """Return type name
        http://heycam.github.io/webidl/#dfn-type-name
        """
        base_type = self.base_type
        return TYPE_NAMES.get(base_type, base_type)
    @classmethod
    def set_callback_functions(cls, new_callback_functions):
        cls.callback_functions.update(new_callback_functions)
    @classmethod
    def set_callback_interfaces(cls, new_callback_interfaces):
        cls.callback_interfaces.update(new_callback_interfaces)
    @classmethod
    def set_dictionaries(cls, new_dictionaries):
        cls.dictionaries.update(new_dictionaries)
    @classmethod
    def set_enums(cls, new_enums):
        cls.enums.update(new_enums)
    def resolve_typedefs(self, typedefs):
        # This function either returns |self| or a different object.
        # FIXME: Rename typedefs_resolved().
        return typedefs.get(self.base_type, self)
################################################################################
# IdlUnionType
################################################################################
class IdlUnionType(IdlTypeBase):
    """Union type, e.g. (A or B).

    http://heycam.github.io/webidl/#idl-union
    """

    def __init__(self, member_types):
        super(IdlUnionType, self).__init__()
        self.member_types = member_types

    @property
    def is_union_type(self):
        return True

    @property
    def name(self):
        """Return type name (or inner type name if nullable)
        http://heycam.github.io/webidl/#dfn-type-name
        """
        member_names = [member.name for member in self.member_types]
        return 'Or'.join(member_names)

    def resolve_typedefs(self, typedefs):
        # Substitute any member that is a typedef alias; members with no
        # entry in |typedefs| are kept unchanged.
        resolved = []
        for member in self.member_types:
            resolved.append(typedefs.get(member, member))
        self.member_types = resolved
        return self
################################################################################
# IdlArrayOrSequenceType, IdlArrayType, IdlSequenceType
################################################################################
class IdlArrayOrSequenceType(IdlTypeBase):
    """Base class for IdlArrayType and IdlSequenceType."""
    def __init__(self, element_type):
        super(IdlArrayOrSequenceType, self).__init__()
        # The contained element type (itself an Idl*Type instance).
        self.element_type = element_type
    def resolve_typedefs(self, typedefs):
        # Resolution applies to the element type; the container survives.
        self.element_type = self.element_type.resolve_typedefs(typedefs)
        return self
class IdlArrayType(IdlArrayOrSequenceType):
    """IDL array type, e.g. ``T[]``."""

    def __init__(self, element_type):
        super(IdlArrayType, self).__init__(element_type)

    def __str__(self):
        return '%s[]' % self.element_type

    @property
    def name(self):
        # Type name per http://heycam.github.io/webidl/#dfn-type-name
        return '%sArray' % self.element_type.name
class IdlSequenceType(IdlArrayOrSequenceType):
    """IDL sequence type, e.g. ``sequence<T>``."""

    def __init__(self, element_type):
        super(IdlSequenceType, self).__init__(element_type)

    def __str__(self):
        return 'sequence<%s>' % self.element_type

    @property
    def name(self):
        # Type name per http://heycam.github.io/webidl/#dfn-type-name
        return '%sSequence' % self.element_type.name
################################################################################
# IdlNullableType
################################################################################
class IdlNullableType(IdlTypeBase):
    """Nullable wrapper around another Idl*Type (``T?`` in IDL).

    Unknown attribute reads are forwarded to the wrapped type via
    __getattr__, so the wrapper is mostly transparent to callers.
    """
    def __init__(self, inner_type):
        super(IdlNullableType, self).__init__()
        self.inner_type = inner_type
    def __str__(self):
        # FIXME: Dictionary::ConversionContext::setConversionType can't
        # handle the '?' in nullable types (passes nullability separately).
        # Update that function to handle nullability from the type name,
        # simplifying its signature.
        # return str(self.inner_type) + '?'
        return str(self.inner_type)
    def __getattr__(self, name):
        # Delegate everything not defined here (is_basic_type, etc.) to the
        # inner type; only triggers for attributes missing on this object.
        return getattr(self.inner_type, name)
    @property
    def is_nullable(self):
        return True
    @property
    def name(self):
        return self.inner_type.name
    def resolve_typedefs(self, typedefs):
        self.inner_type = self.inner_type.resolve_typedefs(typedefs)
        return self
|
weolar/miniblink49
|
refs/heads/master
|
third_party/WebKit/Tools/Scripts/webkitpy/tool/commands/prettydiff.py
|
186
|
# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool import steps
class PrettyDiff(AbstractSequencedCommand):
    """Tool command that shows the pretty diff in the default browser.

    Purely declarative: the sequenced-command base class runs the listed
    steps in order.
    """
    name = "pretty-diff"
    help_text = "Shows the pretty diff in the default browser"
    show_in_main_help = True
    steps = [
        steps.ConfirmDiff,
    ]
|
metacloud/percona-xtrabackup
|
refs/heads/master
|
percona-server-5.1-xtrabackup/python-for-subunit2junitxml/testtools/tests/test_testsuite.py
|
42
|
# Copyright (c) 2009 testtools developers. See LICENSE for details.
"""Test ConcurrentTestSuite and related things."""
__metaclass__ = type
import datetime
import unittest
from testtools import (
ConcurrentTestSuite,
iterate_tests,
TestCase,
)
from testtools.matchers import (
Equals,
)
from testtools.tests.helpers import LoggingResult
class TestConcurrentTestSuiteRun(TestCase):
    """Tests for running tests via ConcurrentTestSuite."""
    def test_trivial(self):
        # Run two trivial tests concurrently and check that both are
        # reported (in some order) to the logging result.
        log = []
        result = LoggingResult(log)
        class Sample(TestCase):
            def __hash__(self):
                return id(self)
            def test_method1(self):
                pass
            def test_method2(self):
                pass
        test1 = Sample('test_method1')
        test2 = Sample('test_method2')
        original_suite = unittest.TestSuite([test1, test2])
        suite = ConcurrentTestSuite(original_suite, self.split_suite)
        suite.run(result)
        # 0 is the timestamp for the first test starting.
        # log[1] is therefore the first 'startTest' event and log[-1] the
        # last event; each log entry's second element is the test object.
        test1 = log[1][1]
        test2 = log[-1][1]
        self.assertIsInstance(test1, Sample)
        self.assertIsInstance(test2, Sample)
        self.assertNotEqual(test1.id(), test2.id())
    def split_suite(self, suite):
        # Partition the suite into exactly two halves, one per worker.
        tests = list(iterate_tests(suite))
        return tests[0], tests[1]
def test_suite():
    """Return this module's tests, following the ``unittest`` protocol."""
    from unittest import TestLoader
    loader = TestLoader()
    return loader.loadTestsFromName(__name__)
|
tylertian/Openstack
|
refs/heads/master
|
openstack F/glance/glance/tests/unit/test_context_middleware.py
|
1
|
import webob
from glance.api.middleware import context
import glance.context
from glance.tests.unit import base
class TestContextMiddleware(base.IsolatedUnitTest):
    """Tests for glance's authenticated ContextMiddleware."""
    def _build_request(self, roles=None, identity_status='Confirmed',
                       service_catalog=None):
        # Build a fake keystone-authenticated request with the identity
        # headers the middleware reads.
        req = webob.Request.blank('/')
        req.headers['x-auth-token'] = 'token1'
        req.headers['x-identity-status'] = identity_status
        req.headers['x-user-id'] = 'user1'
        req.headers['x-tenant-id'] = 'tenant1'
        _roles = roles or ['role1', 'role2']
        req.headers['x-roles'] = ','.join(_roles)
        if service_catalog:
            req.headers['x-service-catalog'] = service_catalog
        return req
    def _build_middleware(self):
        return context.ContextMiddleware(None)
    def test_header_parsing(self):
        req = self._build_request()
        self._build_middleware().process_request(req)
        self.assertEqual(req.context.auth_tok, 'token1')
        self.assertEqual(req.context.user, 'user1')
        self.assertEqual(req.context.tenant, 'tenant1')
        self.assertEqual(req.context.roles, ['role1', 'role2'])
    def test_is_admin_flag(self):
        # is_admin check should look for 'admin' role by default
        req = self._build_request(roles=['admin', 'role2'])
        self._build_middleware().process_request(req)
        self.assertTrue(req.context.is_admin)
        # without the 'admin' role, is_admin should be False
        req = self._build_request()
        self._build_middleware().process_request(req)
        self.assertFalse(req.context.is_admin)
        # if we change the admin_role attribute, we should be able to use it
        req = self._build_request()
        self.config(admin_role='role1')
        self._build_middleware().process_request(req)
        self.assertTrue(req.context.is_admin)
    def test_roles_case_insensitive(self):
        # accept role from request
        req = self._build_request(roles=['Admin', 'role2'])
        self._build_middleware().process_request(req)
        self.assertTrue(req.context.is_admin)
        # accept role from config
        req = self._build_request(roles=['role1'])
        self.config(admin_role='rOLe1')
        self._build_middleware().process_request(req)
        self.assertTrue(req.context.is_admin)
    def test_roles_stripping(self):
        # stripping extra spaces in request
        req = self._build_request(roles=['\trole1'])
        self.config(admin_role='role1')
        self._build_middleware().process_request(req)
        self.assertTrue(req.context.is_admin)
        # stripping extra spaces in config
        req = self._build_request(roles=['\trole1\n'])
        self.config(admin_role=' role1\t')
        self._build_middleware().process_request(req)
        self.assertTrue(req.context.is_admin)
    def test_anonymous_access_enabled(self):
        # An unconfirmed identity with anonymous access enabled yields an
        # empty, read-only context rather than a 401.
        req = self._build_request(identity_status='Nope')
        self.config(allow_anonymous_access=True)
        middleware = self._build_middleware()
        middleware.process_request(req)
        self.assertEqual(req.context.auth_tok, None)
        self.assertEqual(req.context.user, None)
        self.assertEqual(req.context.tenant, None)
        self.assertEqual(req.context.roles, [])
        self.assertFalse(req.context.is_admin)
        self.assertTrue(req.context.read_only)
    def test_anonymous_access_defaults_to_disabled(self):
        req = self._build_request(identity_status='Nope')
        middleware = self._build_middleware()
        self.assertRaises(webob.exc.HTTPUnauthorized,
                          middleware.process_request, req)
    def test_service_catalog(self):
        # The x-service-catalog header is parsed as JSON onto the context.
        catalog_json = "[{}]"
        req = self._build_request(service_catalog=catalog_json)
        self._build_middleware().process_request(req)
        self.assertEqual([{}], req.context.service_catalog)
    def test_invalid_service_catalog(self):
        catalog_json = "bad json"
        req = self._build_request(service_catalog=catalog_json)
        middleware = self._build_middleware()
        self.assertRaises(webob.exc.HTTPInternalServerError,
                          middleware.process_request, req)
class TestUnauthenticatedContextMiddleware(base.IsolatedUnitTest):
    """Tests for the middleware used when auth is disabled."""
    def test_request(self):
        # Without authentication the context is empty but is_admin is True.
        middleware = context.UnauthenticatedContextMiddleware(None)
        req = webob.Request.blank('/')
        middleware.process_request(req)
        self.assertEqual(req.context.auth_tok, None)
        self.assertEqual(req.context.user, None)
        self.assertEqual(req.context.tenant, None)
        self.assertEqual(req.context.roles, [])
        self.assertTrue(req.context.is_admin)
    def test_response(self):
        # The response should echo the request id header.
        middleware = context.UnauthenticatedContextMiddleware(None)
        req = webob.Request.blank('/')
        req.context = glance.context.RequestContext()
        resp = webob.Response()
        resp.request = req
        middleware.process_response(resp)
        self.assertEqual(resp.headers['x-openstack-request-id'],
                         'req-%s' % req.context.request_id)
|
cyanut/django-haystack
|
refs/heads/master
|
test_haystack/settings.py
|
13
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from tempfile import mkdtemp
SECRET_KEY = "Please do not spew DeprecationWarnings"
# Haystack settings for running tests.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'haystack_tests.db',
    }
}
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'haystack',
    'test_haystack.discovery',
    'test_haystack.core',
    'test_haystack.spatial',
    'test_haystack.multipleindex',
    # This app exists to confirm that nothing breaks when INSTALLED_APPS has an app without models.py
    # which is common in some cases for things like admin extensions, reporting, etc.
    'test_haystack.test_app_without_models',
    # Confirm that everything works with app labels which have more than one level of hierarchy
    # as reported in https://github.com/django-haystack/django-haystack/issues/1152
    'test_haystack.test_app_with_hierarchy.contrib.django.hierarchal_app_django',
]
import django
if django.VERSION >= (1, 7):
    # AppConfig support only exists on Django 1.7+.
    INSTALLED_APPS.append('test_haystack.test_app_using_appconfig.apps.SimpleTestAppConfig')
# Fix: SITE_ID was previously assigned twice (once here, once after
# HAYSTACK_CONNECTIONS); a single assignment is kept.
SITE_ID = 1
ROOT_URLCONF = 'test_haystack.core.urls'
HAYSTACK_ROUTERS = ['haystack.routers.DefaultRouter', 'test_haystack.multipleindex.routers.MultipleIndexRouter']
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'test_haystack.mocks.MockEngine',
    },
    'whoosh': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': mkdtemp(prefix='test_whoosh_query'),
        'INCLUDE_SPELLING': True,
    },
    'filtered_whoosh': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': mkdtemp(prefix='haystack-multipleindex-filtered-whoosh-tests-'),
        'EXCLUDED_INDEXES': ['test_haystack.multipleindex.search_indexes.BarIndex'],
    },
    'elasticsearch': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': '127.0.0.1:9200/',
        'INDEX_NAME': 'test_default',
        'INCLUDE_SPELLING': True,
    },
    'simple': {
        'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
    },
    'solr': {
        'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
        'URL': 'http://localhost:9001/solr/',
        'INCLUDE_SPELLING': True,
    },
}
MIDDLEWARE_CLASSES = ('django.middleware.common.CommonMiddleware',
                      'django.contrib.sessions.middleware.SessionMiddleware',
                      'django.middleware.csrf.CsrfViewMiddleware',
                      'django.contrib.auth.middleware.AuthenticationMiddleware',
                      'django.contrib.messages.middleware.MessageMiddleware')
|
jk1/intellij-community
|
refs/heads/master
|
python/testData/completion/superClassAttributesNoCompletionInFunc.py
|
166
|
class Foo:
attribute = 1
class Boo(Foo):
def f(self):
attr<caret>
|
chop-dbhi/serrano
|
refs/heads/master
|
serrano/resources/templates.py
|
1
|
# Serialization templates: each dict configures which model fields are
# exposed and how they are aliased when a resource is serialized.
Category = {
    'fields': [':pk', 'name', 'order', 'parent_id'],
    'allow_missing': True,
}
# Compact field representation for listings.
BriefField = {
    'fields': [':pk', 'name', 'description'],
    'aliases': {
        'name': '__unicode__',
    },
    'allow_missing': True,
}
# Full field representation.
Field = {
    'fields': [
        ':pk', 'name', 'plural_name', 'description', 'keywords',
        'app_name', 'model_name', 'field_name',
        'modified', 'published', 'operators',
        'simple_type', 'internal_type', 'data_modified', 'enumerable',
        'searchable', 'unit', 'plural_unit', 'nullable', 'order'
    ],
    'aliases': {
        'name': '__unicode__',
        'plural_name': 'get_plural_name',
        'plural_unit': 'get_plural_unit',
    },
    'allow_missing': True,
}
# Compact concept representation for listings.
BriefConcept = {
    'fields': [':pk', 'name', 'description'],
    'allow_missing': True,
}
# Full concept representation.
Concept = {
    'fields': [
        ':pk', 'name', 'plural_name', 'description', 'keywords',
        'category_id', 'order', 'modified', 'published',
        'formatter', 'queryable', 'sortable', 'viewable'
    ],
    'aliases': {
        'name': '__unicode__',
        'plural_name': 'get_plural_name',
    },
    'allow_missing': True,
}
# User/session identifiers are never serialized.
Context = {
    'exclude': ['user', 'session_key'],
    'allow_missing': True,
}
View = {
    'exclude': ['user', 'session_key'],
    'allow_missing': True,
}
User = {
    'fields': [':pk', 'name', 'username', 'email'],
    'aliases': {
        'name': 'get_full_name',
    }
}
BriefQuery = {
    'include': [':pk', 'name', 'context_json', 'view_json'],
    'allow_missing': True,
}
ForkedQuery = {
    'fields': [':pk', 'parent'],
    'allow_missing': True,
}
# Full query representation; nested user objects use the User template.
Query = {
    'fields': [':pk', 'accessed', 'name', 'description', 'user',
               'shared_users', 'context_json', 'view_json', 'public'],
    'related': {
        'user': User,
        'shared_users': User,
    }
}
Revision = {
    'exclude': ['user', 'session_key', 'data'],
    'allow_missing': True,
}
|
Inter-Actief/alexia
|
refs/heads/master
|
alexia/apps/scheduling/migrations/0007_event_is_risky.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean ``is_risky`` flag (default False) to Event."""
    dependencies = [
        ('scheduling', '0006_convert_availability_nature'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='is_risky',
            field=models.BooleanField(default=False, verbose_name=b'risky'),
        ),
    ]
|
jungle90/Openstack-Swift-I-O-throttler
|
refs/heads/master
|
build/lib.linux-x86_64-2.7/swift/cli/__init__.py
|
12133432
| |
Anonymous-X6/django
|
refs/heads/master
|
tests/admin_scripts/simple_app/management/commands/__init__.py
|
12133432
| |
pristinesource/MissionPlanner
|
refs/heads/master
|
ExtLibs/Mavlink/pymavlink/generator/lib/__init__.py
|
12133432
| |
lowiki-org/localwiki-backend-server
|
refs/heads/master
|
localwiki/users/templatetags/__init__.py
|
12133432
| |
emmuchira/kps_erp
|
refs/heads/develop
|
erpnext/docs/user/manual/de/website/setup/__init__.py
|
12133432
| |
gabrielf10/webAmpunc
|
refs/heads/master
|
webAmpunc/__init__.py
|
12133432
| |
halberom/ansible
|
refs/heads/devel
|
test/units/contrib/__init__.py
|
12133432
| |
nfcpy/ndeftool
|
refs/heads/master
|
src/ndeftool/commands/Save.py
|
1
|
# -*- coding: utf-8 -*-
import os.path
import shutil
import click
import ndef
from ndeftool.cli import command_processor, dmsg, info, warn
@click.command(short_help="Save records or payloads to disk.")
@click.argument('path', type=click.Path(writable=True))
@click.option('--skip', type=click.IntRange(min=0), default=0,
              metavar='N', help="Skip the first N records.")
@click.option('--count', type=click.IntRange(min=1), default=None,
              metavar='N', help="Save no more than N records.")
@click.option('--head', type=click.IntRange(min=1), default=None,
              metavar='N', help="Save the first N records.")
@click.option('--tail', type=click.IntRange(min=1), default=None,
              metavar='N', help="Save the last N records.")
@click.option('-b', '--burst', is_flag=True,
              help="Save single record files in directory.")
@click.option('-u', '--unpack', is_flag=True,
              help="Unpack records to files in directory.")
@click.option('-f', '--force', is_flag=True,
              help="Replace existing file or directory.")
@click.option('-k', '--keep', is_flag=True,
              help="Forward records to next command.")
@command_processor
@click.pass_context
def cmd(ctx, message, **kwargs):
    """The *save* command writes the current records to disk. The records
    to write can be restricted to the subset selected with '--skip',
    '--count', '--head' and '--tail' applied in that order. The
    default mode is to save all selected records as one NDEF message
    into a single file given by PATH. In '--burst' mode each record is
    written as one NDEF message into a separate file under the
    directory given by PATH. The file names are three digit numbers
    created from the record index. In '--unpack' mode the payload of
    each record is written to a separate file under directory PATH
    with the file name set to the record name (NDEF Record ID).
    Records without name are not written unless '--unpack' and
    '--burst' are both set.
    The *save* command does not replace existing files or directories
    unless this is requested with '--force'.
    The *save* command consumes records from the internal message
    pipe. This can be prevented with '--keep', all records are then
    forwarded to the next command or written to standard output. When
    *save* is the first command it creates the pipe by reading from
    standard input.
    \b
    Examples:
      ndeftool text 'Hello World' save text.ndef
      ndeftool text 'Hello World' | ndeftool save text.ndef
      ndeftool text 'One' save one.ndef text 'Two' save two.ndef
    """
    # NOTE: the '--count' help text previously duplicated '--skip's help
    # ("Skip the first N records."); it now describes the actual behavior.
    dmsg(__name__ + ' ' + str(kwargs))
    path = kwargs['path']
    if os.path.exists(path) and not kwargs['force']:
        errmsg = "path '%s' exists. Use '--force' to replace."
        raise click.ClickException(errmsg % path)
    if message is None:
        # First command in the pipeline: build the message from stdin.
        info("Reading data from standard input")
        octets = click.get_binary_stream('stdin').read()
        errors = ctx.meta['decode-errors']
        try:
            message = list(ndef.message_decoder(octets, errors))
        except ndef.DecodeError as error:
            raise click.ClickException(str(error))
    # Clamp the selection parameters to the message length, then combine
    # skip/count/head/tail into a single [first, first+count) window.
    first = min(kwargs['skip'], len(message))
    count = min(kwargs['count'] or len(message), len(message))
    head = min(kwargs['head'] or count, count)
    tail = min(kwargs['tail'] or count, count)
    dmsg("first=%d count=%d head=%d tail=%d" % (first, count, head, tail))
    count = min(head, tail, len(message) - first)
    first = first + head - min(head, tail)
    dmsg("first=%d count=%d head=%d tail=%d" % (first, count, head, tail))
    if kwargs['burst'] or kwargs['unpack']:
        # Directory modes: (re)create PATH as a directory and write one
        # file per selected record.
        path = os.path.normpath(path)
        try:
            if os.path.isdir(path):
                shutil.rmtree(path)
            os.mkdir(path)
        except (OSError, IOError) as error:
            raise click.ClickException(str(error))
        for index, record in enumerate(message[first:first+count]):
            name = None
            if kwargs['unpack'] and record.name:
                name = record.name
            if kwargs['burst'] and not name:
                name = '%03d.ndef' % index
            if name:
                with click.open_file('%s/%s' % (path, name), 'wb') as f:
                    info("Saving 1 record to {}.".format(f.name))
                    if kwargs['unpack']:
                        f.write(record.data)
                    else:
                        f.write(b''.join(ndef.message_encoder([record])))
            else:
                warn("Skipping 1 record without name")
    else:
        # Single-file mode: encode the whole selection as one NDEF message.
        with click.open_file(path, 'wb') as f:
            filename = f.name if f.name != '-' else '<stdout>'
            info("Saving {num} record{s} to {path}.".format(
                num=count, path=filename, s=('', 's')[count > 1]))
            f.write(b''.join(ndef.message_encoder(message[first:first+count])))
    if not kwargs['keep']:
        # Consume the saved records from the pipe unless '--keep' was given.
        del message[first:first+count]
    return message
|
blooparksystems/odoo
|
refs/heads/9.0
|
openerp/tools/lru.py
|
69
|
# -*- coding: utf-8 -*-
# taken from http://code.activestate.com/recipes/252524-length-limited-o1-lru-cache-implementation/
import threading
from func import synchronized
__all__ = ['LRU']
class LRUNode(object):
    """Doubly-linked-list node; ``me`` holds one (key, value) pair."""
    __slots__ = ['prev', 'next', 'me']

    def __init__(self, prev, me):
        # New nodes are appended at the tail, so 'next' starts empty.
        self.next = None
        self.prev = prev
        self.me = me
class LRU(object):
    """
    Implementation of a length-limited O(1) LRU queue.
    Built for and used by PyPE:
    http://pype.sourceforge.net
    Copyright 2003 Josiah Carlson.

    Entries live in ``self.d`` (key -> LRUNode) and in a doubly linked list
    from ``self.first`` (least recently used) to ``self.last`` (most
    recently used). Most operations take the instance RLock via the
    ``synchronized`` decorator.
    """
    def __init__(self, count, pairs=[]):
        # NOTE(review): the mutable default for ``pairs`` is only iterated,
        # never mutated, so it is harmless here.
        self._lock = threading.RLock()
        self.count = max(count, 1)
        self.d = {}
        self.first = None
        self.last = None
        for key, value in pairs:
            self[key] = value
    @synchronized()
    def __contains__(self, obj):
        return obj in self.d
    def get(self, obj, val=None):
        # Not decorated itself; locking happens inside __getitem__.
        try:
            return self[obj]
        except KeyError:
            return val
    @synchronized()
    def __getitem__(self, obj):
        # Re-inserting the pair moves it to the most-recently-used end.
        a = self.d[obj].me
        self[a[0]] = a[1]
        return a[1]
    @synchronized()
    def __setitem__(self, obj, val):
        if obj in self.d:
            del self[obj]
        # Append the new node at the tail (most recently used).
        nobj = LRUNode(self.last, (obj, val))
        if self.first is None:
            self.first = nobj
        if self.last:
            self.last.next = nobj
        self.last = nobj
        self.d[obj] = nobj
        if len(self.d) > self.count:
            # Over capacity: evict the head (least recently used).
            if self.first == self.last:
                self.first = None
                self.last = None
                return
            a = self.first
            a.next.prev = None
            self.first = a.next
            a.next = None
            del self.d[a.me[0]]
            del a
    @synchronized()
    def __delitem__(self, obj):
        # Unlink the node from the list, patching the head/tail pointers
        # when the node is at either end.
        nobj = self.d[obj]
        if nobj.prev:
            nobj.prev.next = nobj.next
        else:
            self.first = nobj.next
        if nobj.next:
            nobj.next.prev = nobj.prev
        else:
            self.last = nobj.prev
        del self.d[obj]
    @synchronized()
    def __iter__(self):
        # Yields values from least to most recently used; the successor is
        # captured before yielding so deletion of the current node is safe.
        cur = self.first
        while cur is not None:
            cur2 = cur.next
            yield cur.me[1]
            cur = cur2
    @synchronized()
    def __len__(self):
        return len(self.d)
    @synchronized()
    def iteritems(self):
        # Yields (key, value) pairs in LRU-to-MRU order.
        cur = self.first
        while cur is not None:
            cur2 = cur.next
            yield cur.me
            cur = cur2
    @synchronized()
    def iterkeys(self):
        return iter(self.d)
    @synchronized()
    def itervalues(self):
        for i,j in self.iteritems():
            yield j
    @synchronized()
    def keys(self):
        return self.d.keys()
    @synchronized()
    def pop(self,key):
        # Returns the value and removes the entry; the lookup itself also
        # refreshes the entry's recency before removal.
        v=self[key]
        del self[key]
        return v
    @synchronized()
    def clear(self):
        self.d = {}
        self.first = None
        self.last = None
|
petecummings/mezzanine-themes
|
refs/heads/master
|
wsgi.py
|
63
|
from __future__ import unicode_literals
import os
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
settings_module = "%s.settings" % PROJECT_ROOT.split(os.sep)[-1]
os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_module)
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
keedio/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/__init__.py
|
38
|
VERSION = (1, 6, 10, 'final', 0)


def get_version(*args, **kwargs):
    """Return the Django version string (delegates to django.utils.version)."""
    # Deliberately a lazy, function-local import: keeps importing the
    # ``django`` package itself cheap; the helper loads only on demand.
    from django.utils.version import get_version as _get_version
    return _get_version(*args, **kwargs)
|
jbteixeir/Openflow-DC-Framework
|
refs/heads/master
|
ext/Stats/ercs_stats_export.py
|
1
|
from pox.core import core
from pox.lib.recoco.recoco import Timer
from ext.Structures.ercs_switch import Switch
import os
import thread
import time
log = core.getLogger()
class ERCSStatsExport(object):
'''
Class that saves the statistics about switches and links into files
'''
def __init__(self, inithandler = None, topology = None, stats = None, vm_allocation_manager = None):
self.topology = topology
self.stats = stats
self.vm_allocation_manager = vm_allocation_manager
self.coredpid = list()
self.aggdpid = list()
self.edgedpid = list()
self.corelinks = list()
self.agglinks = list()
self.edgelinks = list()
self.polling_time = 60
if (inithandler == None) :
self.switch_stats_dir = "."
self.link_stats_dir = "."
else :
self.getArgsFromIni(inithandler)
if not os.path.exists(self.switch_stats_dir):
os.makedirs(self.switch_stats_dir)
if not os.path.exists(self.link_stats_dir):
os.makedirs(self.link_stats_dir)
if not os.path.exists(self.host_stats_dir):
os.makedirs(self.host_stats_dir)
self.core_stats_file = file((self.switch_stats_dir+"/corestats.csv"), "w+")
self.agg_stats_file = file((self.switch_stats_dir+"/aggstats.csv"), "w+")
self.edge_stats_file = file((self.switch_stats_dir+"/edgestats.csv"), "w+")
self.core_links_stats_file = file((self.link_stats_dir+"/corelinkstats.csv"), "w+")
self.agg_links_stats_file = file((self.link_stats_dir+"/agglinkstats.csv"), "w+")
self.edge_links_stats_file = file((self.link_stats_dir+"/edgelinkstats.csv"), "w+")
self.host_stats_file = file((self.host_stats_dir+"/hoststats.csv"), "w+")
thread.start_new_thread(self.export_stats, ())
def getArgsFromIni(self, inithandler):
try :
section = "statsexport"
key = "switchsratiodir"
self.switch_stats_dir = str(inithandler.read_ini_value(section, key))
key = "linksratiodir"
self.link_stats_dir = str(inithandler.read_ini_value(section, key))
key = "hostvmallocationdir"
self.host_stats_dir = str(inithandler.read_ini_value(section, key))
section = "stats"
key = "polling_time"
self.polling_time = float(inithandler.read_ini_value(section, key))
log.debug("Successfully got stats export values")
except Exception, e :
log.error("INI File doesn't contain expected values")
print e
os._exit(0)
def export_stats(self):
#write file header
self.writeSwitchFileHeaders()
self.writeLinkFileHeaders()
self.writeHostFileHeaders()
#write stats on files
time.sleep(self.polling_time)
#Start Thread that writes the stats into the files
thread.start_new_thread(self.export_stats_switches, ())
thread.start_new_thread(self.export_stats_links, ())
thread.start_new_thread(self.export_stats_hosts, ())
def writeSwitchFileHeaders(self):
not_ok = True;
while not_ok :
time.sleep(5)
not_ok = False
for dpid in self.topology.switches.keys():
if self.topology.switches[dpid].type == Switch.UNKNOWN:
not_ok = True;
if len(self.topology.switches) == 0:
not_ok = True;
log.debug("Writing to Switch Stats file header...")
switch_list = self.topology.switches.keys()
switch_list.sort()
for dpid in switch_list:
if(self.topology.switches[dpid].type == Switch.CORE):
self.coredpid.append(dpid)
self.core_stats_file.write(str(dpid)+";")
if(self.topology.switches[dpid].type == Switch.AGGREGATION):
self.aggdpid.append(dpid)
self.agg_stats_file.write(str(dpid)+";")
if(self.topology.switches[dpid].type == Switch.EDGE):
self.edgedpid.append(dpid)
self.edge_stats_file.write(str(dpid)+";")
self.core_stats_file.write("\n")
self.agg_stats_file.write("\n")
self.edge_stats_file.write("\n")
log.debug("Writing to Switch Stats file header... DONE")
def writeLinkFileHeaders(self):
log.debug("Writing to Link Stats file header...")
for dpid in self.coredpid:
for port_id in self.topology.switches[dpid].ports:
#if this port connects to a non of switch
if not self.topology.switch_links.has_key(dpid) or (self.topology.switch_links.has_key(dpid) and
not self.topology.switch_links[dpid].has_key(port_id)):
pass
else:
dst_dpid = self.topology.switch_links[dpid][port_id][0]
dst_port = self.topology.switch_links[dpid][port_id][1]
self.core_links_stats_file.write(str(dpid)+"."+str(port_id)+"<->"+str(dst_dpid)+"."+str(dst_port)+";")
self.corelinks.append((dpid,port_id, dst_dpid, dst_port))
self.core_links_stats_file.write("\n")
for dpid in self.aggdpid:
for port_id in self.topology.switches[dpid].ports:
if self.topology.switch_links.has_key(dpid) :
if self.topology.switch_links[dpid].has_key(port_id) :
dst_dpid = self.topology.switch_links[dpid][port_id][0]
dst_port = self.topology.switch_links[dpid][port_id][1]
if self.topology.switches[dst_dpid].type == Switch.EDGE:
self.agg_links_stats_file.write(str(dpid)+"."+str(port_id)+"<->"+str(dst_dpid)+"."+str(dst_port)+";")
self.agglinks.append((dpid, port_id, dst_dpid, dst_port))
self.agg_links_stats_file.write("\n")
for dpid in self.edgedpid:
for port_id in self.topology.switches[dpid].ports:
#if this port connects to a non of switch
if not self.topology.switch_links.has_key(dpid) or (self.topology.switch_links.has_key(dpid) and
not self.topology.switch_links[dpid].has_key(port_id)):
self.edge_links_stats_file.write(str(dpid)+"."+str(port_id)+";")
self.edgelinks.append((dpid,port_id))
self.edge_links_stats_file.write("\n")
log.debug("Writing to Link Stats file header... DONE")
def writeHostFileHeaders(self):
log.debug("Writing to Host Stats file header...")
for host in self.topology.hosts:
self.host_stats_file.write(str(host)+";")
self.host_stats_file.write("\n")
log.debug("Writing to Host Stats file header... DONE")
def export_stats_switches(self):
while (1):
for dpid in self.coredpid:
ratio = self.vm_allocation_manager.getSwitchRatio(dpid)
self.core_stats_file.write(str(ratio)+";")
print self.stats.getBitRateByDpid(dpid)
self.core_stats_file.write("\n")
for dpid in self.aggdpid:
ratio = self.vm_allocation_manager.getSwitchRatio(dpid)
self.agg_stats_file.write(str(ratio)+";")
self.agg_stats_file.write("\n")
for dpid in self.edgedpid:
ratio = self.vm_allocation_manager.getSwitchRatio(dpid)
self.edge_stats_file.write(str(ratio)+";")
self.edge_stats_file.write("\n")
time.sleep(self.polling_time)
def export_stats_links(self):
while(1):
for (dpid,port_id, dst_dpid, dst_port) in self.corelinks :
ratio = self.vm_allocation_manager.getLinkRatio(dpid,port_id)
self.core_links_stats_file.write(str(ratio)+";")
self.core_links_stats_file.write("\n")
for (dpid,port_id, dst_dpid, dst_port) in self.agglinks :
ratio = self.vm_allocation_manager.getLinkRatio(dpid,port_id)
self.agg_links_stats_file.write(str(ratio)+";")
self.agg_links_stats_file.write("\n")
for (dpid,port_id) in self.edgelinks :
ratio = self.vm_allocation_manager.getLinkRatio(dpid,port_id)
self.edge_links_stats_file.write(str(ratio)+";")
self.edge_links_stats_file.write("\n")
time.sleep(self.polling_time)
def export_stats_hosts(self):
while(1):
for host in self.topology.hosts:
if self.vm_allocation_manager.vms_allocated.has_key(host):
self.host_stats_file.write(str(len(self.vm_allocation_manager.vms_allocated[host]))+";")
else:
self.host_stats_file.write(str(0)+";")
self.host_stats_file.write("\n")
time.sleep(self.polling_time)
|
llvmpy/llvmpy
|
refs/heads/master
|
llpython/byte_control.py
|
1
|
# ______________________________________________________________________
from __future__ import absolute_import
import opcode
from . import opcode_util
import pprint
from .bytecode_visitor import BasicBlockVisitor, BenignBytecodeVisitorMixin
from .control_flow import ControlFlowGraph
# ______________________________________________________________________
class ControlFlowBuilder (BenignBytecodeVisitorMixin, BasicBlockVisitor):
    '''Visitor responsible for traversing a bytecode basic block map and
    building a control flow graph (CFG).
    The primary purpose of this transformation is to create a CFG,
    which is used by later transformers for dataflow analysis.
    '''
    def visit (self, flow, nargs = 0, *args, **kws):
        '''Given a bytecode flow, and an optional number of arguments,
        return a :py:class:`llpython.control_flow.ControlFlowGraph`
        instance describing the full control flow of the bytecode
        flow.'''
        self.nargs = nargs
        ret_val = super(ControlFlowBuilder, self).visit(flow, *args, **kws)
        del self.nargs
        return ret_val
    def enter_blocks (self, blocks):
        # Snapshot the block map and seed the CFG with one node per basic
        # block, keyed (and later visited) in bytecode-offset order.
        super(ControlFlowBuilder, self).enter_blocks(blocks)
        self.blocks = blocks
        self.block_list = list(blocks.keys())
        self.block_list.sort()
        self.cfg = ControlFlowGraph()
        self.loop_stack = []
        for block in self.block_list:
            self.cfg.add_block(block, blocks[block])
    def exit_blocks (self, blocks):
        # Finalize: run dataflow and SSA bookkeeping, then drop all
        # per-traversal state and hand the finished CFG back.
        super(ControlFlowBuilder, self).exit_blocks(blocks)
        assert self.blocks == blocks
        self.cfg.compute_dataflow()
        self.cfg.update_for_ssa()
        ret_val = self.cfg
        del self.loop_stack
        del self.cfg
        del self.block_list
        del self.blocks
        return ret_val
    def enter_block (self, block):
        self.block = block
        assert block in self.cfg.blocks
        if block == 0:
            # Entry block: model the implicit argument bindings as
            # STORE_FAST writes so dataflow sees the parameters defined.
            for local_index in range(self.nargs):
                self.op_STORE_FAST(0, opcode.opmap['STORE_FAST'], local_index)
        return True
    def _get_next_block (self, block):
        # Successor in bytecode order (the fall-through target).
        return self.block_list[self.block_list.index(block) + 1]
    def exit_block (self, block):
        assert block == self.block
        del self.block
        # Add outgoing CFG edges based on the block's final instruction.
        i, op, arg = self.blocks[block][-1]
        opname = opcode.opname[op]
        if op in opcode.hasjabs:
            # Absolute jump: the argument is the target offset.
            self.cfg.add_edge(block, arg)
        elif op in opcode.hasjrel:
            # Relative jump: target offset is past this instruction
            # (opcode byte + 2 argument bytes, hence the +3).
            self.cfg.add_edge(block, i + arg + 3)
        elif opname == 'BREAK_LOOP':
            # Break exits to just past the innermost SETUP_LOOP's extent.
            loop_i, _, loop_arg = self.loop_stack[-1]
            self.cfg.add_edge(block, loop_i + loop_arg + 3)
        elif opname != 'RETURN_VALUE':
            # Anything else (except a return) falls through.
            self.cfg.add_edge(block, self._get_next_block(block))
        if op in opcode_util.hascbranch:
            # Conditional branches additionally fall through.
            self.cfg.add_edge(block, self._get_next_block(block))
    def op_LOAD_FAST (self, i, op, arg, *args, **kws):
        # Record the local-variable read for dataflow analysis.
        self.cfg.blocks_reads[self.block].add(arg)
        return super(ControlFlowBuilder, self).op_LOAD_FAST(i, op, arg, *args,
                                                            **kws)
    def op_STORE_FAST (self, i, op, arg, *args, **kws):
        # Record the local-variable write for dataflow analysis.
        self.cfg.writes_local(self.block, i, arg)
        return super(ControlFlowBuilder, self).op_STORE_FAST(i, op, arg, *args,
                                                             **kws)
    def op_SETUP_LOOP (self, i, op, arg, *args, **kws):
        # Track the enclosing loop so BREAK_LOOP can find its exit.
        self.loop_stack.append((i, op, arg))
        return super(ControlFlowBuilder, self).op_SETUP_LOOP(i, op, arg, *args,
                                                             **kws)
    def op_POP_BLOCK (self, i, op, arg, *args, **kws):
        self.loop_stack.pop()
        return super(ControlFlowBuilder, self).op_POP_BLOCK(i, op, arg, *args,
                                                            **kws)
# ______________________________________________________________________
def build_cfg (func):
    '''Build and return the control flow graph for *func*: extract its
    code object, partition the bytecode into basic blocks, and visit
    them with a ControlFlowBuilder.'''
    code_obj = opcode_util.get_code_object(func)
    basic_blocks = opcode_util.build_basic_blocks(code_obj)
    return ControlFlowBuilder().visit(basic_blocks, code_obj.co_argcount)
# ______________________________________________________________________
# Main (self-test) routine
def main (*args, **kws):
    '''Self-test driver: build and pretty-print CFGs for the named
    functions in tests.llfuncs (defaults to "doslice").'''
    from tests import llfuncs
    names = args or ('doslice',)
    for name in names:
        build_cfg(getattr(llfuncs, name)).pprint()
# ______________________________________________________________________
# Run the self-test when executed directly:
#   python byte_control.py [llfuncs_function_name ...]
if __name__ == "__main__":
    import sys
    main(*sys.argv[1:])
# ______________________________________________________________________
# End of byte_control.py
|
ecederstrand/django
|
refs/heads/master
|
tests/field_defaults/models.py
|
410
|
# coding: utf-8
"""
Callable defaults
You can pass callable objects as the ``default`` parameter to a field. When
the object is created without an explicit value passed in, Django will call
the method to determine the default value.
This example uses ``datetime.datetime.now`` as the default for the ``pub_date``
field.
"""
from datetime import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Article(models.Model):
    # Static default: the literal is evaluated once, at field definition.
    headline = models.CharField(max_length=100, default='Default headline')
    # Callable default: Django calls datetime.now per new instance, so
    # each Article gets the current time rather than import time.
    pub_date = models.DateTimeField(default=datetime.now)
    def __str__(self):
        return self.headline
|
jhawkesworth/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_domain_computer.py
|
25
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, AMTEGA - Xunta de Galicia
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_domain_computer
short_description: Manage computers in Active Directory
description:
- Create, read, update and delete computers in Active Directory using a
windows bridge computer to launch New-ADComputer, Get-ADComputer,
Set-ADComputer, Remove-ADComputer and Move-ADObject powershell commands.
version_added: '2.6'
options:
name:
description:
- Specifies the name of the object.
- This parameter sets the Name property of the Active Directory object.
- The LDAP display name (ldapDisplayName) of this property is name.
type: str
required: true
sam_account_name:
description:
- Specifies the Security Account Manager (SAM) account name of the
computer.
    - Its maximum is 256 characters; 15 is advised for compatibility
      with older operating systems.
- The LDAP display name (ldapDisplayName) for this property is sAMAccountName.
    - If omitted the value is the same as C(name).
- Note that all computer SAMAccountNames need to end with a $.
type: str
enabled:
description:
- Specifies if an account is enabled.
- An enabled account requires a password.
- This parameter sets the Enabled property for an account object.
- This parameter also sets the ADS_UF_ACCOUNTDISABLE flag of the
Active Directory User Account Control (UAC) attribute.
type: bool
default: yes
ou:
description:
- Specifies the X.500 path of the Organizational Unit (OU) or container
where the new object is created. Required when I(state=present).
type: str
description:
description:
- Specifies a description of the object.
- This parameter sets the value of the Description property for the object.
- The LDAP display name (ldapDisplayName) for this property is description.
type: str
default: ''
dns_hostname:
description:
- Specifies the fully qualified domain name (FQDN) of the computer.
- This parameter sets the DNSHostName property for a computer object.
- The LDAP display name for this property is dNSHostName.
- Required when I(state=present).
type: str
domain_username:
description:
- The username to use when interacting with AD.
- If this is not set then the user Ansible used to log in with will be
used instead when using CredSSP or Kerberos with credential delegation.
type: str
version_added: '2.8'
domain_password:
description:
- The password for I(username).
type: str
version_added: '2.8'
domain_server:
description:
- Specifies the Active Directory Domain Services instance to connect to.
- Can be in the form of an FQDN or NetBIOS name.
- If not specified then the value is based on the domain of the computer
running PowerShell.
type: str
version_added: '2.8'
state:
description:
- Specified whether the computer should be C(present) or C(absent) in
Active Directory.
type: str
choices: [ absent, present ]
default: present
seealso:
- module: win_domain
- module: win_domain_controller
- module: win_domain_group
- module: win_domain_membership
- module: win_domain_user
author:
- Daniel Sánchez Fábregas (@Daniel-Sanchez-Fabregas)
'''
EXAMPLES = r'''
- name: Add linux computer to Active Directory OU using a windows machine
win_domain_computer:
name: one_linux_server.my_org.local
sam_account_name: linux_server
dns_hostname: one_linux_server.my_org.local
ou: "OU=servers,DC=my_org,DC=local"
description: Example of linux server
enabled: yes
state: present
delegate_to: my_windows_bridge.my_org.local
- name: Remove linux computer from Active Directory using a windows machine
win_domain_computer:
name: one_linux_server.my_org.local
state: absent
delegate_to: my_windows_bridge.my_org.local
'''
RETURN = r'''
'''
|
askabelin/django-constance
|
refs/heads/master
|
example/cheeseshop/apps/storage/models.py
|
20
|
from django.db import models
class Shelf(models.Model):
    name = models.CharField(max_length=75)
    class Meta:
        # Django's default pluralization would be "shelfs"; override it.
        verbose_name_plural = 'shelves'
class Supply(models.Model):
    name = models.CharField(max_length=75)
    class Meta:
        # Django's default pluralization would be "supplys"; override it.
        verbose_name_plural = 'supplies'
|
m8ttyB/socorro
|
refs/heads/master
|
socorro/unittest/external/postgresql/test_crashstorage.py
|
1
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import time
import mock
from nose.tools import eq_, ok_, assert_raises
from psycopg2 import OperationalError
from configman import ConfigurationManager
from configman.dotdict import DotDict
from socorro.database.transaction_executor import (
TransactionExecutorWithLimitedBackoff,
TransactionExecutorWithInfiniteBackoff
)
from socorro.external.postgresql.crashstorage import (
PostgreSQLBasicCrashStorage,
PostgreSQLCrashStorage,
)
from socorro.unittest.testbase import TestCase
empty_tuple = ()
a_raw_crash = {
"submitted_timestamp": "2012-04-08 10:52:42.0",
"ProductName": "Fennicky",
"Version": "6.02E23",
}
a_processed_crash = {
"addons": [["{1a5dabbd-0e74-41da-b532-a364bb552cab}", "1.0.4.1"]],
"addons_checked": None,
"address": "0x1c",
"app_notes": "...",
"build": "20120309050057",
"client_crash_date": "2012-04-08 10:52:42.0",
"completeddatetime": "2012-04-08 10:56:50.902884",
"cpu_info": "None | 0",
"cpu_name": "arm",
"crashedThread": 8,
"date_processed": "2012-04-08 10:56:41.558922",
"distributor": None,
"distributor_version": None,
"dump": "...",
"email": "bogus@bogus.com",
"exploitability": "high",
# "flash_process_dump": "flash dump", # future
"flash_version": "[blank]",
"hangid": None,
"id": 361399767,
"install_age": 22385,
"last_crash": None,
"os_name": "Linux",
"os_version": "0.0.0 Linux 2.6.35.7-perf-CL727859 #1 ",
"processor_notes": "SignatureTool: signature truncated due to length",
"process_type": "plugin",
"product": "FennecAndroid",
"productid": "FA-888888",
"PluginFilename": "dwight.txt",
"PluginName": "wilma",
"PluginVersion": "69",
"reason": "SIGSEGV",
"release_channel": "default",
"ReleaseChannel": "default",
"signature": "libxul.so@0x117441c",
"startedDateTime": "2012-04-08 10:56:50.440752",
"success": True,
"topmost_filenames": [],
"truncated": False,
"uptime": 170,
"url": "http://embarrassing.porn.com",
"user_comments": None,
"user_id": None,
"uuid": "936ce666-ff3b-4c7a-9674-367fe2120408",
"version": "13.0a1",
}
a_processed_crash_with_everything_too_long = {
"addons": [["{1a5dabbd-0e74-41da-b532-a364bb552cab}", "1.0.4.1"]],
"addons_checked": None,
"address": "*" * 25,
"app_notes": "*" * 1200,
"build": "*" * 35,
"client_crash_date": "2012-04-08 10:52:42.0",
"completeddatetime": "2012-04-08 10:56:50.902884",
"cpu_info": "*" * 105,
"cpu_name": "*" * 107,
"crashedThread": 8,
"date_processed": "2012-04-08 10:56:41.558922",
"distributor": '*' * 24,
"distributor_version": '*' * 25,
"dump": "...",
"email": "*" * 101,
"exploitability": "high",
# "flash_process_dump": "flash dump", # future
"flash_version": "[blank]",
"hangid": None,
"id": 361399767,
"install_age": 22385,
"last_crash": None,
"os_name": "*" * 111,
"os_version": "*" * 102,
"processor_notes": "SignatureTool: signature truncated due to length",
"process_type": "plugin",
"product": "*" * 34,
"productid": "FA-888888",
"PluginFilename": "dwight.txt",
"PluginName": "wilma",
"PluginVersion": "69",
"reason": "*" * 257,
"release_channel": "default",
"ReleaseChannel": "default",
"signature": "*" * 300,
"startedDateTime": "2012-04-08 10:56:50.440752",
"success": True,
"topmost_filenames": [],
"truncated": False,
"uptime": 170,
"url": "*" * 288,
"user_comments": "*" * 1111,
"user_id": '*' * 80,
"uuid": "936ce666-ff3b-4c7a-9674-367fe2120408",
"version": "*" * 18,
}
a_processed_report_with_everything_truncated = [
None,
("*" * 25)[:20],
("*" * 1200)[:1024],
("*" * 35)[:30],
"2012-04-08 10:52:42.0",
"2012-04-08 10:56:50.902884",
("*" * 105)[:100],
("*" * 107)[:100],
"2012-04-08 10:56:41.558922",
('*' * 24)[:20],
('*' * 25)[:20],
("*" * 101)[:100],
"high",
"[blank]",
None,
22385,
None,
("*" * 111)[:100],
("*" * 102)[:100],
"SignatureTool: signature truncated due to length",
"plugin",
("*" * 34)[:30],
"FA-888888",
("*" * 257)[:255],
"default",
("*" * 300)[:255],
"2012-04-08 10:56:50.440752",
True,
[],
False,
170,
("*" * 1111)[:1024],
('*' * 80)[:50],
("*" * 288)[:255],
"936ce666-ff3b-4c7a-9674-367fe2120408",
("*" * 18)[:16],
"936ce666-ff3b-4c7a-9674-367fe2120408",
]
def remove_whitespace(string):
    """Drop every newline and space character so SQL text can be
    compared structurally rather than by exact formatting."""
    stripped = string
    for unwanted in ('\n', ' '):
        stripped = stripped.replace(unwanted, '')
    return stripped
class TestPostgresBasicCrashStorage(TestCase):
    """
    Tests where the actual PostgreSQL part is mocked.
    """
    # NOTE(review): except for the first test, the methods below
    # instantiate PostgreSQLCrashStorage rather than the Basic variant
    # this class nominally covers — confirm that is intentional.
    def test_basic_key_error_on_save_processed(self):
        # A processed crash containing a column unknown to the reports
        # table must be rejected with KeyError rather than stored.
        mock_logging = mock.Mock()
        mock_postgres = mock.Mock()
        required_config = PostgreSQLCrashStorage.get_required_config()
        required_config.add_option('logger', default=mock_logging)
        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'database_class': mock_postgres
            }],
            argv_source=[]
        )
        with config_manager.context() as config:
            crashstorage = PostgreSQLBasicCrashStorage(config)
            database = crashstorage.database.return_value = mock.MagicMock()
            ok_(isinstance(database, mock.Mock))
            broken_processed_crash = {
                "product": "Peter",
                "version": "1.0B3",
                "ooid": "abc123",
                "submitted_timestamp": time.time(),
                "unknown_field": 'whatever'
            }
            assert_raises(KeyError,
                          crashstorage.save_processed,
                          broken_processed_crash)
    def test_basic_postgres_save_processed_success(self):
        # Happy path: one connection checkout and three cursor.execute
        # calls, starting with the reports UPDATE and plugins lookup.
        config = DotDict()
        config.database_class = mock.MagicMock()
        config.transaction_executor_class = TransactionExecutorWithInfiniteBackoff
        config.redactor_class = mock.Mock()
        config.backoff_delays = [1]
        config.wait_log_interval = 10
        config.logger = mock.Mock()
        mocked_database_connection_source = config.database_class.return_value
        mocked_connection = (
            mocked_database_connection_source.return_value
            .__enter__.return_value
        )
        mocked_cursor = mocked_connection.cursor.return_value.__enter__.return_value
        # the call to be tested
        crashstorage = PostgreSQLCrashStorage(config)
        crashstorage.save_processed(a_processed_crash)
        eq_(mocked_database_connection_source.call_count, 1)
        eq_(mocked_cursor.execute.call_count, 3)
        # check correct fragments
        sql_fragments = [
            "UPDATE reports_20120402",
            'select id from plugins',
        ]
        for a_call, a_fragment in zip(mocked_cursor.execute.call_args_list, sql_fragments):
            ok_(a_fragment in a_call[0][0])
    def test_basic_postgres_save_processed_success_2(self):
        # When the plugins lookup misses (fetchall returns no row), an
        # extra "insert into plugins" statement is issued (4 executes).
        config = DotDict()
        config.database_class = mock.MagicMock()
        config.transaction_executor_class = TransactionExecutorWithInfiniteBackoff
        config.redactor_class = mock.Mock()
        config.backoff_delays = [1]
        config.wait_log_interval = 10
        config.logger = mock.Mock()
        mocked_database_connection_source = config.database_class.return_value
        mocked_connection = (
            mocked_database_connection_source.return_value
            .__enter__.return_value
        )
        mocked_cursor = mocked_connection.cursor.return_value.__enter__.return_value
        # Scripted fetchall results: hit, miss, then the new plugin id.
        fetch_all_returns = [((666,),), None, ((23,),), ]
        def fetch_all_func(*args):
            result = fetch_all_returns.pop(0)
            return result
        mocked_cursor.fetchall = fetch_all_func
        # the call to be tested
        crashstorage = PostgreSQLCrashStorage(config)
        crashstorage.save_processed(a_processed_crash)
        eq_(mocked_database_connection_source.call_count, 1)
        eq_(mocked_cursor.execute.call_count, 4)
        # check correct fragments
        sql_fragments = [
            "UPDATE reports_20120402",
            'select id from plugins',
            'insert into plugins',
        ]
        for a_call, a_fragment in zip(mocked_cursor.execute.call_args_list, sql_fragments):
            ok_(a_fragment in a_call[0][0])
    def test_basic_postgres_save_processed_success_3_truncations(self):
        # Over-long string fields must be truncated to their column
        # widths before being bound into the reports statement.
        config = DotDict()
        config.database_class = mock.MagicMock()
        config.transaction_executor_class = TransactionExecutorWithInfiniteBackoff
        config.redactor_class = mock.Mock()
        config.backoff_delays = [1]
        config.wait_log_interval = 10
        config.logger = mock.Mock()
        mocked_database_connection_source = config.database_class.return_value
        mocked_connection = (
            mocked_database_connection_source.return_value
            .__enter__.return_value
        )
        mocked_cursor = mocked_connection.cursor.return_value.__enter__.return_value
        # the call to be tested
        crashstorage = PostgreSQLCrashStorage(config)
        crashstorage.save_processed(a_processed_crash_with_everything_too_long)
        eq_(mocked_database_connection_source.call_count, 1)
        eq_(mocked_cursor.execute.call_count, 3)
        # check correct fragments
        first_call = mocked_cursor.execute.call_args_list[0]
        eq_(
            first_call[0][1],
            a_processed_report_with_everything_truncated * 2
        )
    def test_basic_postgres_save_processed_operational_error(self):
        # With limited backoff [0, 0, 0] and a cursor that always raises,
        # the executor retries three times and then re-raises.
        mock_logging = mock.Mock()
        mock_postgres = mock.Mock()
        required_config = PostgreSQLCrashStorage.get_required_config()
        required_config.add_option(
            'logger',
            default=mock_logging)
        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'database_class': mock_postgres,
                'transaction_executor_class':
                    TransactionExecutorWithLimitedBackoff,
                'backoff_delays': [0, 0, 0],
            }],
            argv_source=[]
        )
        with config_manager.context() as config:
            crashstorage = PostgreSQLCrashStorage(config)
            crashstorage.database.operational_exceptions = (OperationalError,)
            database = crashstorage.database.return_value = mock.MagicMock()
            ok_(isinstance(database, mock.Mock))
            m = mock.MagicMock()
            m.__enter__.return_value = m
            database = crashstorage.database.return_value = m
            m.cursor.side_effect = OperationalError('bad')
            assert_raises(OperationalError,
                          crashstorage.save_processed,
                          a_processed_crash)
            eq_(m.cursor.call_count, 3)
class TestPostgresCrashStorage(TestCase):
"""
Tests where the actual PostgreSQL part is mocked.
"""
def test_basic_postgres_save_raw_crash(self):
mock_logging = mock.Mock()
mock_postgres = mock.Mock()
required_config = PostgreSQLCrashStorage.get_required_config()
required_config.add_option('logger', default=mock_logging)
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
values_source_list=[{
'logger': mock_logging,
'database_class': mock_postgres
}],
argv_source=[]
)
with config_manager.context() as config:
crashstorage = PostgreSQLCrashStorage(config)
database = crashstorage.database.return_value = mock.MagicMock()
ok_(isinstance(database, mock.Mock))
ok_('submitted_timestamp' in a_raw_crash)
m = mock.MagicMock()
m.__enter__.return_value = m
database = crashstorage.database.return_value = m
crashstorage.save_raw_crash(
a_raw_crash,
'',
"936ce666-ff3b-4c7a-9674-367fe2120408"
)
eq_(m.cursor.call_count, 1)
eq_(m.cursor.return_value.__enter__.return_value.execute.call_count, 1)
expected_execute_args = ((("""
WITH update_raw_crash AS (
UPDATE raw_crashes_20120402 SET
raw_crash = %(raw_crash)s,
date_processed = %(date_processed)s
WHERE uuid = %(crash_id)s
RETURNING 1
),
insert_raw_crash AS (
INSERT into raw_crashes_20120402
(uuid, raw_crash, date_processed)
( SELECT
%(crash_id)s as uuid,
%(raw_crash)s as raw_crash,
%(date_processed)s as date_processed
WHERE NOT EXISTS (
SELECT uuid from raw_crashes_20120402
WHERE
uuid = %(crash_id)s
LIMIT 1
)
)
RETURNING 2
)
SELECT * from update_raw_crash
UNION ALL
SELECT * from insert_raw_crash
""", {
'crash_id': '936ce666-ff3b-4c7a-9674-367fe2120408',
'raw_crash': (
'{"submitted_timestamp": "2012-04-08 10:52:42.0", '
'"Version": "6.02E23", "ProductName": "Fennicky"}'
),
'date_processed': "2012-04-08 10:52:42.0"
}),),)
actual_execute_args = m.cursor().execute.call_args_list
for expected, actual in zip(expected_execute_args,
actual_execute_args):
expeceted_sql, expected_params = expected[0]
expeceted_sql = remove_whitespace(expeceted_sql)
actual_sql, actual_params = actual[0]
actual_sql = remove_whitespace(actual_sql)
eq_(expeceted_sql, actual_sql)
eq_(expected_params, actual_params)
def test_basic_key_error_on_save_processed(self):
mock_logging = mock.Mock()
mock_postgres = mock.Mock()
required_config = PostgreSQLCrashStorage.get_required_config()
required_config.add_option('logger', default=mock_logging)
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
values_source_list=[{
'logger': mock_logging,
'database_class': mock_postgres
}],
argv_source=[]
)
with config_manager.context() as config:
crashstorage = PostgreSQLCrashStorage(config)
database = crashstorage.database.return_value = mock.MagicMock()
ok_(isinstance(database, mock.Mock))
broken_processed_crash = {
"product": "Peter",
"version": "1.0B3",
"ooid": "abc123",
"submitted_timestamp": time.time(),
"unknown_field": 'whatever'
}
assert_raises(KeyError,
crashstorage.save_processed,
broken_processed_crash)
def test_basic_postgres_save_processed_success(self):
config = DotDict()
config.database_class = mock.MagicMock()
config.transaction_executor_class = TransactionExecutorWithInfiniteBackoff
config.redactor_class = mock.Mock()
config.backoff_delays = [1]
config.wait_log_interval = 10
config.logger = mock.Mock()
mocked_database_connection_source = config.database_class.return_value
mocked_connection = (
mocked_database_connection_source.return_value
.__enter__.return_value
)
mocked_cursor = mocked_connection.cursor.return_value.__enter__.return_value
# the call to be tested
crashstorage = PostgreSQLCrashStorage(config)
crashstorage.save_processed(a_processed_crash)
eq_(mocked_database_connection_source.call_count, 1)
eq_(mocked_cursor.execute.call_count, 3)
# check correct fragments
sql_fragments = [
"UPDATE reports_20120402",
'select id from plugins',
'UPDATE processed_crashes_20120402'
]
for a_call, a_fragment in zip(mocked_cursor.execute.call_args_list, sql_fragments):
ok_(a_fragment in a_call[0][0])
def test_basic_postgres_save_processed_success_2(self):
config = DotDict()
config.database_class = mock.MagicMock()
config.transaction_executor_class = TransactionExecutorWithInfiniteBackoff
config.redactor_class = mock.Mock()
config.backoff_delays = [1]
config.wait_log_interval = 10
config.logger = mock.Mock()
mocked_database_connection_source = config.database_class.return_value
mocked_connection = (
mocked_database_connection_source.return_value
.__enter__.return_value
)
mocked_cursor = mocked_connection.cursor.return_value.__enter__.return_value
fetch_all_returns = [((666,),), None, ((23,),), ]
def fetch_all_func(*args):
result = fetch_all_returns.pop(0)
return result
mocked_cursor.fetchall = fetch_all_func
# the call to be tested
crashstorage = PostgreSQLCrashStorage(config)
crashstorage.save_processed(a_processed_crash)
eq_(mocked_database_connection_source.call_count, 1)
eq_(mocked_cursor.execute.call_count, 4)
# check correct fragments
sql_fragments = [
"UPDATE reports_20120402",
'select id from plugins',
'insert into plugins',
'UPDATE processed_crashes_20120402'
]
for a_call, a_fragment in zip(mocked_cursor.execute.call_args_list, sql_fragments):
ok_(a_fragment in a_call[0][0])
    def test_basic_postgres_save_processed_success_3_truncations(self):
        """Over-long string fields in the processed crash must be truncated
        to their column limits before being written; the first execute call
        (the reports UPDATE) should therefore receive the truncated values."""
        config = DotDict()
        config.database_class = mock.MagicMock()
        config.transaction_executor_class = TransactionExecutorWithInfiniteBackoff
        config.redactor_class = mock.Mock()
        config.backoff_delays = [1]
        config.wait_log_interval = 10
        config.logger = mock.Mock()
        # navigate the mock hierarchy to the cursor the storage code will use
        mocked_database_connection_source = config.database_class.return_value
        mocked_connection = (
            mocked_database_connection_source.return_value
            .__enter__.return_value
        )
        mocked_cursor = mocked_connection.cursor.return_value.__enter__.return_value
        # the call to be tested
        crashstorage = PostgreSQLCrashStorage(config)
        crashstorage.save_processed(a_processed_crash_with_everything_too_long)
        eq_(mocked_database_connection_source.call_count, 1)
        eq_(mocked_cursor.execute.call_count, 3)
        # check correct fragments
        first_call = mocked_cursor.execute.call_args_list[0]
        # the parameters of the reports UPDATE must be the truncated values
        eq_(
            first_call[0][1],
            a_processed_report_with_everything_truncated * 2
        )
    def test_basic_postgres_save_processed_operational_error(self):
        """When the connection keeps raising OperationalError, the
        transaction executor should retry once per entry in backoff_delays
        and finally re-raise the error to the caller."""
        mock_logging = mock.Mock()
        mock_postgres = mock.Mock()
        required_config = PostgreSQLCrashStorage.get_required_config()
        required_config.add_option(
            'logger',
            default=mock_logging)
        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'database_class': mock_postgres,
                'transaction_executor_class':
                    TransactionExecutorWithLimitedBackoff,
                'backoff_delays': [0, 0, 0],
            }],
            argv_source=[]
        )
        with config_manager.context() as config:
            crashstorage = PostgreSQLCrashStorage(config)
            # make the executor treat OperationalError as retryable
            crashstorage.database.operational_exceptions = (OperationalError,)
            database = crashstorage.database.return_value = mock.MagicMock()
            ok_(isinstance(database, mock.Mock))
            # a connection whose cursor always blows up with OperationalError
            m = mock.MagicMock()
            m.__enter__.return_value = m
            database = crashstorage.database.return_value = m
            m.cursor.side_effect = OperationalError('bad')
            assert_raises(OperationalError,
                          crashstorage.save_processed,
                          a_processed_crash)
            # one attempt per entry in backoff_delays ([0, 0, 0] -> 3 tries)
            eq_(m.cursor.call_count, 3)
    def test_get_raw_crash(self):
        """get_raw_crash must select from the date-partitioned
        raw_crashes_YYYYMMDD table derived from the crash id and return the
        stored raw crash mapping."""
        mock_logging = mock.Mock()
        mock_postgres = mock.Mock()
        mock_postgres.return_value = mock.MagicMock()
        required_config = PostgreSQLCrashStorage.get_required_config()
        required_config.add_option('logger', default=mock_logging)
        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'database_class': mock_postgres,
                'transaction_executor_class':
                    TransactionExecutorWithLimitedBackoff,
                'backoff_delays': [0, 0, 0],
            }],
            argv_source=[]
        )
        with config_manager.context() as config:
            # the trailing "120408" of the id encodes the partition date
            a_crash_id = "936ce666-ff3b-4c7a-9674-367fe2120408"
            crashstorage = PostgreSQLCrashStorage(config)
            connection = crashstorage.database.return_value.__enter__.return_value
            # fetchall yields one row containing the raw crash mapping
            connection.cursor.return_value.__enter__.return_value.fetchall.return_value = [[
                {
                    'uuid': a_crash_id,
                }
            ]]
            a_crash = crashstorage.get_raw_crash(a_crash_id)
            ok_(a_crash['uuid'] == a_crash_id)
            # the select must target the 20120402 partition for this id
            connection.cursor.return_value.__enter__.return_value.execute. \
                assert_called_with(
                    'select raw_crash from raw_crashes_20120402 where uuid = %s',
                    ('936ce666-ff3b-4c7a-9674-367fe2120408',)
                )
|
lache/RacingKingLee
|
refs/heads/master
|
monitor/engine.win64/2.74/python/lib/encodings/cp1257.py
|
272
|
""" Python Character Mapping Codec cp1257 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1257.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # NOTE: auto-generated by gencodec.py -- do not hand-edit the code.
    # Stateless charmap codec: both directions delegate to the C-level
    # charmap helpers using this module's translation tables.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap codecs are stateless, so incremental encoding is just a
    # per-chunk charmap_encode; `final` can be ignored.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Stateless counterpart for decoding; each chunk decodes independently.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream variants need no extra state: Codec supplies encode/decode.
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    # Registry hook used by the encodings package: bundles this module's
    # codec entry points into a CodecInfo record looked up under 'cp1257'.
    return codecs.CodecInfo(
        name='cp1257',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\ufffe' # 0x83 -> UNDEFINED
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\ufffe' # 0x88 -> UNDEFINED
'\u2030' # 0x89 -> PER MILLE SIGN
'\ufffe' # 0x8A -> UNDEFINED
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x8C -> UNDEFINED
'\xa8' # 0x8D -> DIAERESIS
'\u02c7' # 0x8E -> CARON
'\xb8' # 0x8F -> CEDILLA
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\u2122' # 0x99 -> TRADE MARK SIGN
'\ufffe' # 0x9A -> UNDEFINED
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x9C -> UNDEFINED
'\xaf' # 0x9D -> MACRON
'\u02db' # 0x9E -> OGONEK
'\ufffe' # 0x9F -> UNDEFINED
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\ufffe' # 0xA1 -> UNDEFINED
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\ufffe' # 0xA5 -> UNDEFINED
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xc6' # 0xAF -> LATIN CAPITAL LETTER AE
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xe6' # 0xBF -> LATIN SMALL LETTER AE
'\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON
'\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE
'\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON
'\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON
'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE
'\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE
'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK
'\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK
'\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON
'\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK
'\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE
'\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE
'\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA
'\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA
'\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON
'\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA
'\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON
'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
'\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK
'\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE
'\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE
'\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON
'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
ktossell/rust
|
refs/heads/master
|
src/etc/lldb_rust_formatters.py
|
5
|
# Copyright 2014 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import lldb
def print_val(val, internal_dict):
  '''Render *val* as Rust-syntax text, dispatching on its LLDB type class.'''
  # Map each supported SBType class to its specialized printer; anything
  # unrecognized falls back to LLDB's own textual value.
  printers = {
    lldb.eTypeClassStruct: print_struct_val,
    lldb.eTypeClassUnion: print_enum_val,
    lldb.eTypeClassPointer: print_pointer_val,
    lldb.eTypeClassArray: print_fixed_size_vec_val,
  }
  handler = printers.get(val.GetType().GetTypeClass())
  if handler is not None:
    return handler(val, internal_dict)
  return val.GetValue()
#=--------------------------------------------------------------------------------------------------
# Type-Specialized Printing Functions
#=--------------------------------------------------------------------------------------------------
def print_struct_val(val, internal_dict):
  '''Prints a struct, tuple, or tuple struct value with Rust syntax'''
  assert val.GetType().GetTypeClass() == lldb.eTypeClassStruct
  # Slices masquerade as two-field structs; give them their own rendering.
  if not is_vec_slice(val):
    return print_struct_val_starting_from(0, val, internal_dict)
  return print_vec_slice_val(val, internal_dict)
def print_vec_slice_val(val, internal_dict):
  '''Renders a slice value ({ data_ptr, length } struct) as "&[elem, ...]".'''
  elem_count = val.GetChildAtIndex(1).GetValueAsUnsigned()
  ptr_val = val.GetChildAtIndex(0)
  ptr_type = ptr_val.GetType()
  assert ptr_type.IsPointerType()
  elem_type = ptr_type.GetPointeeType()
  elem_size = elem_type.GetByteSize()
  base_addr = ptr_val.GetValueAsUnsigned()

  # Materialize each element straight from its computed address and print
  # it recursively.
  rendered = []
  for idx in range(elem_count):
    elem = val.CreateValueFromAddress(val.GetName() + ("[%s]" % idx),
                                      base_addr + idx * elem_size,
                                      elem_type)
    rendered.append(print_val(elem, internal_dict))

  return "&[%s]" % ', '.join(rendered)
def print_struct_val_starting_from(field_start_index, val, internal_dict):
  '''
  Prints a struct, tuple, or tuple struct value with Rust syntax.
  Ignores any fields before field_start_index.

  Used both directly (field_start_index=0) and by the enum printer, which
  skips the discriminant field by passing field_start_index=1.
  '''
  assert val.GetType().GetTypeClass() == lldb.eTypeClassStruct
  t = val.GetType()
  type_name = extract_type_name(t.GetName())
  num_children = val.num_children
  if (num_children - field_start_index) == 0:
    # The only field of this struct is the enum discriminant
    return type_name
  has_field_names = type_has_field_names(t)
  # Named fields render as "Name { a: 1, b: 2 }", unnamed as "Name(1, 2)".
  if has_field_names:
    template = "%(type_name)s {\n%(body)s\n}"
    separator = ", \n"
  else:
    template = "%(type_name)s(%(body)s)"
    separator = ", "
  if type_name.startswith("("):
    # this is a tuple, so don't print the type name
    type_name = ""
  def render_child(child_index):
    # Renders one field, prefixing "name: " when the struct has field names.
    this = ""
    if has_field_names:
      field_name = t.GetFieldAtIndex(child_index).GetName()
      this += field_name + ": "
    field_val = val.GetChildAtIndex(child_index)
    return this + print_val(field_val, internal_dict)
  body = separator.join([render_child(idx) for idx in range(field_start_index, num_children)])
  return template % {"type_name": type_name,
                     "body": body}
def print_enum_val(val, internal_dict):
  '''Prints an enum value with Rust syntax

  Handles the encodings emitted by rustc's debuginfo:
  - single-variant enums (no discriminant field),
  - Option-like enums encoded via a "RUST$ENCODED$ENUM$..." member name,
  - general multi-variant enums with an explicit "RUST$ENUM$DISR" field.
  '''
  assert val.GetType().GetTypeClass() == lldb.eTypeClassUnion
  if val.num_children == 1:
    # This is either an enum with just one variant, or it is an Option-like enum
    # where the discriminant is encoded in a non-nullable pointer field. We find
    # out which one it is by looking at the member name of the sole union
    # variant. If it starts with "RUST$ENCODED$ENUM$" then we have an
    # Option-like enum.
    first_variant_name = val.GetChildAtIndex(0).GetName()
    if first_variant_name and first_variant_name.startswith("RUST$ENCODED$ENUM$"):
      # This is an Option-like enum. The position of the discriminator field is
      # encoded in the name which has the format:
      # RUST$ENCODED$ENUM$<index of discriminator field>$<name of null variant>
      last_separator_index = first_variant_name.rfind("$")
      if last_separator_index == -1:
        return "<invalid enum encoding: %s>" % first_variant_name
      start_index = len("RUST$ENCODED$ENUM$")
      # Extract indices of the discriminator field
      try:
        disr_field_indices = first_variant_name[start_index :
                                                last_separator_index].split("$")
        disr_field_indices = [int(index) for index in disr_field_indices]
      except:
        return "<invalid enum encoding: %s>" % first_variant_name
      # Read the discriminant
      disr_val = val.GetChildAtIndex(0)
      for index in disr_field_indices:
        disr_val = disr_val.GetChildAtIndex(index)
      # If the discriminant field is a fat pointer we have to consider the
      # first word as the true discriminant
      if disr_val.GetType().GetTypeClass() == lldb.eTypeClassStruct:
        disr_val = disr_val.GetChildAtIndex(0)
      if disr_val.GetValueAsUnsigned() == 0:
        # Null case: Print the name of the null-variant
        null_variant_name = first_variant_name[last_separator_index + 1:]
        return null_variant_name
      else:
        # Non-null case: Interpret the data as a value of the non-null variant type
        return print_struct_val_starting_from(0, val.GetChildAtIndex(0), internal_dict)
    else:
      # This is just a regular uni-variant enum without discriminator field
      return print_struct_val_starting_from(0, val.GetChildAtIndex(0), internal_dict)
  # If we are here, this is a regular enum with more than one variant
  disr_val = val.GetChildAtIndex(0).GetChildMemberWithName("RUST$ENUM$DISR")
  disr_type = disr_val.GetType()
  if disr_type.GetTypeClass() != lldb.eTypeClassEnumeration:
    return "<Invalid enum value encountered: Discriminator is not an enum>"
  variant_index = disr_val.GetValueAsUnsigned()
  # Skip field 0 (the discriminant) when printing the selected variant.
  return print_struct_val_starting_from(1, val.GetChildAtIndex(variant_index), internal_dict)
def print_pointer_val(val, internal_dict):
  '''Prints a pointer value with Rust syntax'''
  assert val.GetType().IsPointerType()
  type_name = extract_type_name(val.GetType().GetName())
  # Reuse the pointer sigil from the type name (&, ~ or *) when present,
  # defaulting to a shared borrow.
  first_char = type_name[0:1]
  sigil = first_char if first_char in ("&", "~", "*") else "&"
  # Only the address is shown; the pointee is deliberately not dereferenced.
  return sigil + hex(val.GetValueAsUnsigned())
def print_fixed_size_vec_val(val, internal_dict):
assert val.GetType().GetTypeClass() == lldb.eTypeClassArray
output = "["
for i in range(val.num_children):
output += print_val(val.GetChildAtIndex(i), internal_dict)
if i != val.num_children - 1:
output += ", "
output += "]"
return output
#=--------------------------------------------------------------------------------------------------
# Helper Functions
#=--------------------------------------------------------------------------------------------------
unqualified_type_markers = frozenset(["(", "[", "&", "*"])


def extract_type_name(qualified_type_name):
  '''Extracts the type name from a fully qualified path.

  Tuples, arrays, references and raw pointers carry no module path and are
  returned unchanged. Generic arguments (everything from the first "<") are
  preserved; only the leading "path::to::" portion is stripped.
  '''
  # BUGFIX: slice instead of indexing so an empty type name returns ""
  # rather than raising IndexError.
  if qualified_type_name[0:1] in unqualified_type_markers:
    return qualified_type_name

  # Only search for "::" before the first "<" so separators inside generic
  # arguments don't truncate the name.
  end_of_search = qualified_type_name.find("<")
  if end_of_search < 0:
    end_of_search = len(qualified_type_name)

  index = qualified_type_name.rfind("::", 0, end_of_search)
  if index < 0:
    return qualified_type_name
  else:
    return qualified_type_name[index + 2:]
def type_has_field_names(ty):
  '''Returns true if this is a type with field names (struct, struct-like enum variant)'''
  # Enum variants may leave the first field (the discriminant) unnamed, so
  # inspect the second field when there is more than one.
  index = 1 if ty.GetNumberOfFields() > 1 else 0
  # Idiom fix: compare against None with `is not` rather than `!=`.
  return ty.GetFieldAtIndex(index).GetName() is not None
def is_vec_slice(val):
  '''Detects the two-field { data_ptr, length } struct layout of a slice.'''
  ty = val.GetType()
  # Structural check first: a struct with exactly the two well-known fields.
  looks_like_slice = (
    ty.GetTypeClass() == lldb.eTypeClassStruct and
    ty.GetNumberOfFields() == 2 and
    ty.GetFieldAtIndex(0).GetName() == "data_ptr" and
    ty.GetFieldAtIndex(1).GetName() == "length"
  )
  if not looks_like_slice:
    return False
  # Then confirm via the normalized type name, e.g. "&[u8]".
  type_name = extract_type_name(ty.GetName()).replace("&'static", "&").replace(" ", "")
  return type_name.startswith("&[") and type_name.endswith("]")
# vi: sw=2:ts=2
|
charlesvdv/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/service-workers/service-worker/resources/update-max-aged-worker-imported-script.py
|
39
|
import time
def main(request, response):
    """wptserve handler: serve a long-lived cacheable service-worker script.

    Returns (headers, body) where the JS body reports whether this response
    was produced by a revalidation (conditional request) or a normal fetch,
    plus a per-response timestamp so updated bodies are detectable.
    """
    headers = [('Cache-Control', 'max-age=86400'),
               ('Content-Type', 'application/javascript'),
               ('Last-Modified', time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                               time.gmtime()))]

    # A conditional request means the browser is revalidating a cached copy.
    # Modernized from the Python-2-only dict.has_key() to the `in` operator.
    revalidate = 'if-modified-since' in request.headers

    body = '''
        self.addEventListener('message', function(e) {
            e.data.port.postMessage({
                from: "imported",
                type: "%s",
                value: %s
            });
          });
    ''' % ('revalidate' if revalidate else 'normal', time.time())

    return headers, body
|
smmribeiro/intellij-community
|
refs/heads/master
|
python/testData/copyPaste/multiLine/IndentMulti21.after.py
|
996
|
class C:
def foo(self):
x = 1
y = 2
y = 2
|
eswartz/RenderPipeline
|
refs/heads/master
|
rpcore/gui/checkbox.py
|
2
|
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from panda3d.core import TransparencyAttrib, SamplerState
from direct.gui.DirectCheckBox import DirectCheckBox
import direct.gui.DirectGuiGlobals as DGG
from rpcore.rpobject import RPObject
from rpcore.loader import RPLoader
class Checkbox(RPObject):

    """ This is a wrapper around DirectCheckBox, providing a simpler interface
    and better visuals """

    def __init__(self, parent=None, x=0, y=0, callback=None, extra_args=None,
                 radio=False, expand_width=100, checked=False, enabled=True):
        """Creates the checkbox.

        parent/x/y place the widget; callback(state, *extra_args) fires on
        toggle; radio switches to radio-button imagery; expand_width widens
        the clickable frame; checked sets the initial state; enabled=False
        picks the greyed-out imagery for both states.
        """
        RPObject.__init__(self)
        prefix = "checkbox" if not radio else "radiobox"

        if enabled:
            checked_img = RPLoader.load_texture(
                "/$$rp/data/gui/" + prefix + "_checked.png")
            unchecked_img = RPLoader.load_texture(
                "/$$rp/data/gui/" + prefix + "_default.png")
        else:
            # A disabled box shows the same greyed-out image in both states
            checked_img = RPLoader.load_texture(
                "/$$rp/data/gui/" + prefix + "_disabled.png")
            unchecked_img = checked_img

        # Set near filter, otherwise textures look like crap
        for tex in [checked_img, unchecked_img]:
            tex.set_minfilter(SamplerState.FT_linear)
            tex.set_magfilter(SamplerState.FT_linear)
            tex.set_wrap_u(SamplerState.WM_clamp)
            tex.set_wrap_v(SamplerState.WM_clamp)
            tex.set_anisotropic_degree(0)

        # BUGFIX: normalize extra_args to a list. The previous code stored
        # None unchanged, so `[status] + self._extra_args` in
        # _update_status/set_checked raised TypeError whenever a callback
        # was supplied without extra_args.
        self._extra_args = extra_args if extra_args is not None else []

        self._node = DirectCheckBox(
            parent=parent, pos=(x + 11, 1, -y - 8), scale=(10 / 2.0, 1, 10 / 2.0),
            checkedImage=checked_img, uncheckedImage=unchecked_img,
            image=unchecked_img, extraArgs=self._extra_args, state=DGG.NORMAL,
            relief=DGG.FLAT, command=self._update_status)
        self._node["frameColor"] = (0, 0, 0, 0)
        self._node["frameSize"] = (-2.6, 2 + expand_width / 7.5, -2.35, 2.5)
        self._node.set_transparency(TransparencyAttrib.M_alpha)

        self._callback = callback
        self._collection = None
        if checked:
            # Apply the initial state without triggering the callback
            self.set_checked(True, False)

    @property
    def collection(self):
        """ Returns a handle to the assigned checkbox collection, or None
        if no collection was assigned """
        return self._collection

    @collection.setter
    def collection(self, coll):
        """ Internal method to add a checkbox to a checkbox collection, this
        is used for radio-buttons """
        self._collection = coll

    @property
    def checked(self):
        """ Returns whether the node is currently checked """
        return self._node["isChecked"]

    @property
    def node(self):
        """ Returns a handle to the internally used node """
        return self._node

    def _update_status(self, status):
        """ Internal callback fired when the box is toggled (also when
        another checkbox in the same radio group changed its value).
        Enforces radio-group semantics and forwards the new state to the
        user callback. """
        if not status and self._collection:
            # Part of a radio group: re-toggle so the box cannot be unchecked
            # by clicking it again
            self._node.commandFunc(None)
            return

        if self._collection:
            if status:
                self._collection.on_checkbox_changed(self)
                # A radio box can't be unchecked
                # self._node["state"] = DGG.DISABLED

        if self._callback is not None:
            self._callback(*([status] + self._extra_args))

    def set_checked(self, val, do_callback=True):
        """ Internal method to check/uncheck the checkbox """
        self._node["isChecked"] = val

        if val:
            self._node["image"] = self._node["checkedImage"]
        else:
            self._node["image"] = self._node["uncheckedImage"]

        if do_callback and self._callback is not None:
            self._callback(*([val] + self._extra_args))
|
sporksmith/polygraph
|
refs/heads/master
|
polygraph/__init__.py
|
12
|
# Polygraph (release 0.1)
# Signature generation algorithms for polymorphic worms
#
# Copyright (c) 2004-2005, Intel Corporation
# All Rights Reserved
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
|
jriehl/numba
|
refs/heads/master
|
numba/cuda/args.py
|
2
|
"""
Hints to wrap Kernel arguments to indicate how to manage host-device
memory transfers before & after the kernel call.
"""
import abc
from numba.six import add_metaclass
from numba.typing.typeof import typeof, Purpose
@add_metaclass(abc.ABCMeta)
class ArgHint:
    # Abstract base for kernel-argument wrappers (In/Out/InOut) that control
    # the direction of host<->device transfers around a kernel launch.
    def __init__(self, value):
        # value: the host object (e.g. an ndarray) being passed to the kernel
        self.value = value

    @abc.abstractmethod
    def to_device(self, retr, stream=0):
        """
        :param stream: a stream to use when copying data
        :param retr:
            a list of clean-up work to do after the kernel's been run.
            Append 0-arg lambdas to it!
        :return: a value (usually an `DeviceNDArray`) to be passed to
            the kernel
        """
        pass

    @property
    def _numba_type_(self):
        # Lets numba's typing machinery infer the kernel argument type from
        # the wrapped host value.
        return typeof(self.value, Purpose.argument)
class In(ArgHint):
    """Host-to-device only: the value is copied in, never written back."""

    def to_device(self, retr, stream=0):
        from .cudadrv.devicearray import auto_device
        device_array, _ = auto_device(self.value, stream=stream)
        # No writeback is registered; the lambda merely holds a reference so
        # the device array stays alive until the kernel has been called.
        retr.append(lambda: device_array)
        return device_array
class Out(ArgHint):
    """Device-to-host only: allocate without copying in, write back after."""

    def to_device(self, retr, stream=0):
        from .cudadrv.devicearray import auto_device
        device_array, needs_writeback = auto_device(
            self.value, copy=False, stream=stream)
        # Schedule the device->host copy to run after the kernel finishes.
        if needs_writeback:
            retr.append(
                lambda: device_array.copy_to_host(self.value, stream=stream))
        return device_array
class InOut(ArgHint):
    """Bidirectional: copy host-to-device before, device-to-host after."""

    def to_device(self, retr, stream=0):
        from .cudadrv.devicearray import auto_device
        device_array, needs_writeback = auto_device(
            self.value, stream=stream)
        # Schedule the device->host copy to run after the kernel finishes.
        if needs_writeback:
            retr.append(
                lambda: device_array.copy_to_host(self.value, stream=stream))
        return device_array
def wrap_arg(value, default=InOut):
    """Coerce *value* to an ArgHint, wrapping raw values with *default*."""
    if isinstance(value, ArgHint):
        return value
    return default(value)


__all__ = [
    'In',
    'Out',
    'InOut',
    'ArgHint',
    'wrap_arg',
]
|
3quarterstack/simple_blog
|
refs/heads/master
|
django/contrib/gis/sitemaps/georss.py
|
314
|
from django.core import urlresolvers
from django.contrib.sitemaps import Sitemap
class GeoRSSSitemap(Sitemap):
    """
    A minimal hook to produce sitemaps for GeoRSS feeds.
    """
    def __init__(self, feed_dict, slug_dict=None):
        """
        This sitemap object initializes on a feed dictionary (as would be passed
        to `django.contrib.gis.views.feed`) and a slug dictionary.
        If the slug dictionary is not defined, then it's assumed the keys provide
        the URL parameter to the feed.  However, if you have a complex feed (e.g.,
        you override `get_object`, then you'll need to provide a slug dictionary.
        The slug dictionary should have the same keys as the feed dictionary, but
        each value in the slug dictionary should be a sequence of slugs that may
        be used for valid feeds.  For example, let's say we have a feed that
        returns objects for a specific ZIP code in our feed dictionary:

            feed_dict = {'zipcode' : ZipFeed}

        Then we would use a slug dictionary with a list of the zip code slugs
        corresponding to feeds you want listed in the sitemap:

            slug_dict = {'zipcode' : ['77002', '77054']}
        """
        # Setting up.
        self.feed_dict = feed_dict
        self.locations = []
        if slug_dict is None:
            slug_dict = {}
        # Getting the feed locations: one "<section>/<slug>" entry per slug
        # when slugs are given, otherwise the bare section name.
        for section in feed_dict:
            slugs = slug_dict.get(section)
            if slugs:
                for slug in slugs:
                    self.locations.append('%s/%s' % (section, slug))
            else:
                self.locations.append(section)

    def get_urls(self, page=1, site=None):
        """
        This method is overridden so the appropriate `geo_format` attribute
        is placed on each URL element.
        """
        urls = Sitemap.get_urls(self, page=page, site=site)
        for url in urls:
            url['geo_format'] = 'georss'
        return urls

    def items(self):
        """Return the precomputed list of feed location strings."""
        return self.locations

    def location(self, obj):
        """Reverse the URL of the GeoRSS feed view for the given location."""
        return urlresolvers.reverse('django.contrib.gis.views.feed', args=(obj,))
|
DavidLP/home-assistant
|
refs/heads/dev
|
tests/components/logger/__init__.py
|
39
|
"""Tests for the logger component."""
|
harmy/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/encodings/iso8859_7.py
|
272
|
""" Python Character Mapping Codec iso8859_7 generated from 'MAPPINGS/ISO8859/8859-7.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # NOTE: auto-generated by gencodec.py -- do not hand-edit the code.
    # Stateless charmap codec: both directions delegate to the C-level
    # charmap helpers using this module's translation tables.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap codecs are stateless, so incremental encoding is just a
    # per-chunk charmap_encode; `final` can be ignored.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Stateless counterpart for decoding; each chunk decodes independently.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Codec supplies encode/decode; StreamWriter supplies the stream plumbing.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Codec supplies encode/decode; StreamReader supplies the stream plumbing.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this codec with the codecs machinery."""
    # One shared stateless Codec instance provides both bound methods.
    codec = Codec()
    return codecs.CodecInfo(
        name='iso8859-7',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u2018' # 0xA1 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0xA2 -> RIGHT SINGLE QUOTATION MARK
'\xa3' # 0xA3 -> POUND SIGN
'\u20ac' # 0xA4 -> EURO SIGN
'\u20af' # 0xA5 -> DRACHMA SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u037a' # 0xAA -> GREEK YPOGEGRAMMENI
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\ufffe'
'\u2015' # 0xAF -> HORIZONTAL BAR
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\u0384' # 0xB4 -> GREEK TONOS
'\u0385' # 0xB5 -> GREEK DIALYTIKA TONOS
'\u0386' # 0xB6 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\xb7' # 0xB7 -> MIDDLE DOT
'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
'\ufffe'
'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
'\u03bd' # 0xED -> GREEK SMALL LETTER NU
'\u03be' # 0xEE -> GREEK SMALL LETTER XI
'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
'\ufffe'
)
### Encoding table
# Inverse of decoding_table: maps each Unicode character back to its
# ISO 8859-7 byte value (unmapped positions were marked with '\ufffe').
encoding_table=codecs.charmap_build(decoding_table)
|
kingcons/pybrightcove
|
refs/heads/master
|
tests/test_playlist.py
|
3
|
# Copyright (c) 2009 StudioNow, Inc <patrick@studionow.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Test the Playlist object
"""
import unittest
import uuid
import mock
from pybrightcove import enums
from pybrightcove import exceptions
from pybrightcove import playlist
from pybrightcove import video
# Fixture IDs used throughout these tests.  They never reach the real
# Brightcove API because APIConnection is mocked in every test.
TEST_VIDEO_ID = 11449913001
TEST_VIDEO_IDS = [TEST_VIDEO_ID, 24780403001, 24780402001]
TEST_PLAYLIST_ID = 24781161001
TEST_PLAYLIST_IDS = [TEST_PLAYLIST_ID, 10518202001]
TEST_PLAYLIST_REF_ID = 'unittest-playlist'
TEST_PLAYLIST_REF_IDS = [TEST_PLAYLIST_REF_ID, 'test']
class PlaylistTest(unittest.TestCase):
    """Unit tests for pybrightcove's Playlist wrapper.

    All Brightcove API traffic is stubbed by patching
    `pybrightcove.connection.APIConnection`, so no network access occurs.
    NOTE(review): Python 2 syntax throughout (`except ..., e`, `print`
    statements) -- this module predates Python 3 support.
    """

    def setUp(self):
        # Fresh UUID per test so name-based assertions never collide
        # across runs.
        self.test_uuid = str(uuid.uuid4())

    def _get_list_mock(self, ConnectionMock):
        """Build a connection mock whose get_list() returns a 2-item page."""
        m = ConnectionMock()
        c = mock.Mock()
        c.items = [mock.Mock(), mock.Mock()]
        c.total_count = 2
        c.page_size = 0
        m.get_list.return_value = c
        return m

    @mock.patch('pybrightcove.connection.APIConnection')
    def test_instantiate_new(self, ConnectionMock):
        # A brand-new playlist has no id until saved.
        m = ConnectionMock()
        pl = playlist.Playlist(name='My Playlist', type=enums.PlaylistTypeEnum.EXPLICIT)
        pl.video_ids = TEST_VIDEO_IDS
        self.assertEquals(pl.id, None)
        self.assertEquals(pl.name, 'My Playlist')
        self.assertEquals(pl.type, enums.PlaylistTypeEnum.EXPLICIT)
        self.assertEquals(pl.video_ids, TEST_VIDEO_IDS)
        self.assertEquals(pl.short_description, None)

    @mock.patch('pybrightcove.connection.APIConnection')
    def test_instantiate_with_playlist_id(self, ConnectionMock):
        # Constructing with id= triggers a lookup that populates the rest.
        m = ConnectionMock()
        m.get_item.return_value = {'id': TEST_PLAYLIST_ID, 'name': '', 'shortDescription': '', 'referenceId': TEST_PLAYLIST_REF_ID, 'thumbnailURL': '', 'videoIds': [], 'playlistType': ''}
        pl = playlist.Playlist(id=TEST_PLAYLIST_ID)
        self.assertEquals(pl.reference_id, TEST_PLAYLIST_REF_ID)

    @mock.patch('pybrightcove.connection.APIConnection')
    def test_instantiate_with_reference_id(self, ConnectionMock):
        # Constructing with reference_id= also resolves the numeric id.
        m = ConnectionMock()
        m.get_item.return_value = {'id': TEST_PLAYLIST_ID, 'name': '', 'shortDescription': '', 'referenceId': TEST_PLAYLIST_REF_ID, 'thumbnailURL': '', 'videoIds': [], 'playlistType': ''}
        pl = playlist.Playlist(reference_id=TEST_PLAYLIST_REF_ID)
        self.assertEquals(pl.id, TEST_PLAYLIST_ID)

    @mock.patch('pybrightcove.connection.APIConnection')
    def test_instantiate_with_invalid_parameters(self, ConnectionMock):
        # name without a type is not a valid constructor combination.
        try:
            pl = playlist.Playlist(name="No type specified")
            self.fail('Should have raised an error.')
        except exceptions.PyBrightcoveError, e:
            self.assertEquals(str(e), 'Invalid parameters for Playlist.')

    @mock.patch('pybrightcove.connection.APIConnection')
    def test_save_new(self, ConnectionMock):
        # Saving a new playlist must call create_playlist and adopt the
        # returned id.
        m = self._get_list_mock(ConnectionMock)
        m.post.return_value = 10
        pl = playlist.Playlist(name="Unit Test Videos",
            type=enums.PlaylistTypeEnum.EXPLICIT)
        for v in video.Video.find_by_tags(and_tags=['unittest', ]):
            pl.videos.append(v)
        pl.save()
        self.assertEquals(pl.id, 10)
        self.assertEquals(pl.name, 'Unit Test Videos')
        # method_calls order documents the expected API call sequence.
        self.assertEquals(m.method_calls[0][0], 'get_list')
        self.assertEquals(m.method_calls[0][1][0], 'find_videos_by_tags')
        self.assertEquals(m.method_calls[1][0], 'post')
        self.assertEquals(m.method_calls[1][1][0], 'create_playlist')

    @mock.patch('pybrightcove.connection.APIConnection')
    def test_save_update(self, ConnectionMock):
        # Saving an existing playlist must call update_playlist, not create.
        m = ConnectionMock()
        data = {}
        data['id'] = TEST_PLAYLIST_ID
        data['referenceId'] = TEST_PLAYLIST_REF_ID
        data['name'] = "test-%s" % self.test_uuid
        data['shortDescription'] = "My description"
        data['thumbnailURL'] = "http://google.com"
        data['videoIds'] = TEST_VIDEO_IDS
        data['playlistType'] = enums.PlaylistTypeEnum.EXPLICIT
        m.get_item.return_value = data
        m.post.return_value = data
        pl = playlist.Playlist(id=TEST_PLAYLIST_ID)
        pl.name = 'test-%s' % self.test_uuid
        pl.save()
        self.assertEquals(pl.id, TEST_PLAYLIST_ID)
        self.assertEquals(pl.name, 'test-%s' % self.test_uuid)
        self.assertEquals(m.method_calls[0][0], 'get_item')
        self.assertEquals(m.method_calls[0][1][0], 'find_playlist_by_id')
        self.assertEquals(m.method_calls[1][0], 'post')
        self.assertEquals(m.method_calls[1][1][0], 'update_playlist')

    @mock.patch('pybrightcove.connection.APIConnection')
    def test_delete(self, ConnectionMock):
        # delete() clears the local id after calling delete_playlist.
        m = self._get_list_mock(ConnectionMock)
        m.post.return_value = 10
        pl = playlist.Playlist(name="DELETE - Unit Test Videos",
            type=enums.PlaylistTypeEnum.EXPLICIT)
        for v in video.Video.find_by_tags(and_tags=['unittest', ]):
            pl.videos.append(v)
        self.assertEquals(pl.id, None)
        pl.save()
        self.assertEquals(pl.id, 10)
        pl.delete()
        self.assertEquals(pl.id, None)
        self.assertEquals(m.method_calls[0][0], 'get_list')
        self.assertEquals(m.method_calls[0][1][0], 'find_videos_by_tags')
        self.assertEquals(m.method_calls[1][0], 'post')
        self.assertEquals(m.method_calls[1][1][0], 'create_playlist')
        self.assertEquals(m.method_calls[2][0], 'post')
        self.assertEquals(m.method_calls[2][1][0], 'delete_playlist')

    @mock.patch('pybrightcove.connection.APIConnection')
    def test_find_by_ids(self, ConnectionMock):
        # IDs are joined into a comma-separated string for the API call.
        m = self._get_list_mock(ConnectionMock)
        playlists = playlist.Playlist.find_by_ids(TEST_PLAYLIST_IDS)
        for pl in playlists:
            print pl
        print m.method_calls
        self.assertEquals(m.method_calls[0][0], 'get_list')
        self.assertEquals(m.method_calls[0][1][0], 'find_playlists_by_ids')
        self.assertEquals(m.method_calls[0][2]['playlist_ids'], ','.join([str(x) for x in TEST_PLAYLIST_IDS]))

    @mock.patch('pybrightcove.connection.APIConnection')
    def test_find_by_reference_ids(self, ConnectionMock):
        m = self._get_list_mock(ConnectionMock)
        playlists = playlist.Playlist.find_by_reference_ids(TEST_PLAYLIST_REF_IDS)
        for pl in playlists:
            print pl
        print m.method_calls
        self.assertEquals(m.method_calls[0][0], 'get_list')
        self.assertEquals(m.method_calls[0][1][0], 'find_playlists_by_reference_ids')
        self.assertEquals(m.method_calls[0][2]['reference_ids'], ','.join([str(x) for x in TEST_PLAYLIST_REF_IDS]))

    @mock.patch('pybrightcove.connection.APIConnection')
    def test_find_for_player_id(self, ConnectionMock):
        m = self._get_list_mock(ConnectionMock)
        playlists = playlist.Playlist.find_for_player_id(23424255)
        for pl in playlists:
            print pl
        print m.method_calls
        self.assertEquals(m.method_calls[0][0], 'get_list')
        self.assertEquals(m.method_calls[0][1][0], 'find_playlists_for_player_id')
        self.assertEquals(m.method_calls[0][2]['player_id'], 23424255)

    @mock.patch('pybrightcove.connection.APIConnection')
    def test_find_all(self, ConnectionMock):
        m = self._get_list_mock(ConnectionMock)
        playlists = playlist.Playlist.find_all()
        for pl in playlists:
            print pl
        self.assertEquals(m.method_calls[0][0], 'get_list')
        self.assertEquals(m.method_calls[0][1][0], 'find_all_playlists')
|
zak-k/iris
|
refs/heads/master
|
lib/iris/tests/unit/experimental/stratify/test_relevel.py
|
6
|
# (C) British Crown Copyright 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for the :func:`iris.experimental.stratify.relevel` function.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from functools import partial
import numpy as np
from numpy.testing import assert_array_equal
import iris
from iris.coords import AuxCoord, DimCoord
import iris.tests.stock as stock
try:
import stratify
from iris.experimental.stratify import relevel
except ImportError:
stratify = None
@tests.skip_stratify
class Test(tests.IrisTest):
    """Tests for iris.experimental.stratify.relevel.

    Each test exercises every accepted `axis` spelling in self.axes:
    the coord object, its name, None, and an integer dimension.
    """

    def setUp(self):
        cube = stock.simple_3d()[:, :1, :1]
        #: The data from which to get the levels.
        self.src_levels = cube.copy()
        #: The data to interpolate.
        self.cube = cube.copy()
        self.cube.rename('foobar')
        # Scale by 10 so interpolated values are distinguishable from levels.
        self.cube *= 10
        self.coord = self.src_levels.coord('wibble')
        # All equivalent ways of identifying the interpolation axis.
        self.axes = (self.coord, self.coord.name(), None, 0)

    def test_broadcast_fail_src_levels(self):
        # src_levels whose shape cannot broadcast to the cube must raise.
        emsg = 'Cannot broadcast the cube and src_levels'
        data = np.arange(60).reshape(3, 4, 5)
        with self.assertRaisesRegexp(ValueError, emsg):
            relevel(self.cube, AuxCoord(data), [1, 2, 3])

    def test_broadcast_fail_tgt_levels(self):
        # Likewise for incompatible target levels.
        emsg = 'Cannot broadcast the cube and tgt_levels'
        data = np.arange(60).reshape(3, 4, 5)
        with self.assertRaisesRegexp(ValueError, emsg):
            relevel(self.cube, self.coord, data)

    def test_standard_input(self):
        # Monotonic target levels -> a DimCoord on the result.
        for axis in self.axes:
            result = relevel(self.cube,
                             self.src_levels,
                             [-1, 0, 5.5],
                             axis=axis)
            # -1 is below the data range, hence NaN.
            assert_array_equal(result.data.flatten(),
                               np.array([np.nan, 0, 55]))
            expected = DimCoord([-1, 0, 5.5], units=1, long_name='thingness')
            self.assertEqual(expected, result.coord('thingness'))

    def test_non_monotonic(self):
        # Non-monotonic target levels -> an AuxCoord instead of a DimCoord.
        for axis in self.axes:
            result = relevel(self.cube,
                             self.src_levels,
                             [2, 3, 2],
                             axis=axis)
            assert_array_equal(result.data.flatten(),
                               np.array([20, 30, np.nan]))
            expected = AuxCoord([2, 3, 2], units=1, long_name='thingness')
            self.assertEqual(result.coord('thingness'), expected)

    def test_static_level(self):
        # Repeating the same level is allowed.
        for axis in self.axes:
            result = relevel(self.cube,
                             self.src_levels,
                             [2, 2],
                             axis=axis)
            assert_array_equal(result.data.flatten(), np.array([20, 20]))

    def test_coord_input(self):
        # src_levels may be given as a coordinate rather than a cube.
        source = AuxCoord(self.src_levels.data)
        source.metadata = self.src_levels.metadata
        for axis in self.axes:
            result = relevel(self.cube,
                             source,
                             [0, 12, 13],
                             axis=axis)
            self.assertEqual(result.shape, (3, 1, 1))
            assert_array_equal(result.data.flatten(), [0, 120, np.nan])

    def test_custom_interpolator(self):
        # A user-supplied interpolator (nearest-neighbour) replaces the default.
        interpolator = partial(stratify.interpolate, interpolation='nearest')
        for axis in self.axes:
            result = relevel(self.cube,
                             self.src_levels,
                             [-1, 0, 6.5],
                             axis=axis,
                             interpolator=interpolator)
            assert_array_equal(result.data.flatten(),
                               np.array([np.nan, 0, 120]))

    def test_multi_dim_target_levels(self):
        # Target levels may be a full array matching the source levels.
        interpolator = partial(stratify.interpolate,
                               interpolation='linear',
                               extrapolation='linear')
        for axis in self.axes:
            result = relevel(self.cube,
                             self.src_levels,
                             self.src_levels.data,
                             axis=axis,
                             interpolator=interpolator)
            assert_array_equal(result.data.flatten(), np.array([0, 120]))
            self.assertCML(result)
if __name__ == "__main__":
tests.main()
|
CTSRD-SOAAP/chromium-42.0.2311.135
|
refs/heads/master
|
third_party/mojo/src/mojo/public/tools/bindings/generators/mojom_go_generator.py
|
2
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Generates Go source files from a mojom.Module.'''
from itertools import chain
import os
import re
from mojom.generate.template_expander import UseJinja
import mojom.generate.generator as generator
import mojom.generate.module as mojom
import mojom.generate.pack as pack
class KindInfo(object):
    """Per-kind code-generation facts: Go type name, the suffixes of the
    encode/decode helper methods, and the serialized bit width."""

    _FIELDS = ('go_type', 'encode_suffix', 'decode_suffix', 'bit_size')

    def __init__(self, go_type, encode_suffix, decode_suffix, bit_size):
        # Attribute names mirror the constructor arguments so callers can
        # read them directly (e.g. _kind_infos[kind].go_type).
        for attr, value in zip(self._FIELDS,
                               (go_type, encode_suffix, decode_suffix,
                                bit_size)):
            setattr(self, attr, value)
# Go type / encode-suffix / decode-suffix / wire bit-size for every primitive
# mojom kind.  Composite kinds (arrays, maps, structs, interfaces, enums) are
# reduced to one of these entries by the helper functions below.
_kind_infos = {
  mojom.BOOL:         KindInfo('bool', 'Bool', 'Bool', 1),
  mojom.INT8:         KindInfo('int8', 'Int8', 'Int8', 8),
  mojom.UINT8:        KindInfo('uint8', 'Uint8', 'Uint8', 8),
  mojom.INT16:        KindInfo('int16', 'Int16', 'Int16', 16),
  mojom.UINT16:       KindInfo('uint16', 'Uint16', 'Uint16', 16),
  mojom.INT32:        KindInfo('int32', 'Int32', 'Int32', 32),
  mojom.UINT32:       KindInfo('uint32', 'Uint32', 'Uint32', 32),
  mojom.FLOAT:        KindInfo('float32', 'Float32', 'Float32', 32),
  mojom.HANDLE:       KindInfo(
      'system.Handle', 'Handle', 'Handle', 32),
  mojom.DCPIPE:       KindInfo(
      'system.ConsumerHandle', 'Handle', 'ConsumerHandle', 32),
  mojom.DPPIPE:       KindInfo(
      'system.ProducerHandle', 'Handle', 'ProducerHandle', 32),
  mojom.MSGPIPE:      KindInfo(
      'system.MessagePipeHandle', 'Handle', 'MessagePipeHandle', 32),
  mojom.SHAREDBUFFER: KindInfo(
      'system.SharedBufferHandle', 'Handle', 'SharedBufferHandle', 32),
  # Nullable handle kinds share the representation of their non-nullable
  # counterparts.
  mojom.NULLABLE_HANDLE:       KindInfo(
      'system.Handle', 'Handle', 'Handle', 32),
  mojom.NULLABLE_DCPIPE:       KindInfo(
      'system.ConsumerHandle', 'Handle', 'ConsumerHandle', 32),
  mojom.NULLABLE_DPPIPE:       KindInfo(
      'system.ProducerHandle', 'Handle', 'ProducerHandle', 32),
  mojom.NULLABLE_MSGPIPE:      KindInfo(
      'system.MessagePipeHandle', 'Handle', 'MessagePipeHandle', 32),
  mojom.NULLABLE_SHAREDBUFFER: KindInfo(
      'system.SharedBufferHandle', 'Handle', 'SharedBufferHandle', 32),
  mojom.INT64:           KindInfo('int64', 'Int64', 'Int64', 64),
  mojom.UINT64:          KindInfo('uint64', 'Uint64', 'Uint64', 64),
  mojom.DOUBLE:          KindInfo('float64', 'Float64', 'Float64', 64),
  # Strings are pointer types on the wire, hence 64 bits.
  mojom.STRING:          KindInfo('string', 'String', 'String', 64),
  mojom.NULLABLE_STRING: KindInfo('string', 'String', 'String', 64),
}
def GetBitSize(kind):
    """Return the number of bits `kind` occupies in the serialized payload."""
    if isinstance(kind, (mojom.Array, mojom.Map, mojom.Struct)):
        # Pointer types are encoded as a 64-bit offset.
        return 64
    if isinstance(kind, (mojom.InterfaceRequest, mojom.Interface)):
        # Interfaces travel as message-pipe handles.
        kind = mojom.MSGPIPE
    if isinstance(kind, mojom.Enum):
        # Enums travel as int32.
        kind = mojom.INT32
    return _kind_infos[kind].bit_size
def GetGoType(kind, nullable = True):
    """Return the Go type for `kind`; nullable kinds become a '*'-pointer
    unless `nullable` is False."""
    if nullable and mojom.IsNullableKind(kind):
        return '*%s' % GetNonNullableGoType(kind)
    return GetNonNullableGoType(kind)
def GetNonNullableGoType(kind):
    """Return the Go type for `kind`, ignoring nullability (no leading '*')."""
    if mojom.IsStructKind(kind):
        return '%s' % FormatName(kind.name)
    if mojom.IsArrayKind(kind):
        if kind.length:
            # Fixed-length mojom array -> Go array; otherwise a slice.
            return '[%s]%s' % (kind.length, GetGoType(kind.kind))
        return '[]%s' % GetGoType(kind.kind)
    if mojom.IsMapKind(kind):
        return 'map[%s]%s' % (GetGoType(kind.key_kind), GetGoType(kind.value_kind))
    if mojom.IsInterfaceKind(kind) or mojom.IsInterfaceRequestKind(kind):
        # Interfaces are represented by their message pipe handle.
        return GetGoType(mojom.MSGPIPE)
    if mojom.IsEnumKind(kind):
        return GetNameForNestedElement(kind)
    return _kind_infos[kind].go_type
def NameToComponent(name):
    """Split a mixed-case identifier into its lower-case word components.

    e.g. 'HTTPEntry2FooBar' -> ['http', 'entry2', 'foo', 'bar'].
    """
    # Separate a Title-cased word from whatever precedes it
    # (HTTPEntry2FooBar -> HTTP_Entry2Foo_Bar).
    with_breaks = re.sub('([^_])([A-Z][^A-Z_]+)', r'\1_\2', name)
    # Separate an upper-case letter from a preceding non-upper character
    # (HTTP_Entry2Foo_Bar -> HTTP_Entry2_Foo_Bar).
    with_breaks = re.sub('([^A-Z_])([A-Z])', r'\1_\2', with_breaks)
    return [component.lower() for component in with_breaks.split('_')]
def UpperCamelCase(name):
    """Convert `name` to UpperCamelCase (e.g. 'foo_bar' -> 'FooBar')."""
    return ''.join(component.capitalize()
                   for component in NameToComponent(name))
def FormatName(name, exported=True):
    """Format `name` as a Go identifier.

    Exported names are UpperCamelCased; unexported names keep their '_'
    separators and only have their first character lowered.
    """
    if not exported:
        return name[0].lower() + name[1:]
    return UpperCamelCase(name)
def GetNameForNestedElement(element):
    """Return the Go name for an element nested inside another ('Parent_Child')."""
    if element.parent_kind:
        return "%s_%s" % (GetNameForElement(element.parent_kind),
            FormatName(element.name))
    return FormatName(element.name)
def GetNameForElement(element, exported=True):
    """Return the Go identifier for any supported mojom AST element.

    Raises for element types this generator does not know how to name.
    """
    if (mojom.IsInterfaceKind(element) or mojom.IsStructKind(element) or
        isinstance(element, (mojom.EnumField,
                             mojom.Field,
                             mojom.Method,
                             mojom.Parameter))):
        return FormatName(element.name, exported)
    if isinstance(element, (mojom.Enum,
                            mojom.Constant,
                            mojom.ConstantValue)):
        # These may be declared inside a struct/interface, so qualify
        # with the parent's name.
        return GetNameForNestedElement(element)
    raise Exception('Unexpected element: %s' % element)
def ExpressionToText(token):
    """Render a mojom constant/enum expression as Go source text.

    Non-special tokens (plain literals) are returned unchanged.
    """
    if isinstance(token, mojom.EnumValue):
        return "%s_%s" % (GetNameForNestedElement(token.enum),
            FormatName(token.name, True))
    if isinstance(token, mojom.ConstantValue):
        return GetNameForNestedElement(token)
    if isinstance(token, mojom.Constant):
        # Emit the constant's value, recursively.
        return ExpressionToText(token.value)
    return token
def DecodeSuffix(kind):
    """Return the decoder-method name suffix used to read `kind` off the wire."""
    if mojom.IsEnumKind(kind):
        # Enums travel as int32.
        return DecodeSuffix(mojom.INT32)
    if mojom.IsInterfaceKind(kind) or mojom.IsInterfaceRequestKind(kind):
        # Interfaces travel as message-pipe handles.
        return DecodeSuffix(mojom.MSGPIPE)
    return _kind_infos[kind].decode_suffix
def EncodeSuffix(kind):
    """Return the encoder-method name suffix used to write `kind` to the wire."""
    if mojom.IsEnumKind(kind):
        # Enums travel as int32.
        return EncodeSuffix(mojom.INT32)
    if mojom.IsInterfaceKind(kind) or mojom.IsInterfaceRequestKind(kind):
        # Interfaces travel as message-pipe handles.
        return EncodeSuffix(mojom.MSGPIPE)
    return _kind_infos[kind].encode_suffix
def GetPackage(module):
    """Return the Go package name for `module`.

    The last dotted component of the mojom namespace, or 'mojom' when the
    module has no namespace.
    """
    namespace = module.namespace
    if not namespace:
        return 'mojom'
    return namespace.rsplit('.', 1)[-1]
def GetPackagePath(module):
    """Return the output path for `module`: 'mojom' joined with each dotted
    component of the module's namespace."""
    return os.path.join('mojom', *module.namespace.split('.'))
def GetStructFromMethod(method):
    """Synthesize the mojom struct describing `method`'s request parameters."""
    params_class = "%s_%s_Params" % (GetNameForElement(method.interface),
        GetNameForElement(method))
    struct = mojom.Struct(params_class, module=method.interface.module)
    for param in method.parameters:
        # 'in' prefix distinguishes request fields from response ('out') ones.
        struct.AddField("in%s" % GetNameForElement(param),
            param.kind, param.ordinal)
    # Precompute layout/version info the templates need.
    struct.packed = pack.PackedStruct(struct)
    struct.bytes = pack.GetByteLayout(struct.packed)
    struct.versions = pack.GetVersionInfo(struct.packed)
    return struct
def GetResponseStructFromMethod(method):
    """Synthesize the mojom struct describing `method`'s response parameters."""
    params_class = "%s_%s_ResponseParams" % (GetNameForElement(method.interface),
        GetNameForElement(method))
    struct = mojom.Struct(params_class, module=method.interface.module)
    for param in method.response_parameters:
        # 'out' prefix distinguishes response fields from request ('in') ones.
        struct.AddField("out%s" % GetNameForElement(param),
            param.kind, param.ordinal)
    # Precompute layout/version info the templates need.
    struct.packed = pack.PackedStruct(struct)
    struct.bytes = pack.GetByteLayout(struct.packed)
    struct.versions = pack.GetVersionInfo(struct.packed)
    return struct
class Generator(generator.Generator):
    """mojom -> Go source generator.

    `go_filters` is exposed to the Jinja template as the set of custom
    filters it may apply to mojom AST nodes.
    """

    go_filters = {
        'array': lambda kind: mojom.Array(kind),
        'bit_size': GetBitSize,
        'decode_suffix': DecodeSuffix,
        'encode_suffix': EncodeSuffix,
        'go_type': GetGoType,
        'expression_to_text': ExpressionToText,
        'is_array': mojom.IsArrayKind,
        'is_enum': mojom.IsEnumKind,
        'is_handle': mojom.IsAnyHandleKind,
        'is_map': mojom.IsMapKind,
        # Fixed `array == None` -> `array is None` (identity test for None
        # per PEP 8; behavior is unchanged for the list/None values used
        # here).
        'is_none_or_empty': lambda array: array is None or len(array) == 0,
        'is_nullable': mojom.IsNullableKind,
        'is_pointer': mojom.IsObjectKind,
        'is_struct': mojom.IsStructKind,
        'name': GetNameForElement,
        'response_struct_from_method': GetResponseStructFromMethod,
        'struct_from_method': GetStructFromMethod,
        'tab_indent': lambda s, size = 1: ('\n' + '\t' * size).join(s.splitlines())
    }

    def GetAllEnums(self):
        """Collect enums declared at module scope, in structs and in interfaces."""
        data = [self.module] + self.GetStructs() + self.module.interfaces
        enums = [x.enums for x in data]
        return [i for i in chain.from_iterable(enums)]

    def GetParameters(self):
        """Build the template context for source.tmpl."""
        return {
            'enums': self.GetAllEnums(),
            'interfaces': self.module.interfaces,
            'package': GetPackage(self.module),
            'structs': self.GetStructs(),
        }

    @UseJinja('go_templates/source.tmpl', filters=go_filters)
    def GenerateSource(self):
        return self.GetParameters()

    def GenerateFiles(self, args):
        """Render the module and write it under go/src/gen/<package path>."""
        self.Write(self.GenerateSource(), os.path.join("go", "src", "gen",
            GetPackagePath(self.module), '%s.go' % self.module.name))

    def GetJinjaParameters(self):
        # Make the template whitespace-friendly.
        return {
            'lstrip_blocks': True,
            'trim_blocks': True,
        }

    def GetGlobals(self):
        return {
            'namespace': self.module.namespace,
            'module': self.module,
        }
|
OCA/department
|
refs/heads/8.0
|
project_issue_department/project_issue.py
|
7
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Daniel Reis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class ProjectIssue(orm.Model):
    """project.issue extended with a Department, defaulted from the project."""
    _inherit = 'project.issue'
    _columns = {
        # Department handling the issue; copied from the selected project.
        'department_id': fields.many2one('hr.department', 'Department'),
    }

    def on_change_project(self, cr, uid, ids, proj_id=False, context=None):
        """When Project is changed: copy its Department to the issue.

        Extends the standard on_change and always returns a 'value' dict so
        the client form can update department_id.
        """
        res = super(ProjectIssue, self).on_change_project(
            cr, uid, ids, proj_id, context=context)
        res.setdefault('value', {})
        if proj_id:
            proj = self.pool.get('project.project').browse(
                cr, uid, proj_id, context)
            # getattr: the project model may lack department_id when the
            # companion project department module is absent -- TODO confirm.
            dept = getattr(proj, 'department_id', None)
            if dept:
                res['value'].update({'department_id': dept.id})
        else:
            # No project selected: clear the department.
            res['value'].update({'department_id': None})
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mensler/ansible
|
refs/heads/devel
|
test/integration/targets/module_utils/module_utils/sub/bam/bam.py
|
298
|
#!/usr/bin/env python
# Marker string; presumably imported by the module_utils integration tests
# to prove this file was found on the module path -- verify against callers.
bam = "BAM FROM sub/bam/bam.py"
|
niktre/espressopp
|
refs/heads/master
|
testsuite/FileIOTests/h5md/dump_h5md_test.py
|
3
|
#!/usr/bin/env python
# Copyright (c) 2015-2018
# Jakub Krajniak (jkrajniak at gmail.com)
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressopp
import h5py
import mpi4py.MPI as MPI
import os
import sys
import time
import unittest as ut
def remove_file(file_name):
    """Delete *file_name*, silently ignoring the case where it is absent.

    Uses EAFP (try to unlink, tolerate ENOENT) instead of the original
    exists()/unlink() pair, which had a race: the file could disappear
    between the check and the deletion. Other OS errors still propagate.
    """
    import errno  # local import keeps this helper self-contained
    try:
        os.unlink(file_name)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
class TestH5MD(ut.TestCase):
    """Base fixture: a default espressopp system plus four known particles.

    Subclasses attach a DumpH5MD writer; the output file is removed before
    and after every test so runs are independent.
    """

    def setUp(self):
        # Fresh output file per test.
        self.h5md_file = 'output.h5'
        remove_file(self.h5md_file)
        # 10x10x10 box, timestep 0.1 (so 5 steps advance time to 0.5).
        self.system, self.integrator = espressopp.standard_system.Default((10., 10., 10.), dt=0.1)
        # (id, position, type) triples used as the reference trajectory data.
        self.particles = [
            (1, espressopp.Real3D(1,2,3), 1),
            (2, espressopp.Real3D(2,3,4), 2),
            (3, espressopp.Real3D(3,4,5), 3),
            (4, espressopp.Real3D(4,5,6), 4)]
        self.system.storage.addParticles(self.particles, 'id', 'pos', 'type')

    def tearDown(self):
        remove_file(self.h5md_file)
class TestH5MDNVT(TestH5MD):
    """Dump with a static box; verify steps, times, positions, ids, species."""

    def setUp(self):
        super(TestH5MDNVT, self).setUp()
        self.dump_h5md = espressopp.io.DumpH5MD(
            self.system,
            self.integrator,
            self.h5md_file,
            store_species=True,
            store_velocity=True,
            store_state=True)

    def test_particles(self):
        """Checks if positions are written correctly."""
        self.dump_h5md.dump()
        self.integrator.run(5)
        self.dump_h5md.dump()
        self.dump_h5md.close()
        h5 = h5py.File(self.h5md_file, 'r')
        # Two snapshots: step 0 at t=0.0 and step 5 at t=0.5 (dt=0.1).
        self.assertEqual(h5['/particles/atoms/position/step'][0], 0)
        self.assertEqual(h5['/particles/atoms/position/time'][0], 0.0)
        self.assertEqual(h5['/particles/atoms/position/step'][1], 5)
        self.assertEqual(h5['/particles/atoms/position/time'][1], 0.5)
        positions = h5['/particles/atoms/position/value']
        for pidx, p in enumerate(positions[0][:len(self.particles)]):
            self.assertListEqual(list(p), list(self.particles[pidx][1]))
        for pidx, p in enumerate(positions[1][:len(self.particles)]):
            self.assertListEqual(list(p), list(self.particles[pidx][1]))
        ids = h5['/particles/atoms/id/value']
        for id_set in ids:
            # -1 entries are padding; use a list comprehension instead of the
            # original bare filter() so this also passes on Python 3, where
            # filter() returns an iterator that assertListEqual rejects.
            self.assertListEqual(
                [i for i in id_set if i != -1],
                [p[0] for p in self.particles])
        types = h5['/particles/atoms/species/value']
        for type_set in types:
            self.assertListEqual(
                [t for t in type_set if t != -1],
                [p[2] for p in self.particles])

    def test_check_static_box(self):
        # With the default static_box=True the box edges are stored once,
        # directly under box/edges (no per-step 'value' dataset).
        self.dump_h5md.dump()
        self.integrator.run(5)
        self.dump_h5md.dump()
        self.dump_h5md.close()
        h5 = h5py.File(self.h5md_file, 'r')
        self.assertListEqual(list(h5['/particles/atoms/box/edges']), [10.0, 10.0, 10.0])
class TestH5MDDynamicBox(TestH5MD):
    """Dump with static_box=False: box edges get a per-snapshot dataset."""

    def setUp(self):
        super(TestH5MDDynamicBox, self).setUp()
        self.dump_h5md = espressopp.io.DumpH5MD(
            self.system,
            self.integrator,
            self.h5md_file,
            store_species=True,
            store_velocity=True,
            store_state=True,
            static_box=False)

    def test_check_dynamic_box(self):
        """Checks if the change of the box is saved properly."""
        self.dump_h5md.dump()
        self.integrator.run(5)
        self.dump_h5md.dump()
        self.dump_h5md.close()
        h5 = h5py.File(self.h5md_file, 'r')
        # Check if the box is saved.
        # The box never changes here, so both stored snapshots must hold
        # the initial edges; there must be exactly one entry per dump().
        for edg in h5['/particles/atoms/box/edges/value']:
            self.assertListEqual(list(edg), [10.0, 10.0, 10.0])
        self.assertEqual(len(h5['/particles/atoms/box/edges/value']), 2)
if __name__ == '__main__':
    try:
        # h5py is optional: skip the whole suite when it is missing, unless
        # TEST_H5MD is set (CI), in which case the absence is a hard failure.
        import h5py
        ut.main()
    except ImportError as ex:
        if os.environ.get('TEST_H5MD'):  # For travis-ci tests
            raise ex
        print('Skip DumpH5MD testsuit, h5py module not found')
|
h4ck3rm1k3/pip
|
refs/heads/develop
|
docs/pipext.py
|
52
|
"""pip sphinx extensions"""
import optparse
import sys
from docutils import nodes
from docutils.parsers import rst
from docutils.statemachine import ViewList
from textwrap import dedent
from pip.commands import commands_dict as commands
from pip import cmdoptions
from pip.utils import get_prog
class PipCommandUsage(rst.Directive):
    """Render the usage string of a pip command as a literal block."""
    required_arguments = 1

    def run(self):
        command = commands[self.arguments[0]]
        # Substitute the optparse '%prog' placeholder with the real program name.
        usage = dedent(command.usage.replace(
            '%prog', '%s %s' % (get_prog(), command.name)))
        return [nodes.literal_block(usage, usage)]
class PipCommandDescription(rst.Directive):
    """Insert a pip command's class docstring, parsed as reST."""
    required_arguments = 1

    def run(self):
        node = nodes.paragraph()
        node.document = self.state.document
        desc = ViewList()
        # Feed the dedented docstring line by line into the view list.
        for doc_line in dedent(commands[self.arguments[0]].__doc__).split('\n'):
            desc.append(doc_line, "")
        self.state.nested_parse(desc, 0, node)
        return [node]
class PipOptions(rst.Directive):
    """Shared machinery for directives that render optparse options as reST."""

    def _format_option(self, option, cmd_name=None):
        """Return the reST lines (bookmark + ``.. option::`` item) for *option*."""
        if cmd_name:
            bookmark_line = ".. _`%s_%s`:" % (cmd_name, option._long_opts[0])
        else:
            bookmark_line = ".. _`%s`:" % option._long_opts[0]
        # Join short and long spellings with ", " when both exist.
        names = []
        if option._short_opts:
            names.append(option._short_opts[0])
        if option._long_opts:
            names.append(option._long_opts[0])
        line = ".. option:: " + ", ".join(names)
        if option.takes_value():
            metavar = option.metavar or option.dest.lower()
            line += " <%s>" % metavar.lower()
        # fix defaults
        opt_help = option.help.replace('%default', str(option.default))
        # fix paths with sys.prefix
        opt_help = opt_help.replace(sys.prefix, "<sys.prefix>")
        return [bookmark_line, "", line, "", " %s" % opt_help, ""]

    def _format_options(self, options, cmd_name=None):
        for option in options:
            # Hidden options stay hidden in the documentation too.
            if option.help == optparse.SUPPRESS_HELP:
                continue
            for rendered in self._format_option(option, cmd_name):
                self.view_list.append(rendered, "")

    def run(self):
        node = nodes.paragraph()
        node.document = self.state.document
        self.view_list = ViewList()
        # Subclasses implement process_options() to fill self.view_list.
        self.process_options()
        self.state.nested_parse(self.view_list, 0, node)
        return [node]
class PipGeneralOptions(PipOptions):
    # Renders the options shared by every pip command.
    def process_options(self):
        # cmdoptions stores option factories; instantiate each before rendering.
        self._format_options(
            [o() for o in cmdoptions.general_group['options']]
        )
class PipIndexOptions(PipOptions):
    # Renders the package-index-related options.
    def process_options(self):
        # cmdoptions stores option factories; instantiate each before rendering.
        self._format_options(
            [o() for o in cmdoptions.index_group['options']]
        )
class PipCommandOptions(PipOptions):
    # Renders the options specific to one named pip command; bookmark labels
    # are prefixed with the command name (see PipOptions._format_option).
    required_arguments = 1

    def process_options(self):
        cmd = commands[self.arguments[0]]()
        self._format_options(
            cmd.parser.option_groups[0].option_list,
            cmd_name=cmd.name,
        )
def setup(app):
    """Register all pip documentation directives with the Sphinx app."""
    for directive_name, directive_class in (
            ('pip-command-usage', PipCommandUsage),
            ('pip-command-description', PipCommandDescription),
            ('pip-command-options', PipCommandOptions),
            ('pip-general-options', PipGeneralOptions),
            ('pip-index-options', PipIndexOptions),
    ):
        app.add_directive(directive_name, directive_class)
|
bigcommerce/bigcommerce-api-python
|
refs/heads/master
|
bigcommerce/resources/countries.py
|
2
|
from .base import *
class Countries(ListableApiResource, CountableApiResource):
    """The Bigcommerce /countries collection."""
    resource_name = 'countries'

    def states(self, id=None):
        """Fetch this country's states; a single state when *id* is truthy."""
        if not id:
            return CountryStates.all(self.id, connection=self._connection)
        return CountryStates.get(self.id, id, connection=self._connection)
class CountryStates(ListableApiSubResource, CountableApiSubResource):
    # Sub-resource mapping to /countries/<country_id>/states.
    resource_name = 'states'
    parent_resource = 'countries'
    parent_key = 'country_id'
|
wfxiang08/django178
|
refs/heads/master
|
django/contrib/admindocs/urls.py
|
63
|
from django.conf.urls import patterns, url
from django.contrib.admindocs import views
# URLconf for django.contrib.admindocs: index, bookmarklets, template
# tag/filter listings, view and model browsers, and raw template detail.
# Uses the pre-Django-1.8 patterns() API.
urlpatterns = patterns('',
    url('^$',
        views.BaseAdminDocsView.as_view(template_name='admin_doc/index.html'),
        name='django-admindocs-docroot'),
    url('^bookmarklets/$',
        views.BookmarkletsView.as_view(),
        name='django-admindocs-bookmarklets'),
    url('^tags/$',
        views.TemplateTagIndexView.as_view(),
        name='django-admindocs-tags'),
    url('^filters/$',
        views.TemplateFilterIndexView.as_view(),
        name='django-admindocs-filters'),
    url('^views/$',
        views.ViewIndexView.as_view(),
        name='django-admindocs-views-index'),
    url('^views/(?P<view>[^/]+)/$',
        views.ViewDetailView.as_view(),
        name='django-admindocs-views-detail'),
    url('^models/$',
        views.ModelIndexView.as_view(),
        name='django-admindocs-models-index'),
    # Model detail is addressed as "<app_label>.<model_name>".
    url('^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
        views.ModelDetailView.as_view(),
        name='django-admindocs-models-detail'),
    url('^templates/(?P<template>.*)/$',
        views.TemplateDetailView.as_view(),
        name='django-admindocs-templates'),
)
|
Jgarcia-IAS/SITE
|
refs/heads/master
|
addons/point_of_sale/wizard/__init__.py
|
382
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pos_box
import pos_confirm
import pos_details
import pos_discount
import pos_open_statement
import pos_payment
import pos_session_opening
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
beiko-lab/gengis
|
refs/heads/master
|
bin/Lib/test/test_peepholer.py
|
9
|
import dis
import sys
from cStringIO import StringIO
import unittest
def disassemble(func):
    """Return the dis.dis() output for *func* as a string.

    dis.dis() writes to sys.stdout, so stdout is temporarily swapped for a
    StringIO buffer. The try/finally fixes a defect in the original: if
    dis.dis() raised, sys.stdout was left pointing at the capture buffer.
    """
    capture = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture
    try:
        dis.dis(func)
    finally:
        sys.stdout = saved_stdout
    result = capture.getvalue()
    capture.close()
    return result
def dis_single(line):
    """Compile *line* in 'single' (interactive) mode and disassemble it."""
    code = compile(line, '', 'single')
    return disassemble(code)
class TestTranforms(unittest.TestCase):
    """Verify the CPython 2 peephole optimizer via disassembly inspection.

    Each test compiles a snippet and asserts that the optimizer's expected
    bytecode rewrite happened (or, for unfoldable inputs, did not happen).
    These checks are tied to Python 2 bytecode names and syntax (backquotes,
    u-string constant folding) and are not portable to Python 3.
    """

    def test_unot(self):
        # UNARY_NOT POP_JUMP_IF_FALSE --> POP_JUMP_IF_TRUE
        def unot(x):
            if not x == 2:
                del x
        asm = disassemble(unot)
        for elem in ('UNARY_NOT', 'POP_JUMP_IF_FALSE'):
            self.assertNotIn(elem, asm)
        self.assertIn('POP_JUMP_IF_TRUE', asm)

    def test_elim_inversion_of_is_or_in(self):
        # "not (a is b)" etc. fold into the single inverted comparison op.
        for line, elem in (
            ('not a is b', '(is not)',),
            ('not a in b', '(not in)',),
            ('not a is not b', '(is)',),
            ('not a not in b', '(in)',),
            ):
            asm = dis_single(line)
            self.assertIn(elem, asm)

    def test_none_as_constant(self):
        # LOAD_GLOBAL None --> LOAD_CONST None
        def f(x):
            None
            return x
        asm = disassemble(f)
        for elem in ('LOAD_GLOBAL',):
            self.assertNotIn(elem, asm)
        for elem in ('LOAD_CONST', '(None)'):
            self.assertIn(elem, asm)
        def f():
            'Adding a docstring made this test fail in Py2.5.0'
            return None
        self.assertIn('LOAD_CONST', disassemble(f))
        self.assertNotIn('LOAD_GLOBAL', disassemble(f))

    def test_while_one(self):
        # Skip over: LOAD_CONST trueconst POP_JUMP_IF_FALSE xx
        def f():
            while 1:
                pass
            return list
        asm = disassemble(f)
        for elem in ('LOAD_CONST', 'POP_JUMP_IF_FALSE'):
            self.assertNotIn(elem, asm)
        for elem in ('JUMP_ABSOLUTE',):
            self.assertIn(elem, asm)

    def test_pack_unpack(self):
        # Small pack/unpack pairs become direct loads/rotations.
        for line, elem in (
            ('a, = a,', 'LOAD_CONST',),
            ('a, b = a, b', 'ROT_TWO',),
            ('a, b, c = a, b, c', 'ROT_THREE',),
            ):
            asm = dis_single(line)
            self.assertIn(elem, asm)
            self.assertNotIn('BUILD_TUPLE', asm)
            self.assertNotIn('UNPACK_TUPLE', asm)

    def test_folding_of_tuples_of_constants(self):
        for line, elem in (
            ('a = 1,2,3', '((1, 2, 3))'),
            ('("a","b","c")', "(('a', 'b', 'c'))"),
            ('a,b,c = 1,2,3', '((1, 2, 3))'),
            ('(None, 1, None)', '((None, 1, None))'),
            ('((1, 2), 3, 4)', '(((1, 2), 3, 4))'),
            ):
            asm = dis_single(line)
            self.assertIn(elem, asm)
            self.assertNotIn('BUILD_TUPLE', asm)
        # Bug 1053819: Tuple of constants misidentified when presented with:
        # . . . opcode_with_arg 100 unary_opcode BUILD_TUPLE 1 . . .
        # The following would segfault upon compilation
        def crater():
            (~[
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            ],)

    def test_folding_of_binops_on_constants(self):
        for line, elem in (
            ('a = 2+3+4', '(9)'),  # chained fold
            ('"@"*4', "('@@@@')"),  # check string ops
            ('a="abc" + "def"', "('abcdef')"),  # check string ops
            ('a = 3**4', '(81)'),  # binary power
            ('a = 3*4', '(12)'),  # binary multiply
            ('a = 13//4', '(3)'),  # binary floor divide
            ('a = 14%4', '(2)'),  # binary modulo
            ('a = 2+3', '(5)'),  # binary add
            ('a = 13-4', '(9)'),  # binary subtract
            ('a = (12,13)[1]', '(13)'),  # binary subscr
            ('a = 13 << 2', '(52)'),  # binary lshift
            ('a = 13 >> 2', '(3)'),  # binary rshift
            ('a = 13 & 7', '(5)'),  # binary and
            ('a = 13 ^ 7', '(10)'),  # binary xor
            ('a = 13 | 7', '(15)'),  # binary or
            ):
            asm = dis_single(line)
            self.assertIn(elem, asm, asm)
            self.assertNotIn('BINARY_', asm)
        # Verify that unfoldables are skipped
        asm = dis_single('a=2+"b"')
        self.assertIn('(2)', asm)
        self.assertIn("('b')", asm)
        # Verify that large sequences do not result from folding
        asm = dis_single('a="x"*1000')
        self.assertIn('(1000)', asm)

    def test_binary_subscr_on_unicode(self):
        # valid code get optimized
        asm = dis_single('u"foo"[0]')
        self.assertIn("(u'f')", asm)
        self.assertNotIn('BINARY_SUBSCR', asm)
        asm = dis_single('u"\u0061\uffff"[1]')
        self.assertIn("(u'\\uffff')", asm)
        self.assertNotIn('BINARY_SUBSCR', asm)
        # invalid code doesn't get optimized
        # out of range
        asm = dis_single('u"fuu"[10]')
        self.assertIn('BINARY_SUBSCR', asm)
        # non-BMP char (see #5057)
        asm = dis_single('u"\U00012345"[0]')
        self.assertIn('BINARY_SUBSCR', asm)

    def test_folding_of_unaryops_on_constants(self):
        for line, elem in (
            ('`1`', "('1')"),  # unary convert
            ('-0.5', '(-0.5)'),  # unary negative
            ('~-2', '(1)'),  # unary invert
            ):
            asm = dis_single(line)
            self.assertIn(elem, asm, asm)
            self.assertNotIn('UNARY_', asm)
        # Verify that unfoldables are skipped
        for line, elem in (
            ('-"abc"', "('abc')"),  # unary negative
            ('~"abc"', "('abc')"),  # unary invert
            ):
            asm = dis_single(line)
            self.assertIn(elem, asm, asm)
            self.assertIn('UNARY_', asm)

    def test_elim_extra_return(self):
        # RETURN LOAD_CONST None RETURN --> RETURN
        def f(x):
            return x
        asm = disassemble(f)
        self.assertNotIn('LOAD_CONST', asm)
        self.assertNotIn('(None)', asm)
        self.assertEqual(asm.split().count('RETURN_VALUE'), 1)

    def test_elim_jump_to_return(self):
        # JUMP_FORWARD to RETURN --> RETURN
        def f(cond, true_value, false_value):
            return true_value if cond else false_value
        asm = disassemble(f)
        self.assertNotIn('JUMP_FORWARD', asm)
        self.assertNotIn('JUMP_ABSOLUTE', asm)
        self.assertEqual(asm.split().count('RETURN_VALUE'), 2)

    def test_elim_jump_after_return1(self):
        # Eliminate dead code: jumps immediately after returns can't be reached
        def f(cond1, cond2):
            if cond1: return 1
            if cond2: return 2
            while 1:
                return 3
            while 1:
                if cond1: return 4
                return 5
            return 6
        asm = disassemble(f)
        self.assertNotIn('JUMP_FORWARD', asm)
        self.assertNotIn('JUMP_ABSOLUTE', asm)
        self.assertEqual(asm.split().count('RETURN_VALUE'), 6)

    def test_elim_jump_after_return2(self):
        # Eliminate dead code: jumps immediately after returns can't be reached
        def f(cond1, cond2):
            while 1:
                if cond1: return 4
        asm = disassemble(f)
        self.assertNotIn('JUMP_FORWARD', asm)
        # There should be one jump for the while loop.
        self.assertEqual(asm.split().count('JUMP_ABSOLUTE'), 1)
        self.assertEqual(asm.split().count('RETURN_VALUE'), 2)
def test_main(verbose=None):
    # Python 2 regrtest entry point: run the peephole tests, suppressing the
    # expected "backquote not supported" py3k warning raised by the suite.
    import sys
    from test import test_support
    test_classes = (TestTranforms,)
    with test_support.check_py3k_warnings(
            ("backquote not supported", SyntaxWarning)):
        test_support.run_unittest(*test_classes)

    # verify reference counting
    # (only meaningful on debug builds, where sys.gettotalrefcount exists;
    # repeated runs with stable counts indicate no reference leaks)
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in xrange(len(counts)):
            test_support.run_unittest(*test_classes)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print counts
if __name__ == "__main__":
    # Run the suite verbosely when executed as a script.
    test_main(verbose=True)
|
fitermay/intellij-community
|
refs/heads/master
|
python/helpers/pydev/_pydev_bundle/_pydev_getopt.py
|
108
|
#=======================================================================================================================
# getopt code copied since gnu_getopt is not available on jython 2.1
#=======================================================================================================================
class GetoptError(Exception):
    """Raised for unrecognized, ambiguous, or malformed options.

    Attributes:
        msg: human-readable description of the problem.
        opt: the offending option text ('' when not applicable).
    """
    # Class-level defaults mirror the stdlib getopt module's exception.
    opt = ''
    msg = ''

    def __init__(self, msg, opt=''):
        self.opt = opt
        self.msg = msg
        Exception.__init__(self, msg, opt)

    def __str__(self):
        return self.msg
def gnu_getopt(args, shortopts, longopts=[]):
    """getopt(args, options[, long_options]) -> opts, args

    This function works like getopt(), except that GNU style scanning
    mode is used by default. This means that option and non-option
    arguments may be intermixed. The getopt() function stops
    processing options as soon as a non-option argument is
    encountered.

    If the first character of the option string is `+', or if the
    environment variable POSIXLY_CORRECT is set, then option
    processing stops as soon as a non-option argument is encountered.
    """
    opts = []
    prog_args = []
    # The mutable default is harmless: longopts is copied below and never
    # mutated in place.
    if isinstance(longopts, str):
        # A single long option may be given as a bare string.
        longopts = [longopts]
    else:
        longopts = list(longopts)

    # Allow options after non-option arguments?
    # NOTE(review): unlike the docstring, POSIXLY_CORRECT is not actually
    # consulted here -- only the leading '+' enables options-first mode.
    all_options_first = False
    if shortopts.startswith('+'):
        shortopts = shortopts[1:]
        all_options_first = True

    while args:
        if args[0] == '--':
            # Explicit end-of-options marker: the rest are program args.
            prog_args += args[1:]
            break

        if args[0].startswith('--'):
            opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
        elif args[0].startswith('-'):
            opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
        else:
            if all_options_first:
                prog_args += args
                break
            else:
                prog_args.append(args[0])
                args = args[1:]

    return opts, prog_args
def do_longs(opts, opt, longopts, args):
    """Consume one long option (text after '--'); return (opts, remaining args).

    Splits an inline '=value' if present, resolves abbreviations through
    long_has_args(), and pulls the argument from *args* when required.
    """
    if '=' in opt:
        opt, optarg = opt.split('=', 1)
    else:
        optarg = None

    has_arg, opt = long_has_args(opt, longopts)
    if has_arg:
        if optarg is None:
            if not args:
                raise GetoptError('option --%s requires argument' % opt, opt)
            optarg, args = args[0], args[1:]
    elif optarg:
        raise GetoptError('option --%s must not have an argument' % opt, opt)
    opts.append(('--' + opt, optarg or ''))
    return opts, args
# Return:
#   has_arg?
#   full option name
def long_has_args(opt, longopts):
    """Resolve *opt* (possibly an abbreviation) against *longopts*.

    Returns (takes_argument, canonical_name). Raises GetoptError when the
    option is unknown or the abbreviation is ambiguous.
    """
    matches = [candidate for candidate in longopts if candidate.startswith(opt)]
    if not matches:
        raise GetoptError('option --%s not recognized' % opt, opt)
    # Exact matches win over prefix matches.
    if opt in matches:
        return False, opt
    if opt + '=' in matches:
        return True, opt
    # No exact match, so better be unique.
    if len(matches) > 1:
        # XXX since possibilities contains all valid continuations, might be
        # nice to work them into the error msg
        raise GetoptError('option --%s not a unique prefix' % opt, opt)
    assert len(matches) == 1
    canonical = matches[0]
    takes_arg = canonical.endswith('=')
    if takes_arg:
        canonical = canonical[:-1]
    return takes_arg, canonical
def do_shorts(opts, optstring, shortopts, args):
    """Consume a cluster of short options (text after '-').

    Walks the cluster character by character; an option that takes an
    argument eats the rest of the cluster, or the next element of *args*.
    Returns the updated (opts, remaining args) pair.
    """
    while optstring:
        opt, optstring = optstring[0], optstring[1:]
        if short_has_arg(opt, shortopts):
            if optstring == '':
                if not args:
                    raise GetoptError('option -%s requires argument' % opt,
                                      opt)
                optstring, args = args[0], args[1:]
            optarg, optstring = optstring, ''
        else:
            optarg = ''
        opts.append(('-' + opt, optarg))
    return opts, args
def short_has_arg(opt, shortopts):
    """Return True when short option *opt* takes an argument.

    An option takes an argument when its letter is followed by ':' in
    *shortopts*. Raises GetoptError for options not present at all.
    """
    for index, candidate in enumerate(shortopts):
        if opt == candidate != ':':
            return shortopts.startswith(':', index + 1)
    raise GetoptError('option -%s not recognized' % opt, opt)
#=======================================================================================================================
# End getopt code
#=======================================================================================================================
|
Default-Value/defaultvalue.info
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py
|
2736
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
    """Visual Studio XML tool file writer."""

    def __init__(self, tool_file_path, name):
        """Initializes the tool file.

        Args:
          tool_file_path: Path to the tool file.
          name: Name of the tool file.
        """
        self.tool_file_path = tool_file_path
        self.name = name
        # Accumulated ['Rules', rule1, rule2, ...] easy_xml specification.
        self.rules_section = ['Rules']

    def AddCustomBuildRule(self, name, cmd, description,
                           additional_dependencies,
                           outputs, extensions):
        """Adds a custom build rule to the tool file.

        Args:
          name: Name of the rule.
          cmd: Command line of the rule.
          description: Description of the rule.
          additional_dependencies: other files which may trigger the rule.
          outputs: outputs of the rule.
          extensions: extensions handled by the rule.
        """
        attributes = {
            'Name': name,
            'ExecutionDescription': description,
            'CommandLine': cmd,
            'Outputs': ';'.join(outputs),
            'FileExtensions': ';'.join(extensions),
            'AdditionalDependencies': ';'.join(additional_dependencies),
        }
        self.rules_section.append(['CustomBuildRule', attributes])

    def WriteIfChanged(self):
        """Writes the tool file, skipping the write when content is unchanged."""
        specification = [
            'VisualStudioToolFile',
            {'Version': '8.00',
             'Name': self.name},
            self.rules_section,
        ]
        easy_xml.WriteXmlIfChanged(specification, self.tool_file_path,
                                   encoding="Windows-1252")
|
bhargav2408/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/lib2to3/tests/__init__.py
|
308
|
"""Make tests/ into a package. This allows us to "import tests" and
have tests.all_tests be a TestSuite representing all test cases
from all test_*.py files in tests/."""
# Author: Collin Winter
import os
import os.path
import unittest
import types
from . import support
# Discover every tests/test_*.py module and aggregate its cases into one
# suite, exposed as tests.all_tests.
all_tests = unittest.TestSuite()
tests_dir = os.path.join(os.path.dirname(__file__), '..', 'tests')
tests = [t[0:-3] for t in os.listdir(tests_dir)
         if t.startswith('test_') and t.endswith('.py')]
loader = unittest.TestLoader()
for t in tests:
    # Package-relative import; the fromlist/level combination behaves like
    # "from . import <t>", binding the module into this module's globals.
    __import__("",globals(),locals(),[t],level=1)
    mod = globals()[t]
    all_tests.addTests(loader.loadTestsFromModule(mod))
|
tic-ull/incidagua
|
refs/heads/master
|
app_django/manage.py
|
1
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already did.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app_django.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
draugiskisprendimai/odoo
|
refs/heads/8.0
|
addons/website_event_track/__init__.py
|
1577
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import models
|
benoitsteiner/tensorflow-xsmm
|
refs/heads/master
|
tensorflow/python/estimator/inputs/pandas_io_test.py
|
7
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pandas is an optional dependency: every test below returns early when it
# is unavailable, keyed off this flag.
try:
    # pylint: disable=g-import-not-at-top
    import pandas as pd
    HAS_PANDAS = True
except IOError:
    # Pandas writes a temporary file during import. If it fails, don't use pandas.
    HAS_PANDAS = False
except ImportError:
    HAS_PANDAS = False
class PandasIoTest(test.TestCase):
    """Tests for pandas_input_fn: batching, epochs, shuffling, index handling.

    All tests no-op when pandas is unavailable (HAS_PANDAS is False).
    """

    def makeTestDataFrame(self):
        # Four rows with a non-zero-based index so index/feature mix-ups
        # would surface in the assertions.
        index = np.arange(100, 104)
        a = np.arange(4)
        b = np.arange(32, 36)
        x = pd.DataFrame({'a': a, 'b': b}, index=index)
        y = pd.Series(np.arange(-32, -28), index=index)
        return x, y

    def callInputFnOnce(self, input_fn, session):
        # Start the queue runners, fetch exactly one batch, then shut down.
        results = input_fn()
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(session, coord=coord)
        result_values = session.run(results)
        coord.request_stop()
        coord.join(threads)
        return result_values

    def testPandasInputFn_IndexMismatch(self):
        # x and y with different indices must be rejected.
        if not HAS_PANDAS:
            return
        x, _ = self.makeTestDataFrame()
        y_noindex = pd.Series(np.arange(-32, -28))
        with self.assertRaises(ValueError):
            pandas_io.pandas_input_fn(
                x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)

    def testPandasInputFn_NonBoolShuffle(self):
        if not HAS_PANDAS:
            return
        x, _ = self.makeTestDataFrame()
        y_noindex = pd.Series(np.arange(-32, -28))
        with self.assertRaisesRegexp(ValueError,
                                     'shuffle must be provided and explicitly '
                                     'set as boolean'):
            # Default shuffle is None
            pandas_io.pandas_input_fn(x, y_noindex)

    def testPandasInputFn_ProducesExpectedOutputs(self):
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=False, num_epochs=1)
            features, target = self.callInputFnOnce(input_fn, session)
            self.assertAllEqual(features['a'], [0, 1])
            self.assertAllEqual(features['b'], [32, 33])
            self.assertAllEqual(target, [-32, -31])

    def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
        # batch_size larger than the data: one batch holds both epochs.
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            index = np.arange(100, 102)
            a = np.arange(2)
            b = np.arange(32, 34)
            x = pd.DataFrame({'a': a, 'b': b}, index=index)
            y = pd.Series(np.arange(-32, -30), index=index)
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=128, shuffle=False, num_epochs=2)
            results = input_fn()
            coord = coordinator.Coordinator()
            threads = queue_runner_impl.start_queue_runners(session, coord=coord)
            features, target = session.run(results)
            self.assertAllEqual(features['a'], [0, 1, 0, 1])
            self.assertAllEqual(features['b'], [32, 33, 32, 33])
            self.assertAllEqual(target, [-32, -31, -32, -31])
            with self.assertRaises(errors.OutOfRangeError):
                session.run(results)
            coord.request_stop()
            coord.join(threads)

    def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
        # Five rows with batch_size=2: expect batches of 2, 2, then 1.
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            index = np.arange(100, 105)
            a = np.arange(5)
            b = np.arange(32, 37)
            x = pd.DataFrame({'a': a, 'b': b}, index=index)
            y = pd.Series(np.arange(-32, -27), index=index)
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=False, num_epochs=1)
            results = input_fn()
            coord = coordinator.Coordinator()
            threads = queue_runner_impl.start_queue_runners(session, coord=coord)
            features, target = session.run(results)
            self.assertAllEqual(features['a'], [0, 1])
            self.assertAllEqual(features['b'], [32, 33])
            self.assertAllEqual(target, [-32, -31])
            features, target = session.run(results)
            self.assertAllEqual(features['a'], [2, 3])
            self.assertAllEqual(features['b'], [34, 35])
            self.assertAllEqual(target, [-30, -29])
            features, target = session.run(results)
            self.assertAllEqual(features['a'], [4])
            self.assertAllEqual(features['b'], [36])
            self.assertAllEqual(target, [-28])
            with self.assertRaises(errors.OutOfRangeError):
                session.run(results)
            coord.request_stop()
            coord.join(threads)

    def testPandasInputFn_OnlyX(self):
        # y=None: the input_fn yields features only, no target.
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, _ = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y=None, batch_size=2, shuffle=False, num_epochs=1)
            features = self.callInputFnOnce(input_fn, session)
            self.assertAllEqual(features['a'], [0, 1])
            self.assertAllEqual(features['b'], [32, 33])

    def testPandasInputFn_ExcludesIndex(self):
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=False, num_epochs=1)
            features, _ = self.callInputFnOnce(input_fn, session)
            self.assertFalse('index' in features)

    def assertInputsCallableNTimes(self, input_fn, session, n):
        # Helper: the input pipeline yields exactly n batches, then raises
        # OutOfRangeError.
        inputs = input_fn()
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(session, coord=coord)
        for _ in range(n):
            session.run(inputs)
        with self.assertRaises(errors.OutOfRangeError):
            session.run(inputs)
        coord.request_stop()
        coord.join(threads)

    def testPandasInputFn_RespectsEpoch_NoShuffle(self):
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=4, shuffle=False, num_epochs=1)
            self.assertInputsCallableNTimes(input_fn, session, 1)

    def testPandasInputFn_RespectsEpoch_WithShuffle(self):
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=4, shuffle=True, num_epochs=1)
            self.assertInputsCallableNTimes(input_fn, session, 1)

    def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
        # queue_capacity=None lets the implementation size the queue itself.
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
            self.assertInputsCallableNTimes(input_fn, session, 4)

    def testPandasInputFn_RespectsEpochUnevenBatches(self):
        if not HAS_PANDAS:
            return
        x, y = self.makeTestDataFrame()
        with self.test_session() as session:
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=3, shuffle=False, num_epochs=1)
            # Before the last batch, only one element of the epoch should remain.
            self.assertInputsCallableNTimes(input_fn, session, 2)

    def testPandasInputFn_Idempotent(self):
        # Building and invoking the same input_fn twice must not fail.
        if not HAS_PANDAS:
            return
        x, y = self.makeTestDataFrame()
        for _ in range(2):
            pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=False, num_epochs=1)()
        for _ in range(2):
            pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
    # Delegate to TensorFlow's test runner.
    test.main()
|
janezkranjc/clowdflows
|
refs/heads/master
|
workflows/lemmagen/urls.py
|
107
|
from django.conf.urls.defaults import patterns, include, url
# This widget package currently exposes no HTTP endpoints; the commented-out
# patterns below are kept as a reference (copied from the Latino package's
# document-index views) for a possible future lemmagen document browser.
urlpatterns = patterns('',
    #url(r'^get-adc-index/widget(?P<widget_id>[0-9]+)/nx/Index.html$', 'workflows.latino.views.get_adc_index', name='get adc index'),
    #url(r'^get-adc-index/widget(?P<widget_id>[0-9]+)/(?P<narrow_doc>n?)x/Index.html$', 'workflows.latino.views.get_adc_index', name='get adc index'),
    #url(r'^get-adc-index/widget(?P<widget_id>[0-9]+)/(?P<narrow_doc>n?)x/Index(?P<document_id_from>[0-9]+)-(?P<document_id_to>[0-9]+).html$', 'workflows.latino.views.get_adc_index', name='get adc index'),
    #url(r'^get-adc-index/widget(?P<widget_id>[0-9]+)/(?P<narrow_doc>n?)x/Document(?P<document_id>[0-9]+).html', 'workflows.latino.views.get_adc_page', name='get adc page'),
)
|
cloudnull/ansible-modules-extras
|
refs/heads/devel
|
notification/twilio.py
|
45
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Matt Makai <matthew.makai@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module documentation rendered by `ansible-doc`.  (Fixed the
# "through an the" typo in the description.)
DOCUMENTATION = '''
---
version_added: "1.6"
module: twilio
short_description: Sends a text message to a mobile phone through Twilio.
description:
   - Sends a text message to a phone number through the Twilio SMS service.
notes:
   - Like the other notification modules, this one requires an external
     dependency to work. In this case, you'll need a Twilio account with
     a purchased or verified phone number to send the text message.
options:
  account_sid:
    description:
      user's account id for Twilio found on the account page
    required: true
  auth_token:
    description: user's authentication token for Twilio found on the account page
    required: true
  msg:
    description:
      the body of the text message
    required: true
  to_number:
    description:
      what phone number to send the text message to, format +15551112222
    required: true
  from_number:
    description:
      what phone number to send the text message from, format +15551112222
    required: true
requirements: [ urllib, urllib2 ]
author: Matt Makai
'''
EXAMPLES = '''
# send a text message from the local server about the build status to (555) 303 5681
# note: you have to have purchased the 'from_number' on your Twilio account
- local_action: text msg="All servers with webserver role are now configured."
account_sid={{ twilio_account_sid }}
auth_token={{ twilio_auth_token }}
from_number=+15552014545 to_number=+15553035681
# send a text message from a server to (555) 111 3232
# note: you have to have purchased the 'from_number' on your Twilio account
- text: msg="This server's configuration is now complete."
account_sid={{ twilio_account_sid }}
auth_token={{ twilio_auth_token }}
from_number=+15553258899 to_number=+15551113232
'''
# =======================================
# text module support methods
#
try:
    import urllib, urllib2
except ImportError:
    # BUG FIX: the original called module.fail_json() here, but `module`
    # is only created inside main(), so a missing urllib would surface as
    # a confusing NameError instead of the real problem.  Raise a clear
    # ImportError instead; Ansible reports the traceback to the user.
    raise ImportError("urllib and urllib2 are required for the twilio module")
import base64
def post_text(module, account_sid, auth_token, msg, from_number, to_number):
    """POST an SMS to the Twilio REST API and return the urllib2 response.

    `module` is accepted for signature parity with the caller but is not
    used here; any urllib2 error propagates to the caller.
    """
    uri = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
        % (account_sid,)
    # Form-encode the message body exactly as Twilio's API expects.
    payload = urllib.urlencode(
        {'From': from_number, 'To': to_number, 'Body': msg})
    # HTTP Basic credentials: "<sid>:<token>", base64 with newlines stripped.
    credentials = base64.encodestring(
        '%s:%s' % (account_sid, auth_token)).replace('\n', '')
    request = urllib2.Request(uri)
    for header, value in (
            ('User-Agent', "Ansible/1.5"),
            ('Content-type', 'application/x-www-form-urlencoded'),
            ('Accept', 'application/ansible'),
            ('Authorization', 'Basic %s' % credentials)):
        request.add_header(header, value)
    return urllib2.urlopen(request, payload)
# =======================================
# Main
#
def main():
module = AnsibleModule(
argument_spec=dict(
account_sid=dict(required=True),
auth_token=dict(required=True),
msg=dict(required=True),
from_number=dict(required=True),
to_number=dict(required=True),
),
supports_check_mode=True
)
account_sid = module.params['account_sid']
auth_token = module.params['auth_token']
msg = module.params['msg']
from_number = module.params['from_number']
to_number = module.params['to_number']
try:
response = post_text(module, account_sid, auth_token, msg,
from_number, to_number)
except Exception, e:
module.fail_json(msg="unable to send text message to %s" % to_number)
module.exit_json(msg=msg, changed=False)
# import module snippets
# (this wildcard import is the pre-2.0 Ansible convention: it is replaced
# with the AnsibleModule boilerplate when the module is shipped to the
# target host, so main() must be called unconditionally below)
from ansible.module_utils.basic import *
main()
|
jaredly/pyjamas
|
refs/heads/master
|
examples/timesheet/view/components/FileOpenDlg.py
|
5
|
# vim: set ts=4 sw=4 expandtab:
from ApplicationConstants import Notification
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.Label import Label
from pyjamas.ui.Button import Button
from pyjamas.ui.DialogBox import DialogBox
from pyjamas.ui.FormPanel import FormPanel
from pyjamas.ui.FileUpload import FileUpload
from pyjamas.ui.HTML import HTML
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui.Frame import Frame
import pyjamas.DOM as DOM
from __pyjamas__ import doc
from pyjamas.Window import alert
from pyjamas import Window
import sys
has_getAsText = True
class FileOpenDlg(DialogBox):
    """Dialog that lets the user pick a local file and read its contents.

    Two strategies are wired in:
      * the Mozilla-only File.getAsText() API via a FileUpload widget
        (currently disabled — see the `if False and ...` below), and
      * a fallback that embeds a Frame pointed at the local file system
        and scrapes the loaded document's body.

    After a successful "Open", `self.filename` and `self.data` hold the
    chosen file's name and contents; both stay None on cancel/failure.
    """

    # FileList taken from the <input type=file> element when the
    # getAsText() path is active; None when using the iframe fallback.
    files = None

    def __init__(self, left = 50, top = 50, fileLocation = None):
        # has_getAsText is a module-level flag that onClickOpen may clear
        # when the browser turns out not to support File.getAsText().
        global has_getAsText
        try:
            DialogBox.__init__(self, modal = False)
            self.filename = None
            self.data = None
            self.setPopupPosition(left, top)
            self.dockPanel = DockPanel()
            self.dockPanel.setSpacing(4)
            self.setText("File Open")
            if not fileLocation is None:
                # Caller supplied a location: load it straight into a frame.
                msg = HTML("Loading file...", True)
                self.dockPanel.add(msg, DockPanel.NORTH)
                location = fileLocation
                if fileLocation.find("://") < 0:
                    # Relative path: resolve it against the page's own URL,
                    # honoring whichever separator that URL uses.
                    base = Window.getLocation().getHref()
                    if base.find('/') >= 0:
                        sep = '/'
                    else:
                        sep = '\\'
                    base = sep.join(base.split(sep)[:-1]) + sep
                    location = base + fileLocation
                self.iframe = Frame(location)
                self.dockPanel.add(self.iframe, DockPanel.CENTER)
            else:
                # No location: let the user browse for a file.
                msg = HTML("Choose a file", True)
                self.browseFile = FileUpload()
                elem = self.browseFile.getElement()
                # NOTE: the leading `False` deliberately disables the
                # getAsText() branch, forcing the iframe fallback below.
                if False and has_getAsText and hasattr(elem, 'files'):
                    self.iframe = None
                    self.files = elem.files
                    self.dockPanel.add(self.browseFile, DockPanel.CENTER)
                else:
                    self.browseFile = None
                    self.files = None
                    # Point the frame at the directory the app was loaded
                    # from; browsers only allow this for file:// origins.
                    base = '' + doc().location
                    if base.find('/') >= 0:
                        sep = '/'
                    else:
                        sep = '\\'
                    if not base.lower()[:5] == "file:":
                        base = "file:///C:/"
                        msg = HTML("You'll have to place the application on a local file system, otherwise the browser forbids access.", True)
                    else:
                        base = sep.join(base.split(sep)[:-1]) + sep
                    self.iframe = Frame(base)
                    self.dockPanel.add(self.iframe, DockPanel.CENTER)
                self.dockPanel.add(msg, DockPanel.NORTH)
            if self.iframe:
                self.iframe.setWidth("36em")
            # Open / Cancel button row along the bottom.
            hpanel = HorizontalPanel()
            self.openBtn = Button("Open", self.onClickOpen)
            hpanel.add(self.openBtn)
            self.cancelBtn = Button("Cancel", self.onClickCancel)
            hpanel.add(self.cancelBtn)
            self.dockPanel.add(hpanel, DockPanel.SOUTH)
            self.setWidget(self.dockPanel)
        except:
            # No-op handler kept so a breakpoint/log can be added here;
            # the exception still propagates to the caller.
            raise

    def onClickCancel(self, sender):
        # Dismiss without touching self.data / self.filename.
        self.hide()

    def onClickOpen(self, sender):
        """Read the selected file, stash name/contents on self, and hide."""
        global has_getAsText
        data = None
        filename = None
        if self.files:
            # Mozilla File API path: read directly from the FileList.
            if self.files.length == 0:
                return
            if self.files.length > 1:
                alert("Cannot open more than one file")
                return
            file = self.files.item(0)
            filename = file.fileName
            try:
                data = file.getAsText("")
            except AttributeError, e:
                # getAsText() unsupported after all: clear the module flag
                # so subsequent dialogs take the iframe fallback.
                has_getAsText = False
                alert("Sorry. cannot retrieve file in this browser.\nTry again.")
        else:
            # Iframe fallback: scrape the navigated document's body.
            elem = self.iframe.getElement()
            # On firefox, this runs into:
            # Permission denied to get property Window.document
            # when the file is not in the current domain
            body = elem.contentWindow.document.body
            try:
                filename = '' + elem.contentWindow.location
            except:
                filename = None
            if body.childNodes.length == 1:
                # Plain text files render as a single child node (browser
                # wraps them); take its innerHTML as the file contents.
                data = '' + body.childNodes.item(0).innerHTML
            else:
                data = '' + body.innerHTML
        self.hide()
        if data:
            self.data = data
            self.filename = filename
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.