repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
cbrucks/keystone_ldap | refs/heads/essex-eol-ldap | keystone/common/wsgi.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import json
import sys
import eventlet
import eventlet.wsgi
eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True)
import routes
import routes.middleware
import webob
import webob.dec
import webob.exc
from keystone import exception
from keystone.common import logging
from keystone.common import utils
LOG = logging.getLogger(__name__)
class WritableLogger(object):
    """Adapt a logger to the file-like ``write`` interface.

    eventlet's WSGI server expects a ``log`` object with a ``write``
    method; this wrapper forwards each chunk to a standard logger.
    """

    def __init__(self, logger, level=logging.DEBUG):
        self.level = level
        self.logger = logger

    def write(self, msg):
        # Emit the chunk at the configured level on the wrapped logger.
        self.logger.log(self.level, msg)
class Server(object):
    """Server class to manage multiple WSGI sockets and applications."""

    def __init__(self, application, host=None, port=None, threads=1000):
        self.application = application
        self.host = host or '0.0.0.0'
        self.port = port or 0
        self.pool = eventlet.GreenPool(threads)
        self.socket_info = {}
        self.greenthread = None

    def start(self, key=None, backlog=128):
        """Run a WSGI server with the given application."""
        LOG.debug('Starting %(arg0)s on %(host)s:%(port)s' %
                  {'arg0': sys.argv[0],
                   'host': self.host,
                   'port': self.port})
        listener = eventlet.listen((self.host, self.port), backlog=backlog)
        self.greenthread = self.pool.spawn(self._run,
                                           self.application,
                                           listener)
        # Remember the bound address (port 0 means the OS picked one).
        if key:
            self.socket_info[key] = listener.getsockname()

    def kill(self):
        # Only meaningful after start() has spawned the server thread.
        if self.greenthread:
            self.greenthread.kill()

    def wait(self):
        """Wait until all servers have completed running."""
        try:
            self.pool.waitall()
        except KeyboardInterrupt:
            pass

    def _run(self, application, socket):
        """Start a WSGI server in a new green thread."""
        wsgi_log = logging.getLogger('eventlet.wsgi.server')
        eventlet.wsgi.server(socket, application, custom_pool=self.pool,
                             log=WritableLogger(wsgi_log))
class Request(webob.Request):
    """Keystone request class; currently a plain alias of ``webob.Request``."""
    pass
class BaseApplication(object):
    """Base WSGI application wrapper. Subclasses need to implement __call__."""

    @classmethod
    def factory(cls, global_config, **local_config):
        """Paste app factory hook for ``paste.deploy`` config files.

        Local configuration (values under the ``[app:APPNAME]`` section of
        the paste config) is passed into ``__init__`` as keyword arguments.

        For example, a config like::

            [app:wadl]
            latest_version = 1.3
            paste.app_factory = nova.api.fancy_api:Wadl.factory

        results in a call to the ``Wadl`` class as::

            import nova.api.fancy_api
            fancy_api.Wadl(latest_version='1.3')

        Subclasses may re-implement ``factory``, but with the kwarg
        passing it shouldn't be necessary.
        """
        return cls()

    def __call__(self, environ, start_response):
        r"""Handle a WSGI request; must be overridden by subclasses.

        A typical implementation looks like::

            @webob.dec.wsgify(RequestClass=Request)
            def __call__(self, req):
                # Any of the following work as responses:
                res = 'message\n'                      # simple string
                res = exc.HTTPForbidden(detail='...')  # HTTP exception page
                res = Response()                       # webob Response
                res = self.application                 # another wsgi app
                res = req.get_response(self.application)
                return res          # ... or set req.response and return None

        See the end of http://pythonpaste.org/webob/modules/dec.html
        for more info.
        """
        raise NotImplementedError('You must implement __call__')
class Application(BaseApplication):
    """Dispatch a routed WSGI request to a controller action method.

    NOTE: Python 2 era code (``iteritems``, ``basestring``).  Relies on
    routing information placed in the environ by routes.middleware, and on
    the module-level ``render_response``/``render_exception`` helpers.
    """

    @webob.dec.wsgify
    def __call__(self, req):
        # Match dict produced by the router: holds 'controller', 'action',
        # and any URL template parameters.
        arg_dict = req.environ['wsgiorg.routing_args'][1]
        action = arg_dict.pop('action')
        del arg_dict['controller']
        LOG.debug('arg_dict: %s', arg_dict)

        # allow middleware up the stack to provide context & params
        context = req.environ.get('openstack.context', {})
        context['query_string'] = dict(req.params.iteritems())
        params = req.environ.get('openstack.params', {})
        params.update(arg_dict)

        # TODO(termie): do some basic normalization on methods
        # The 'action' string names a method on this controller instance.
        method = getattr(self, action)

        # NOTE(vish): make sure we have no unicode keys for py2.6.
        params = self._normalize_dict(params)

        try:
            result = method(context, **params)
        except exception.Error as e:
            # Known keystone errors become structured JSON error responses.
            LOG.warning(e)
            return render_exception(e)
        except Exception as e:
            # Unexpected errors are logged with traceback and wrapped so the
            # client never sees internal details.
            logging.exception(e)
            return render_exception(exception.UnexpectedError(exception=e))

        # Map the controller's return value onto a WSGI response:
        if result is None:
            # No body -> 204.
            return render_response(status=(204, 'No Content'))
        elif isinstance(result, basestring):
            # Raw string body is passed through as-is.
            return result
        elif isinstance(result, webob.Response):
            return result
        elif isinstance(result, webob.exc.WSGIHTTPException):
            return result

        # Anything else is JSON-serialized.
        return render_response(body=result)

    def _normalize_arg(self, arg):
        # Convert route-style names (':'/'-') into valid Python identifiers.
        return str(arg).replace(':', '_').replace('-', '_')

    def _normalize_dict(self, d):
        # Normalize every key; also coerces unicode keys to str for py2.6.
        return dict([(self._normalize_arg(k), v)
                     for (k, v) in d.iteritems()])

    def assert_admin(self, context):
        """Raise ``exception.Unauthorized`` unless the caller is an admin.

        If the context is not already flagged ``is_admin`` (e.g. via the
        admin token), validate the caller's token and let the policy engine
        decide via the 'admin_required' rule.
        """
        if not context['is_admin']:
            try:
                user_token_ref = self.token_api.get_token(
                    context=context, token_id=context['token_id'])
            except exception.TokenNotFound:
                raise exception.Unauthorized()

            creds = user_token_ref['metadata'].copy()
            try:
                creds['user_id'] = user_token_ref['user'].get('id')
            except AttributeError:
                # Token has no user dict -> malformed/invalid token.
                logging.debug('Invalid user')
                raise exception.Unauthorized()

            try:
                creds['tenant_id'] = user_token_ref['tenant'].get('id')
            except AttributeError:
                logging.debug('Invalid tenant')
                raise exception.Unauthorized()

            # NOTE(vish): this is pretty inefficient
            # Expand role ids into names, one identity_api call per role.
            creds['roles'] = [self.identity_api.get_role(context, role)['name']
                              for role in creds.get('roles', [])]
            # Accept either is_admin or the admin role
            self.policy_api.enforce(context, creds, 'admin_required', {})
class Middleware(Application):
    """Base WSGI middleware.

    These classes require an application to be initialized that will be
    called next.  By default the middleware simply calls its wrapped app,
    or you can override ``__call__`` to customize the behavior.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Paste filter factory hook for ``paste.deploy`` config files.

        Local configuration (values under the ``[filter:APPNAME]`` section
        of the paste config) is passed into ``__init__`` as kwargs.

        For example, a config like::

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = nova.api.analytics:Analytics.factory

        results in a call like::

            import nova.api.analytics
            analytics.Analytics(app_from_paste, redis_host='127.0.0.1')

        Subclasses may re-implement ``factory``, but with the kwarg
        passing it shouldn't be necessary.
        """
        def _factory(app):
            conf = global_config.copy()
            conf.update(local_config)
            return cls(app)
        return _factory

    def __init__(self, application):
        self.application = application

    def process_request(self, request):
        """Hook run on each incoming request.

        Returning None lets the next application down the stack execute;
        returning a response short-circuits the chain and returns it.
        """
        return None

    def process_response(self, request, response):
        """Do whatever you'd like to the response, based on the request."""
        return response

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        short_circuit = self.process_request(request)
        if short_circuit:
            return short_circuit
        downstream = request.get_response(self.application)
        return self.process_response(request, downstream)
class Debug(Middleware):
    """Helper class for debugging a WSGI application.

    Can be inserted into any WSGI application chain to get information
    about the request and response.
    """

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        # Dump the full WSGI environ of the incoming request.
        LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20))
        for key, value in req.environ.items():
            LOG.debug('%s = %s', key, value)
        LOG.debug('')
        LOG.debug('%s %s %s', ('*' * 20), 'REQUEST BODY', ('*' * 20))
        # NOTE(review): iterating body_file consumes the stream; presumably
        # webob rewinds/buffers it before the downstream app reads — confirm.
        for line in req.body_file:
            LOG.debug(line)
        LOG.debug('')

        resp = req.get_response(self.application)

        LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20))
        for (key, value) in resp.headers.iteritems():
            LOG.debug('%s = %s', key, value)
        LOG.debug('')

        # Wrap the body iterator so each chunk is logged as it streams out.
        resp.app_iter = self.print_generator(resp.app_iter)

        return resp

    @staticmethod
    def print_generator(app_iter):
        """Iterator that prints the contents of a wrapper string."""
        LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE BODY', ('*' * 20))
        for part in app_iter:
            #sys.stdout.write(part)
            LOG.debug(part)
            #sys.stdout.flush()
            yield part
        # Python 2 bare print statement: emits a blank line after the body.
        print
class Router(object):
    """WSGI middleware that maps incoming requests to WSGI apps."""

    def __init__(self, mapper):
        """Create a router for the given routes.Mapper.

        Each route in ``mapper`` must specify a 'controller', which is a
        WSGI app to call.  You'll probably want to specify an 'action' as
        well and have your controller be an object that can route the
        request to the action-specific method.

        Examples::

            mapper = routes.Mapper()
            sc = ServerController()

            # Explicit mapping of one route to a controller+action
            mapper.connect(None, '/svrlist', controller=sc, action='list')

            # Actions are all implicitly defined
            mapper.resource('server', 'servers', controller=sc)

            # Pointing to an arbitrary WSGI app.  You can specify the
            # {path_info:.*} parameter so the target app can be handed just
            # that section of the URL.
            mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
        """
        self.map = mapper
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Route the incoming request to a controller based on self.map.

        If no match, return a 404.
        """
        return self._router

    @staticmethod
    @webob.dec.wsgify(RequestClass=Request)
    def _dispatch(req):
        """Dispatch the request to the appropriate controller.

        Called by self._router after matching the incoming request to a
        route and putting the information into req.environ.  Either returns
        404 or the routed WSGI app's response.
        """
        routing_args = req.environ['wsgiorg.routing_args'][1]
        if not routing_args:
            return webob.exc.HTTPNotFound()
        return routing_args['controller']
class ComposingRouter(Router):
    """Router whose mapper is populated by a list of sub-routers."""

    def __init__(self, mapper=None, routers=None):
        mapper = routes.Mapper() if mapper is None else mapper
        for sub_router in (routers if routers is not None else []):
            sub_router.add_routes(mapper)
        super(ComposingRouter, self).__init__(mapper)
class ComposableRouter(Router):
    """Router that supports use by ComposingRouter."""

    def __init__(self, mapper=None):
        mapper = routes.Mapper() if mapper is None else mapper
        self.add_routes(mapper)
        super(ComposableRouter, self).__init__(mapper)

    def add_routes(self, mapper):
        """Hook for subclasses to register their routes on ``mapper``."""
        pass
class ExtensionRouter(Router):
    """A router that allows extensions to supplement or overwrite routes.

    Expects to be subclassed.
    """

    def __init__(self, application, mapper=None):
        mapper = routes.Mapper() if mapper is None else mapper
        self.application = application
        self.add_routes(mapper)
        # Anything the extension's routes did not claim falls through to
        # the wrapped application.
        mapper.connect('{path_info:.*}', controller=self.application)
        super(ExtensionRouter, self).__init__(mapper)

    def add_routes(self, mapper):
        """Hook for subclasses to register extension routes."""
        pass

    @classmethod
    def factory(cls, global_config, **local_config):
        """Paste filter factory hook for ``paste.deploy`` config files.

        Local configuration (values under the ``[filter:APPNAME]`` section
        of the paste config) is passed into ``__init__`` as kwargs, e.g.::

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = nova.api.analytics:Analytics.factory

        results in::

            import nova.api.analytics
            analytics.Analytics(app_from_paste, redis_host='127.0.0.1')

        Subclasses may re-implement ``factory``, but with the kwarg
        passing it shouldn't be necessary.
        """
        def _factory(app):
            conf = global_config.copy()
            conf.update(local_config)
            return cls(app)
        return _factory
def render_response(body=None, status=(200, 'OK'), headers=None):
    """Forms a WSGI response."""
    resp = webob.Response()
    resp.status = '%s %s' % status
    if headers:
        resp.headerlist = headers
    else:
        # Default: JSON content, and Vary on the auth token for caches.
        resp.headerlist = [('Content-Type', 'application/json'),
                           ('Vary', 'X-Auth-Token')]
    if body is not None:
        resp.body = json.dumps(body, cls=utils.SmarterEncoder)
    return resp
def render_exception(error):
    """Forms a WSGI response based on the current error."""
    payload = {
        'error': {
            'code': error.code,
            'title': error.title,
            'message': str(error),
        },
    }
    return render_response(status=(error.code, error.title), body=payload)
|
c-o-m-m-a-n-d-e-r/CouchPotatoServer | refs/heads/master | libs/tornado/ioloop.py | 65 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado import stack_context
from tornado.util import Configurable, errno_from_exception, timedelta_to_seconds
try:
import signal
except ImportError:
signal = None
try:
import thread # py2
except ImportError:
import _thread as thread # py3
from tornado.platform.auto import set_close_exec, Waker
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
    """Raised by ``IOLoop.run_sync`` when its optional timeout expires."""
class IOLoop(Configurable):
    """A level-triggered I/O loop.

    We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
    are available, or else we fall back on select(). If you are
    implementing a system that needs to handle thousands of
    simultaneous connections, you should use a system that supports
    either ``epoll`` or ``kqueue``.

    Example usage for a simple TCP server::

        import errno
        import functools
        import ioloop
        import socket

        def connection_ready(sock, fd, events):
            while True:
                try:
                    connection, address = sock.accept()
                except socket.error, e:
                    if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
                        raise
                    return
                connection.setblocking(0)
                handle_connection(connection, address)

        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setblocking(0)
        sock.bind(("", port))
        sock.listen(128)

        io_loop = ioloop.IOLoop.instance()
        callback = functools.partial(connection_ready, sock)
        io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
        io_loop.start()
    """
    # Constants from the epoll module
    _EPOLLIN = 0x001
    _EPOLLPRI = 0x002
    _EPOLLOUT = 0x004
    _EPOLLERR = 0x008
    _EPOLLHUP = 0x010
    _EPOLLRDHUP = 0x2000
    _EPOLLONESHOT = (1 << 30)
    _EPOLLET = (1 << 31)

    # Our events map exactly to the epoll events
    NONE = 0
    READ = _EPOLLIN
    WRITE = _EPOLLOUT
    ERROR = _EPOLLERR | _EPOLLHUP

    # Global lock for creating global IOLoop instance
    _instance_lock = threading.Lock()

    # Per-thread storage for the "current" IOLoop (see current()).
    _current = threading.local()

    @staticmethod
    def instance():
        """Returns a global `IOLoop` instance.

        Most applications have a single, global `IOLoop` running on the
        main thread.  Use this method to get this instance from
        another thread.  To get the current thread's `IOLoop`, use
        `current()`.
        """
        if not hasattr(IOLoop, "_instance"):
            # Double-checked locking: re-test under the lock so only one
            # thread creates the singleton.
            with IOLoop._instance_lock:
                if not hasattr(IOLoop, "_instance"):
                    # New instance after double check
                    IOLoop._instance = IOLoop()
        return IOLoop._instance

    @staticmethod
    def initialized():
        """Returns true if the singleton instance has been created."""
        return hasattr(IOLoop, "_instance")

    def install(self):
        """Installs this `IOLoop` object as the singleton instance.

        This is normally not necessary as `instance()` will create
        an `IOLoop` on demand, but you may want to call `install` to use
        a custom subclass of `IOLoop`.
        """
        assert not IOLoop.initialized()
        IOLoop._instance = self

    @staticmethod
    def clear_instance():
        """Clear the global `IOLoop` instance.

        .. versionadded:: 4.0
        """
        if hasattr(IOLoop, "_instance"):
            del IOLoop._instance

    @staticmethod
    def current(instance=True):
        """Returns the current thread's `IOLoop`.

        If an `IOLoop` is currently running or has been marked as
        current by `make_current`, returns that instance.  If there is
        no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
        main thread's `IOLoop`, creating one if necessary) if ``instance``
        is true.

        In general you should use `IOLoop.current` as the default when
        constructing an asynchronous object, and use `IOLoop.instance`
        when you mean to communicate to the main thread from a different
        one.

        .. versionchanged:: 4.1
           Added ``instance`` argument to control the fallback to
           `IOLoop.instance()`.
        """
        current = getattr(IOLoop._current, "instance", None)
        if current is None and instance:
            return IOLoop.instance()
        return current

    def make_current(self):
        """Makes this the `IOLoop` for the current thread.

        An `IOLoop` automatically becomes current for its thread
        when it is started, but it is sometimes useful to call
        `make_current` explicitly before starting the `IOLoop`,
        so that code run at startup time can find the right
        instance.

        .. versionchanged:: 4.1
           An `IOLoop` created while there is no current `IOLoop`
           will automatically become current.
        """
        IOLoop._current.instance = self

    @staticmethod
    def clear_current():
        # Unmark the current thread's IOLoop.
        IOLoop._current.instance = None

    @classmethod
    def configurable_base(cls):
        # Root of the Configurable hierarchy for IOLoop implementations.
        return IOLoop

    @classmethod
    def configurable_default(cls):
        # Pick the best available poller implementation for this platform.
        if hasattr(select, "epoll"):
            from tornado.platform.epoll import EPollIOLoop
            return EPollIOLoop
        if hasattr(select, "kqueue"):
            # Python 2.6+ on BSD or Mac
            from tornado.platform.kqueue import KQueueIOLoop
            return KQueueIOLoop
        from tornado.platform.select import SelectIOLoop
        return SelectIOLoop

    def initialize(self):
        # Become the current IOLoop if the thread doesn't have one yet.
        if IOLoop.current(instance=False) is None:
            self.make_current()

    def close(self, all_fds=False):
        """Closes the `IOLoop`, freeing any resources used.

        If ``all_fds`` is true, all file descriptors registered on the
        IOLoop will be closed (not just the ones created by the
        `IOLoop` itself).

        Many applications will only use a single `IOLoop` that runs for the
        entire lifetime of the process.  In that case closing the `IOLoop`
        is not necessary since everything will be cleaned up when the
        process exits.  `IOLoop.close` is provided mainly for scenarios
        such as unit tests, which create and destroy a large number of
        ``IOLoops``.

        An `IOLoop` must be completely stopped before it can be closed.  This
        means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
        be allowed to return before attempting to call `IOLoop.close()`.
        Therefore the call to `close` will usually appear just after
        the call to `start` rather than near the call to `stop`.

        .. versionchanged:: 3.1
           If the `IOLoop` implementation supports non-integer objects
           for "file descriptors", those objects will have their
           ``close`` method when ``all_fds`` is true.
        """
        raise NotImplementedError()

    def add_handler(self, fd, handler, events):
        """Registers the given handler to receive the given events for ``fd``.

        The ``fd`` argument may either be an integer file descriptor or
        a file-like object with a ``fileno()`` method (and optionally a
        ``close()`` method, which may be called when the `IOLoop` is shut
        down).

        The ``events`` argument is a bitwise or of the constants
        ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.

        When an event occurs, ``handler(fd, events)`` will be run.

        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        raise NotImplementedError()

    def update_handler(self, fd, events):
        """Changes the events we listen for ``fd``.

        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        raise NotImplementedError()

    def remove_handler(self, fd):
        """Stop listening for events on ``fd``.

        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        raise NotImplementedError()

    def set_blocking_signal_threshold(self, seconds, action):
        """Sends a signal if the `IOLoop` is blocked for more than
        ``s`` seconds.

        Pass ``seconds=None`` to disable.  Requires Python 2.6 on a unixy
        platform.

        The action parameter is a Python signal handler.  Read the
        documentation for the `signal` module for more information.
        If ``action`` is None, the process will be killed if it is
        blocked for too long.
        """
        raise NotImplementedError()

    def set_blocking_log_threshold(self, seconds):
        """Logs a stack trace if the `IOLoop` is blocked for more than
        ``s`` seconds.

        Equivalent to ``set_blocking_signal_threshold(seconds,
        self.log_stack)``
        """
        self.set_blocking_signal_threshold(seconds, self.log_stack)

    def log_stack(self, signal, frame):
        """Signal handler to log the stack trace of the current thread.

        For use with `set_blocking_signal_threshold`.
        """
        gen_log.warning('IOLoop blocked for %f seconds in\n%s',
                        self._blocking_signal_threshold,
                        ''.join(traceback.format_stack(frame)))

    def start(self):
        """Starts the I/O loop.

        The loop will run until one of the callbacks calls `stop()`, which
        will make the loop stop after the current event iteration completes.
        """
        raise NotImplementedError()

    def _setup_logging(self):
        """The IOLoop catches and logs exceptions, so it's
        important that log output be visible.  However, python's
        default behavior for non-root loggers (prior to python
        3.2) is to print an unhelpful "no handlers could be
        found" message rather than the actual log entry, so we
        must explicitly configure logging if we've made it this
        far without anything.

        This method should be called from start() in subclasses.
        """
        if not any([logging.getLogger().handlers,
                    logging.getLogger('tornado').handlers,
                    logging.getLogger('tornado.application').handlers]):
            logging.basicConfig()

    def stop(self):
        """Stop the I/O loop.

        If the event loop is not currently running, the next call to
        `start()` will return immediately.

        To use asynchronous methods from otherwise-synchronous code (such as
        unit tests), you can start and stop the event loop like this::

            ioloop = IOLoop()
            async_method(ioloop=ioloop, callback=ioloop.stop)
            ioloop.start()

        ``ioloop.start()`` will return after ``async_method`` has run
        its callback, whether that callback was invoked before or
        after ``ioloop.start``.

        Note that even after `stop` has been called, the `IOLoop` is not
        completely stopped until `IOLoop.start` has also returned.
        Some work that was scheduled before the call to `stop` may still
        be run before the `IOLoop` shuts down.
        """
        raise NotImplementedError()

    def run_sync(self, func, timeout=None):
        """Starts the `IOLoop`, runs the given function, and stops the loop.

        If the function returns a `.Future`, the `IOLoop` will run
        until the future is resolved.  If it raises an exception, the
        `IOLoop` will stop and the exception will be re-raised to the
        caller.

        The keyword-only argument ``timeout`` may be used to set
        a maximum duration for the function.  If the timeout expires,
        a `TimeoutError` is raised.

        This method is useful in conjunction with `tornado.gen.coroutine`
        to allow asynchronous calls in a ``main()`` function::

            @gen.coroutine
            def main():
                # do stuff...

            if __name__ == '__main__':
                IOLoop.instance().run_sync(main)
        """
        # Single-element list so the nested closure can rebind the result.
        future_cell = [None]

        def run():
            try:
                result = func()
            except Exception:
                # Capture the exception in a future so result() re-raises it.
                future_cell[0] = TracebackFuture()
                future_cell[0].set_exc_info(sys.exc_info())
            else:
                if is_future(result):
                    future_cell[0] = result
                else:
                    future_cell[0] = TracebackFuture()
                    future_cell[0].set_result(result)
            # Stop the loop once the result future resolves.
            self.add_future(future_cell[0], lambda future: self.stop())
        self.add_callback(run)
        if timeout is not None:
            timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
        self.start()
        if timeout is not None:
            self.remove_timeout(timeout_handle)
        if not future_cell[0].done():
            raise TimeoutError('Operation timed out after %s seconds' % timeout)
        return future_cell[0].result()

    def time(self):
        """Returns the current time according to the `IOLoop`'s clock.

        The return value is a floating-point number relative to an
        unspecified time in the past.

        By default, the `IOLoop`'s time function is `time.time`.  However,
        it may be configured to use e.g. `time.monotonic` instead.
        Calls to `add_timeout` that pass a number instead of a
        `datetime.timedelta` should use this function to compute the
        appropriate time, so they can work no matter what time function
        is chosen.
        """
        return time.time()

    def add_timeout(self, deadline, callback, *args, **kwargs):
        """Runs the ``callback`` at the time ``deadline`` from the I/O loop.

        Returns an opaque handle that may be passed to
        `remove_timeout` to cancel.

        ``deadline`` may be a number denoting a time (on the same
        scale as `IOLoop.time`, normally `time.time`), or a
        `datetime.timedelta` object for a deadline relative to the
        current time.  Since Tornado 4.0, `call_later` is a more
        convenient alternative for the relative case since it does not
        require a timedelta object.

        Note that it is not safe to call `add_timeout` from other threads.
        Instead, you must use `add_callback` to transfer control to the
        `IOLoop`'s thread, and then call `add_timeout` from there.

        Subclasses of IOLoop must implement either `add_timeout` or
        `call_at`; the default implementations of each will call
        the other.  `call_at` is usually easier to implement, but
        subclasses that wish to maintain compatibility with Tornado
        versions prior to 4.0 must use `add_timeout` instead.

        .. versionchanged:: 4.0
           Now passes through ``*args`` and ``**kwargs`` to the callback.
        """
        if isinstance(deadline, numbers.Real):
            return self.call_at(deadline, callback, *args, **kwargs)
        elif isinstance(deadline, datetime.timedelta):
            # Convert the relative timedelta to an absolute deadline.
            return self.call_at(self.time() + timedelta_to_seconds(deadline),
                                callback, *args, **kwargs)
        else:
            raise TypeError("Unsupported deadline %r" % deadline)

    def call_later(self, delay, callback, *args, **kwargs):
        """Runs the ``callback`` after ``delay`` seconds have passed.

        Returns an opaque handle that may be passed to `remove_timeout`
        to cancel.  Note that unlike the `asyncio` method of the same
        name, the returned object does not have a ``cancel()`` method.

        See `add_timeout` for comments on thread-safety and subclassing.

        .. versionadded:: 4.0
        """
        return self.call_at(self.time() + delay, callback, *args, **kwargs)

    def call_at(self, when, callback, *args, **kwargs):
        """Runs the ``callback`` at the absolute time designated by ``when``.

        ``when`` must be a number using the same reference point as
        `IOLoop.time`.

        Returns an opaque handle that may be passed to `remove_timeout`
        to cancel.  Note that unlike the `asyncio` method of the same
        name, the returned object does not have a ``cancel()`` method.

        See `add_timeout` for comments on thread-safety and subclassing.

        .. versionadded:: 4.0
        """
        return self.add_timeout(when, callback, *args, **kwargs)

    def remove_timeout(self, timeout):
        """Cancels a pending timeout.

        The argument is a handle as returned by `add_timeout`.  It is
        safe to call `remove_timeout` even if the callback has already
        been run.
        """
        raise NotImplementedError()

    def add_callback(self, callback, *args, **kwargs):
        """Calls the given callback on the next I/O loop iteration.

        It is safe to call this method from any thread at any time,
        except from a signal handler.  Note that this is the **only**
        method in `IOLoop` that makes this thread-safety guarantee; all
        other interaction with the `IOLoop` must be done from that
        `IOLoop`'s thread.  `add_callback()` may be used to transfer
        control from other threads to the `IOLoop`'s thread.

        To add a callback from a signal handler, see
        `add_callback_from_signal`.
        """
        raise NotImplementedError()

    def add_callback_from_signal(self, callback, *args, **kwargs):
        """Calls the given callback on the next I/O loop iteration.

        Safe for use from a Python signal handler; should not be used
        otherwise.

        Callbacks added with this method will be run without any
        `.stack_context`, to avoid picking up the context of the function
        that was interrupted by the signal.
        """
        raise NotImplementedError()

    def spawn_callback(self, callback, *args, **kwargs):
        """Calls the given callback on the next IOLoop iteration.

        Unlike all other callback-related methods on IOLoop,
        ``spawn_callback`` does not associate the callback with its caller's
        ``stack_context``, so it is suitable for fire-and-forget callbacks
        that should not interfere with the caller.

        .. versionadded:: 4.0
        """
        with stack_context.NullContext():
            self.add_callback(callback, *args, **kwargs)

    def add_future(self, future, callback):
        """Schedules a callback on the ``IOLoop`` when the given
        `.Future` is finished.

        The callback is invoked with one argument, the `.Future`.
        """
        assert is_future(future)
        callback = stack_context.wrap(callback)
        # Run the callback on the IOLoop thread even if the future is
        # completed from another thread.
        future.add_done_callback(
            lambda future: self.add_callback(callback, future))

    def _run_callback(self, callback):
        """Runs a callback with error handling.

        For use in subclasses.
        """
        try:
            ret = callback()
            if ret is not None and is_future(ret):
                # Functions that return Futures typically swallow all
                # exceptions and store them in the Future.  If a Future
                # makes it out to the IOLoop, ensure its exception (if any)
                # gets logged too.
                self.add_future(ret, lambda f: f.result())
        except Exception:
            self.handle_callback_exception(callback)

    def handle_callback_exception(self, callback):
        """This method is called whenever a callback run by the `IOLoop`
        throws an exception.

        By default simply logs the exception as an error.  Subclasses
        may override this method to customize reporting of exceptions.

        The exception itself is not passed explicitly, but is available
        in `sys.exc_info`.
        """
        app_log.error("Exception in callback %r", callback, exc_info=True)

    def split_fd(self, fd):
        """Returns an (fd, obj) pair from an ``fd`` parameter.

        We accept both raw file descriptors and file-like objects as
        input to `add_handler` and related methods.  When a file-like
        object is passed, we must retain the object itself so we can
        close it correctly when the `IOLoop` shuts down, but the
        poller interfaces favor file descriptors (they will accept
        file-like objects and call ``fileno()`` for you, but they
        always return the descriptor itself).

        This method is provided for use by `IOLoop` subclasses and should
        not generally be used by application code.

        .. versionadded:: 4.0
        """
        try:
            return fd.fileno(), fd
        except AttributeError:
            # Already a raw descriptor.
            return fd, fd

    def close_fd(self, fd):
        """Utility method to close an ``fd``.

        If ``fd`` is a file-like object, we close it directly; otherwise
        we use `os.close`.

        This method is provided for use by `IOLoop` subclasses (in
        implementations of ``IOLoop.close(all_fds=True)`` and should
        not generally be used by application code.

        .. versionadded:: 4.0
        """
        try:
            try:
                fd.close()
            except AttributeError:
                # Raw descriptor without a close() method.
                os.close(fd)
        except OSError:
            # Already closed (or otherwise invalid); best-effort cleanup.
            pass
class PollIOLoop(IOLoop):
    """Base class for IOLoops built around a select-like function.

    For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
    (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
    `tornado.platform.select.SelectIOLoop` (all platforms).
    """

    def initialize(self, impl, time_func=None):
        """Set up loop state around ``impl``, a select/poll/epoll/kqueue-like
        poller object exposing register/modify/unregister/poll."""
        super(PollIOLoop, self).initialize()
        self._impl = impl
        if hasattr(self._impl, 'fileno'):
            set_close_exec(self._impl.fileno())
        self.time_func = time_func or time.time
        self._handlers = {}          # fd -> (retained obj, wrapped handler)
        self._events = {}            # pending (fd, events) not yet dispatched
        self._callbacks = []
        self._callback_lock = threading.Lock()
        self._timeouts = []          # heap of _Timeout objects
        self._cancellations = 0      # cancelled timeouts still in the heap
        self._running = False
        self._stopped = False
        self._closing = False
        self._thread_ident = None
        self._blocking_signal_threshold = None
        self._timeout_counter = itertools.count()
        # Create a pipe that we send bogus data to when we want to wake
        # the I/O loop when it is idle
        self._waker = Waker()
        self.add_handler(self._waker.fileno(),
                         lambda fd, events: self._waker.consume(),
                         self.READ)

    def close(self, all_fds=False):
        """Close the poller; with ``all_fds``, also close every registered fd."""
        with self._callback_lock:
            self._closing = True
        self.remove_handler(self._waker.fileno())
        if all_fds:
            for fd, handler in self._handlers.values():
                self.close_fd(fd)
        self._waker.close()
        self._impl.close()
        # Drop references so queued callbacks/timeouts can be collected.
        self._callbacks = None
        self._timeouts = None

    def add_handler(self, fd, handler, events):
        """Register ``handler`` to be called for ``events`` on ``fd``."""
        fd, obj = self.split_fd(fd)
        self._handlers[fd] = (obj, stack_context.wrap(handler))
        # ERROR is always watched so handlers hear about failed fds.
        self._impl.register(fd, events | self.ERROR)

    def update_handler(self, fd, events):
        """Change the set of events being watched on ``fd``."""
        fd, obj = self.split_fd(fd)
        self._impl.modify(fd, events | self.ERROR)

    def remove_handler(self, fd):
        """Stop watching ``fd``; unregistration failures are only logged."""
        fd, obj = self.split_fd(fd)
        self._handlers.pop(fd, None)
        self._events.pop(fd, None)
        try:
            self._impl.unregister(fd)
        except Exception:
            gen_log.debug("Error deleting fd from IOLoop", exc_info=True)

    def set_blocking_signal_threshold(self, seconds, action):
        """Arm SIGALRM (via setitimer) when a callback blocks the loop for
        more than ``seconds``; ``action`` replaces the SIGALRM handler."""
        if not hasattr(signal, "setitimer"):
            gen_log.error("set_blocking_signal_threshold requires a signal module "
                          "with the setitimer method")
            return
        self._blocking_signal_threshold = seconds
        if seconds is not None:
            signal.signal(signal.SIGALRM,
                          action if action is not None else signal.SIG_DFL)

    def start(self):
        """Run the event loop until `stop` is called."""
        if self._running:
            raise RuntimeError("IOLoop is already running")
        self._setup_logging()
        if self._stopped:
            # stop() was called before start(): consume the flag and return.
            self._stopped = False
            return
        old_current = getattr(IOLoop._current, "instance", None)
        IOLoop._current.instance = self
        self._thread_ident = thread.get_ident()
        self._running = True

        # signal.set_wakeup_fd closes a race condition in event loops:
        # a signal may arrive at the beginning of select/poll/etc
        # before it goes into its interruptible sleep, so the signal
        # will be consumed without waking the select.  The solution is
        # for the (C, synchronous) signal handler to write to a pipe,
        # which will then be seen by select.
        #
        # In python's signal handling semantics, this only matters on the
        # main thread (fortunately, set_wakeup_fd only works on the main
        # thread and will raise a ValueError otherwise).
        #
        # If someone has already set a wakeup fd, we don't want to
        # disturb it.  This is an issue for twisted, which does its
        # SIGCHLD processing in response to its own wakeup fd being
        # written to.  As long as the wakeup fd is registered on the IOLoop,
        # the loop will still wake up and everything should work.
        old_wakeup_fd = None
        if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
            # requires python 2.6+, unix.  set_wakeup_fd exists but crashes
            # the python process on windows.
            try:
                old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
                if old_wakeup_fd != -1:
                    # Already set, restore previous value.  This is a little racy,
                    # but there's no clean get_wakeup_fd and in real use the
                    # IOLoop is just started once at the beginning.
                    signal.set_wakeup_fd(old_wakeup_fd)
                    old_wakeup_fd = None
            except ValueError:  # non-main thread
                pass

        try:
            while True:
                # Prevent IO event starvation by delaying new callbacks
                # to the next iteration of the event loop.
                with self._callback_lock:
                    callbacks = self._callbacks
                    self._callbacks = []

                # Add any timeouts that have come due to the callback list.
                # Do not run anything until we have determined which ones
                # are ready, so timeouts that call add_timeout cannot
                # schedule anything in this iteration.
                due_timeouts = []
                if self._timeouts:
                    now = self.time()
                    while self._timeouts:
                        if self._timeouts[0].callback is None:
                            # The timeout was cancelled.  Note that the
                            # cancellation check is repeated below for timeouts
                            # that are cancelled by another timeout or callback.
                            heapq.heappop(self._timeouts)
                            self._cancellations -= 1
                        elif self._timeouts[0].deadline <= now:
                            due_timeouts.append(heapq.heappop(self._timeouts))
                        else:
                            break
                    if (self._cancellations > 512
                            and self._cancellations > (len(self._timeouts) >> 1)):
                        # Clean up the timeout queue when it gets large and it's
                        # more than half cancellations.
                        self._cancellations = 0
                        self._timeouts = [x for x in self._timeouts
                                          if x.callback is not None]
                        heapq.heapify(self._timeouts)

                for callback in callbacks:
                    self._run_callback(callback)
                for timeout in due_timeouts:
                    if timeout.callback is not None:
                        self._run_callback(timeout.callback)
                # Closures may be holding on to a lot of memory, so allow
                # them to be freed before we go into our poll wait.
                callbacks = callback = due_timeouts = timeout = None

                if self._callbacks:
                    # If any callbacks or timeouts called add_callback,
                    # we don't want to wait in poll() before we run them.
                    poll_timeout = 0.0
                elif self._timeouts:
                    # If there are any timeouts, schedule the first one.
                    # Use self.time() instead of 'now' to account for time
                    # spent running callbacks.
                    poll_timeout = self._timeouts[0].deadline - self.time()
                    poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
                else:
                    # No timeouts and no callbacks, so use the default.
                    poll_timeout = _POLL_TIMEOUT

                if not self._running:
                    break

                if self._blocking_signal_threshold is not None:
                    # clear alarm so it doesn't fire while poll is waiting for
                    # events.
                    signal.setitimer(signal.ITIMER_REAL, 0, 0)

                try:
                    event_pairs = self._impl.poll(poll_timeout)
                except Exception as e:
                    # Depending on python version and IOLoop implementation,
                    # different exception types may be thrown and there are
                    # two ways EINTR might be signaled:
                    # * e.errno == errno.EINTR
                    # * e.args is like (errno.EINTR, 'Interrupted system call')
                    if errno_from_exception(e) == errno.EINTR:
                        continue
                    else:
                        raise

                if self._blocking_signal_threshold is not None:
                    signal.setitimer(signal.ITIMER_REAL,
                                     self._blocking_signal_threshold, 0)

                # Pop one fd at a time from the set of pending fds and run
                # its handler. Since that handler may perform actions on
                # other file descriptors, there may be reentrant calls to
                # this IOLoop that update self._events
                self._events.update(event_pairs)
                while self._events:
                    fd, events = self._events.popitem()
                    try:
                        fd_obj, handler_func = self._handlers[fd]
                        handler_func(fd_obj, events)
                    except (OSError, IOError) as e:
                        if errno_from_exception(e) == errno.EPIPE:
                            # Happens when the client closes the connection
                            pass
                        else:
                            self.handle_callback_exception(self._handlers.get(fd))
                    except Exception:
                        self.handle_callback_exception(self._handlers.get(fd))
                fd_obj = handler_func = None
        finally:
            # reset the stopped flag so another start/stop pair can be issued
            self._stopped = False
            if self._blocking_signal_threshold is not None:
                signal.setitimer(signal.ITIMER_REAL, 0, 0)
            IOLoop._current.instance = old_current
            if old_wakeup_fd is not None:
                signal.set_wakeup_fd(old_wakeup_fd)

    def stop(self):
        """Ask `start` to return; wakes the poller so the flag is seen."""
        self._running = False
        self._stopped = True
        self._waker.wake()

    def time(self):
        """Return the loop's current time per ``time_func``."""
        return self.time_func()

    def call_at(self, deadline, callback, *args, **kwargs):
        """Schedule ``callback`` at absolute time ``deadline``; returns a
        handle usable with `remove_timeout`."""
        timeout = _Timeout(
            deadline,
            functools.partial(stack_context.wrap(callback), *args, **kwargs),
            self)
        heapq.heappush(self._timeouts, timeout)
        return timeout

    def remove_timeout(self, timeout):
        """Cancel a timeout returned by `call_at`."""
        # Removing from a heap is complicated, so just leave the defunct
        # timeout object in the queue (see discussion in
        # http://docs.python.org/library/heapq.html).
        # If this turns out to be a problem, we could add a garbage
        # collection pass whenever there are too many dead timeouts.
        timeout.callback = None
        self._cancellations += 1

    def add_callback(self, callback, *args, **kwargs):
        """Thread-safely schedule ``callback`` for the next loop iteration."""
        with self._callback_lock:
            if self._closing:
                raise RuntimeError("IOLoop is closing")
            list_empty = not self._callbacks
            self._callbacks.append(functools.partial(
                stack_context.wrap(callback), *args, **kwargs))
        if list_empty and thread.get_ident() != self._thread_ident:
            # If we're in the IOLoop's thread, we know it's not currently
            # polling.  If we're not, and we added the first callback to an
            # empty list, we may need to wake it up (it may wake up on its
            # own, but an occasional extra wake is harmless).  Waking
            # up a polling IOLoop is relatively expensive, so we try to
            # avoid it when we can.
            self._waker.wake()

    def add_callback_from_signal(self, callback, *args, **kwargs):
        """Signal-handler-safe variant of `add_callback`."""
        with stack_context.NullContext():
            if thread.get_ident() != self._thread_ident:
                # if the signal is handled on another thread, we can add
                # it normally (modulo the NullContext)
                self.add_callback(callback, *args, **kwargs)
            else:
                # If we're on the IOLoop's thread, we cannot use
                # the regular add_callback because it may deadlock on
                # _callback_lock.  Blindly insert into self._callbacks.
                # This is safe because the GIL makes list.append atomic.
                # One subtlety is that if the signal interrupted the
                # _callback_lock block in IOLoop.start, we may modify
                # either the old or new version of self._callbacks,
                # but either way will work.
                self._callbacks.append(functools.partial(
                    stack_context.wrap(callback), *args, **kwargs))
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tiebreaker']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tiebreaker = next(io_loop._timeout_counter)
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return ((self.deadline, self.tiebreaker) <
(other.deadline, other.tiebreaker))
def __le__(self, other):
return ((self.deadline, self.tiebreaker) <=
(other.deadline, other.tiebreaker))
class PeriodicCallback(object):
    """Schedules the given callback to be called periodically.

    The callback is called every ``callback_time`` milliseconds.

    `start` must be called after the `PeriodicCallback` is created.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """

    def __init__(self, callback, callback_time, io_loop=None):
        self.callback = callback
        if callback_time <= 0:
            raise ValueError("Periodic callback must have a positive callback_time")
        self.callback_time = callback_time
        self.io_loop = io_loop or IOLoop.current()
        self._running = False
        self._timeout = None

    def start(self):
        """Starts the timer."""
        self._running = True
        # First run is scheduled relative to "now" on the loop's clock.
        self._next_timeout = self.io_loop.time()
        self._schedule_next()

    def stop(self):
        """Stops the timer."""
        self._running = False
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

    def is_running(self):
        """Return True if this `.PeriodicCallback` has been started.

        .. versionadded:: 4.1
        """
        return self._running

    def _run(self):
        """Invoke the callback once, then reschedule (even on error)."""
        if not self._running:
            return
        try:
            return self.callback()
        except Exception:
            self.io_loop.handle_callback_exception(self.callback)
        finally:
            self._schedule_next()

    def _schedule_next(self):
        """Schedule the next invocation on a fixed cadence."""
        if self._running:
            current_time = self.io_loop.time()
            # Skip any intervals that already elapsed (e.g. a slow callback)
            # rather than firing a burst of catch-up calls.
            while self._next_timeout <= current_time:
                self._next_timeout += self.callback_time / 1000.0
            self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
|
ibrahimcesar/panda | refs/heads/master | panda/api/exports.py | 6 | #!/usr/bin/env python
from mimetypes import guess_type
from django.conf.urls.defaults import url
from django.core.servers.basehttp import FileWrapper
from django.http import HttpResponse
from tastypie import fields
from tastypie.authorization import DjangoAuthorization
from tastypie.utils.urls import trailing_slash
from panda.api.utils import PandaAuthentication, PandaModelResource, PandaSerializer
from panda.models import Export
class ExportResource(PandaModelResource):
    """
    API resource for Exports.

    Read-only tastypie resource; adds a /download endpoint that streams the
    exported file back to the client.
    """
    # Local import -- presumably to avoid a circular import at module load
    # time; confirm before moving to the top of the file.
    from panda.api.users import UserResource

    creator = fields.ForeignKey(UserResource, 'creator')
    dataset = fields.ForeignKey('panda.api.datasets.DatasetResource', 'dataset', null=True)

    class Meta:
        queryset = Export.objects.all()
        resource_name = 'export'
        allowed_methods = ['get']

        authentication = PandaAuthentication()
        authorization = DjangoAuthorization()
        serializer = PandaSerializer()

    def override_urls(self):
        """
        Add urls for search endpoint.

        Registers /export/<pk>/download for `download` below.
        """
        return [
            url(r'^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/download%s$' % (self._meta.resource_name, trailing_slash()), self.wrap_view('download'), name='api_download_export'),
        ]

    def download(self, request, **kwargs):
        """
        Download the original file that was uploaded.

        The export id comes from the URL pk when present, otherwise from the
        ``?id=`` query parameter.  Raises Export.DoesNotExist (HTTP 500 by
        default) for unknown ids.
        """
        # Allow POST so csrf token can come through
        self.method_check(request, allowed=['get', 'post'])
        self.is_authenticated(request)
        self.throttle_check(request)

        if 'pk' in kwargs:
            get_id = kwargs['pk']
        else:
            get_id = request.GET.get('id', '')

        export = Export.objects.get(id=get_id)
        path = export.get_path()

        self.log_throttled_access(request)

        # NOTE(review): the file is opened in text mode ('r'); a binary
        # export format would need 'rb' -- confirm exports are always text.
        response = HttpResponse(FileWrapper(open(path, 'r')), content_type=guess_type(export.filename)[0])
        response['Content-Disposition'] = 'attachment; filename=%s' % export.filename
        response['Content-Length'] = export.size

        return response
|
eprincev-egor/nw.js | refs/heads/nw13 | tests/automatic_tests/start_app/zip.py | 97 | import zipfile
import os
zip = zipfile.ZipFile(os.path.join('tmp-nw', 'app.nw'), 'w',
compression=zipfile.ZIP_DEFLATED)
source_file = ['index.html', 'package.json']
for file in source_file:
path = os.path.join('tmp-nw', file)
zip.write(path, file)
zip.close();
|
detiber/ansible | refs/heads/devel | contrib/inventory/serf.py | 395 | #!/usr/bin/env python
# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Dynamic inventory script which lets you use nodes discovered by Serf
# (https://serfdom.io/).
#
# Requires the `serfclient` Python module from
# https://pypi.python.org/pypi/serfclient
#
# Environment variables
# ---------------------
# - `SERF_RPC_ADDR`
# - `SERF_RPC_AUTH`
#
# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr
import argparse
import collections
import os
import sys
# https://pypi.python.org/pypi/serfclient
from serfclient import SerfClient, EnvironmentConfig
try:
import json
except ImportError:
import simplejson as json
_key = 'serf'
def _serf_client():
    """Build a SerfClient configured from the environment.

    EnvironmentConfig reads SERF_RPC_ADDR / SERF_RPC_AUTH (see the header
    comment of this script).
    """
    env = EnvironmentConfig()
    return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key)
def get_serf_members_data():
    """Return the raw member records reported by the Serf agent RPC.

    Each record is a dict with (at least) 'Name' and 'Tags' keys, as
    consumed by get_nodes/get_groups/get_meta below.
    """
    return _serf_client().members().body['Members']
def get_nodes(data):
    """Return the member names from raw Serf members data."""
    names = []
    for member in data:
        names.append(member['Name'])
    return names
def get_groups(data):
    """Map each tag *value* to the list of node names carrying that tag.

    Note: grouping is by tag value only; the tag key is ignored.
    """
    groups = {}
    for node in data:
        name = node['Name']
        for tag_value in node['Tags'].values():
            groups.setdefault(tag_value, []).append(name)
    return groups
def get_meta(data):
    """Build the Ansible ``_meta`` structure: per-host variables from tags."""
    return {'hostvars': {node['Name']: node['Tags'] for node in data}}
def print_list():
    """Print the full inventory (nodes, groups, _meta) as JSON on stdout."""
    data = get_serf_members_data()
    nodes = get_nodes(data)
    groups = get_groups(data)
    meta = get_meta(data)
    inventory_data = {_key: nodes, '_meta': meta}
    # NOTE(review): tag-value groups are merged at the top level, so a tag
    # value equal to 'serf' or '_meta' would overwrite those keys -- confirm
    # this cannot occur in practice.
    inventory_data.update(groups)
    print(json.dumps(inventory_data))
def print_host(host):
    """Print the tag variables of a single *host* as JSON on stdout."""
    data = get_serf_members_data()
    meta = get_meta(data)
    # Raises KeyError (traceback to stderr) if the host is unknown to Serf.
    print(json.dumps(meta['hostvars'][host]))
def get_args(args_list):
    """Parse CLI arguments; exactly one of --list / --host is required."""
    parser = argparse.ArgumentParser(
        description='ansible inventory script reading from serf cluster')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--list', action='store_true',
                       help='list all hosts from serf cluster')
    group.add_argument('--host',
                       help='display variables for a host')
    return parser.parse_args(args_list)
def main(args_list):
    """Entry point: dispatch to --list or --host handling.

    The two flags are mutually exclusive (enforced in get_args), so at most
    one branch runs.
    """
    args = get_args(args_list)
    if args.list:
        print_list()
    if args.host:
        print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])
|
MilesDuronCIMAT/book_exercises | refs/heads/master | chapter_11/lists/__init__.py | 12133432 | |
manassolanki/frappe | refs/heads/develop | frappe/core/doctype/user_permission/__init__.py | 12133432 | |
CUCWD/edx-platform | refs/heads/master | lms/djangoapps/badges/events/__init__.py | 12133432 | |
timhughes/gnome15 | refs/heads/master | src/gnome15/objgraph.py | 8 | """
Ad-hoc tools for drawing Python object reference graphs with graphviz.
This module is more useful as a repository of sample code and ideas, than
as a finished product. For documentation and background, read
http://mg.pov.lt/blog/hunting-python-memleaks.html
http://mg.pov.lt/blog/python-object-graphs.html
http://mg.pov.lt/blog/object-graphs-with-graphviz.html
in that order. Then use pydoc to read the docstrings, as there were
improvements made since those blog posts.
Copyright (c) 2008 Marius Gedminas <marius@pov.lt>
Released under the MIT licence.
Changes
=======
1.1dev (2008-09-05)
-------------------
New function: show_refs() for showing forward references.
New functions: typestats() and show_most_common_types().
Object boxes are less crammed with useless information (such as IDs).
Spawns xdot if it is available.
"""
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__author__ = "Marius Gedminas (marius@gedmin.as)"
__copyright__ = "Copyright (c) 2008 Marius Gedminas"
__license__ = "MIT"
__version__ = "1.1dev"
__date__ = "2008-09-05"
import gc
import inspect
import types
import weakref
import operator
import os
def count(typename):
    """Count objects tracked by the garbage collector with a given class name.

    Example:

        >>> count('dict')
        42
        >>> count('MyClass')
        3

    Note that the GC does not track simple objects like int or str.
    """
    total = 0
    for obj in gc.get_objects():
        if type(obj).__name__ == typename:
            total += 1
    return total
def typestats():
    """Count the number of instances for each type tracked by the GC.

    Note that the GC does not track simple objects like int or str.

    Note that classes with the same name but defined in different modules
    will be lumped together.
    """
    stats = {}
    for obj in gc.get_objects():
        name = type(obj).__name__
        stats[name] = stats.get(name, 0) + 1
    return stats
def show_most_common_types(limit=10):
    """Count the names of types with the most instances.

    Note that the GC does not track simple objects like int or str.

    Note that classes with the same name but defined in different modules
    will be lumped together.
    """
    # Python 2 print statements: this function only runs under Python 2.
    stats = sorted(typestats().items(), key=operator.itemgetter(1),
                   reverse=True)
    if limit:
        stats = stats[:limit]
    # NOTE(review): raises ValueError on an empty stats list (max of empty
    # sequence); and the second [:limit] slice below is redundant since
    # stats was already truncated above.
    width = max(len(name) for name, count in stats)
    for name, count in stats[:limit]:
        print name.ljust(width), count
def by_type(typename):
    """Return objects tracked by the garbage collector with a given class name.

    Example:

        >>> by_type('MyClass')
        [<mymodule.MyClass object at 0x...>]

    Note that the GC does not track simple objects like int or str.
    """
    matches = []
    for obj in gc.get_objects():
        if type(obj).__name__ == typename:
            matches.append(obj)
    return matches
def at(addr):
    """Return an object at a given memory address.

    The reverse of id(obj):

        >>> at(id(obj)) is obj
        True

    Note that this function does not work on objects that are not tracked
    by the GC (e.g. ints or strings).  Returns None when nothing tracked
    lives at *addr*.
    """
    for candidate in gc.get_objects():
        if id(candidate) == addr:
            return candidate
    return None
def find_backref_chain(obj, predicate, max_depth=20, extra_ignore=()):
    """Find a shortest chain of references leading to obj.

    The start of the chain will be some object that matches your predicate.

    ``max_depth`` limits the search depth.

    ``extra_ignore`` can be a list of object IDs to exclude those objects from
    your search.

    Example:

        >>> find_backref_chain(obj, inspect.ismodule)
        [<module ...>, ..., obj]

    Returns None if such a chain could not be found.
    """
    # Breadth-first search backwards along gc referrers, recording each
    # node's parent so the shortest chain can be reconstructed on a match.
    queue = [obj]
    depth = {id(obj): 0}
    parent = {id(obj): None}
    ignore = set(extra_ignore)
    # Exclude our own bookkeeping structures so they are not reported
    # as referrers of the objects we inspect.
    ignore.add(id(extra_ignore))
    ignore.add(id(queue))
    ignore.add(id(depth))
    ignore.add(id(parent))
    ignore.add(id(ignore))
    gc.collect()
    while queue:
        target = queue.pop(0)
        if predicate(target):
            # Walk the parent links back to `obj` to build the chain.
            chain = [target]
            while parent[id(target)] is not None:
                target = parent[id(target)]
                chain.append(target)
            return chain
        tdepth = depth[id(target)]
        if tdepth < max_depth:
            referrers = gc.get_referrers(target)
            ignore.add(id(referrers))
            for source in referrers:
                # Stack frames refer to almost everything; skip them.
                if inspect.isframe(source) or id(source) in ignore:
                    continue
                if id(source) not in depth:
                    depth[id(source)] = tdepth + 1
                    parent[id(source)] = target
                    queue.append(source)
    return None  # not found
def show_backrefs(objs, max_depth=3, extra_ignore=(), filter=None, too_many=10,
                  highlight=None, filename='objgraph'):
    """Generate an object reference graph ending at ``objs``

    The graph will show you what objects refer to ``objs``, directly and
    indirectly.

    ``objs`` can be a single object, or it can be a list of objects.

    Produces a Graphviz .dot file and spawns a viewer (xdot) if one is
    installed, otherwise converts the graph to a .png image.

    Use ``max_depth`` and ``too_many`` to limit the depth and breadth of the
    graph.

    Use ``filter`` (a predicate) and ``extra_ignore`` (a list of object IDs) to
    remove undesired objects from the graph.

    Use ``highlight`` (a predicate) to highlight certain graph nodes in blue.

    ``filename`` is the basename (no extension) for the emitted .dot/.png.

    Examples:

        >>> show_backrefs(obj)
        >>> show_backrefs([obj1, obj2])
        >>> show_backrefs(obj, max_depth=5)
        >>> show_backrefs(obj, filter=lambda x: not inspect.isclass(x))
        >>> show_backrefs(obj, highlight=inspect.isclass)
        >>> show_backrefs(obj, extra_ignore=[id(locals())])
    """
    # Backrefs = follow gc.get_referrers, drawing edges referrer -> target.
    show_graph(objs, max_depth=max_depth, extra_ignore=extra_ignore,
               filter=filter, too_many=too_many, highlight=highlight,
               edge_func=gc.get_referrers, swap_source_target=False, filename=filename)
def show_refs(objs, max_depth=3, extra_ignore=(), filter=None, too_many=10,
              highlight=None):
    """Generate an object reference graph starting at ``objs``

    The graph will show you what objects are reachable from ``objs``, directly
    and indirectly.

    ``objs`` can be a single object, or it can be a list of objects.

    Produces a Graphviz .dot file and spawns a viewer (xdot) if one is
    installed, otherwise converts the graph to a .png image.

    Use ``max_depth`` and ``too_many`` to limit the depth and breadth of the
    graph.

    Use ``filter`` (a predicate) and ``extra_ignore`` (a list of object IDs) to
    remove undesired objects from the graph.

    Use ``highlight`` (a predicate) to highlight certain graph nodes in blue.

    Examples:

        >>> show_refs(obj)
        >>> show_refs([obj1, obj2])
        >>> show_refs(obj, max_depth=5)
        >>> show_refs(obj, filter=lambda x: not inspect.isclass(x))
        >>> show_refs(obj, highlight=inspect.isclass)
        >>> show_refs(obj, extra_ignore=[id(locals())])
    """
    # NOTE(review): unlike show_backrefs, no filename parameter is exposed,
    # so the output falls back to show_graph's default basename.
    show_graph(objs, max_depth=max_depth, extra_ignore=extra_ignore,
               filter=filter, too_many=too_many, highlight=highlight,
               edge_func=gc.get_referents, swap_source_target=True)
#
# Internal helpers
#
def show_graph(objs, edge_func, swap_source_target,
               max_depth=3, extra_ignore=(), filter=None, too_many=10,
               highlight=None, filename='objects'):
    """Shared worker for show_refs/show_backrefs.

    Breadth-first traversal from ``objs`` along ``edge_func`` (either
    gc.get_referrers or gc.get_referents), emitting a Graphviz dot file
    named ``<filename>.dot`` and then spawning xdot or rendering a .png.
    Python 2 only (print statements, ``file()``).
    """
    if not isinstance(objs, (list, tuple)):
        objs = [objs]
    f = file('%s.dot' % filename, 'w')
    print >> f, 'digraph ObjectGraph {'
    print >> f, '  node[shape=box, style=filled, fillcolor=white];'
    queue = []
    depth = {}
    ignore = set(extra_ignore)
    # Keep our own bookkeeping objects out of the graph.
    ignore.add(id(objs))
    ignore.add(id(extra_ignore))
    ignore.add(id(queue))
    ignore.add(id(depth))
    ignore.add(id(ignore))
    for obj in objs:
        # Roots are drawn with red text.
        print >> f, '  %s[fontcolor=red];' % (obj_node_id(obj))
        depth[id(obj)] = 0
        queue.append(obj)
    gc.collect()
    nodes = 0
    while queue:
        nodes += 1
        target = queue.pop(0)
        tdepth = depth[id(target)]
        print >> f, '  %s[label="%s"];' % (obj_node_id(target), obj_label(target, tdepth))
        # Fade node brightness with depth; modules get a distinct hue.
        h, s, v = gradient((0, 0, 1), (0, 0, .3), tdepth, max_depth)
        if inspect.ismodule(target):
            h = .3
            s = 1
        if highlight and highlight(target):
            h = .6
            s = .6
            v = 0.5 + v * 0.5
        print >> f, '  %s[fillcolor="%g,%g,%g"];' % (obj_node_id(target), h, s, v)
        if v < 0.5:
            # Dark fill: switch label text to white for readability.
            print >> f, '  %s[fontcolor=white];' % (obj_node_id(target))
        if inspect.ismodule(target) or tdepth >= max_depth:
            # Do not expand past modules or the depth limit.
            continue
        neighbours = edge_func(target)
        ignore.add(id(neighbours))
        n = 0
        for source in neighbours:
            if inspect.isframe(source) or id(source) in ignore:
                continue
            if filter and not filter(source):
                continue
            # Edge direction depends on whether we walk refs or backrefs.
            if swap_source_target:
                srcnode, tgtnode = target, source
            else:
                srcnode, tgtnode = source, target
            elabel = edge_label(srcnode, tgtnode)
            print >> f, '  %s -> %s%s;' % (obj_node_id(srcnode), obj_node_id(tgtnode), elabel)
            if id(source) not in depth:
                depth[id(source)] = tdepth + 1
                queue.append(source)
            n += 1
            if n >= too_many:
                # Breadth limit hit: mark the node red and stop expanding it.
                print >> f, '  %s[color=red];' % obj_node_id(target)
                break
    print >> f, "}"
    f.close()
    # NOTE(review): these messages hard-code "objects.dot"/"objects.png" but
    # the actual files are named after ``filename`` -- the text is stale
    # whenever a caller passes a different basename.
    print "Graph written to objects.dot (%d nodes)" % nodes
    if os.system('which xdot >/dev/null') == 0:
        print "Spawning graph viewer (xdot)"
        os.system("xdot %s.dot &" % filename)
    else:
        os.system("dot -Tpng %s.dot > %s.png" % (filename, filename))
        print "Image generated as objects.png"
def obj_node_id(obj):
    """Return a Graphviz-safe node id for *obj*.

    All weak references collapse onto a single shared node.
    """
    if isinstance(obj, weakref.ref):
        return 'all_weakrefs_are_one'
    node = 'o%d' % id(obj)
    # ids can be negative on some platforms and '-' is not a valid dot id.
    return node.replace('-', '_')
def obj_label(obj, depth):
    """Human-readable node label: the type name plus a short repr."""
    text = type(obj).__name__ + ':\n' + safe_repr(obj)
    return quote(text)
def quote(s):
    """Escape backslashes, double quotes and newlines for a dot label."""
    # Backslash must be escaped first so later escapes are not doubled.
    for raw, escaped in (("\\", "\\\\"), ("\"", "\\\""), ("\n", "\\n")):
        s = s.replace(raw, escaped)
    return s
def safe_repr(obj):
    """Return short_repr(obj), or a placeholder if its repr raises.

    Some objects raise from __repr__; any such error is reduced to a
    fixed string so graph rendering never fails.
    """
    try:
        return short_repr(obj)
    except Exception:
        # Was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        return '(unrepresentable)'
def short_repr(obj):
    """Return a compact, human-oriented repr of *obj* for graph labels."""
    if isinstance(obj, (type, types.ModuleType, types.BuiltinMethodType,
                        types.BuiltinFunctionType)):
        return obj.__name__
    if isinstance(obj, types.MethodType):
        # im_self/im_func are Python 2 method attributes; this branch (and
        # hence this module) only runs under Python 2.
        if obj.im_self is not None:
            return obj.im_func.__name__ + ' (bound)'
        else:
            return obj.im_func.__name__
    if isinstance(obj, (tuple, list, dict, set)):
        # Containers can be huge; show only their size.
        return '%d items' % len(obj)
    if isinstance(obj, weakref.ref):
        return 'all_weakrefs_are_one'
    # Fallback: truncate arbitrary reprs to keep node labels small.
    return repr(obj)[:40]
def gradient(start_color, end_color, depth, max_depth):
    """Linearly interpolate between two (h, s, v) colors.

    The blend fraction is depth / max_depth; a zero max_depth returns
    start_color unchanged.
    """
    if max_depth == 0:
        # avoid division by zero
        return start_color
    fraction = float(depth) / max_depth
    return tuple(
        begin * (1 - fraction) + end * fraction
        for begin, end in zip(start_color, end_color)
    )
def edge_label(source, target):
    """Return a dot edge-attribute string describing source->target, or ''."""
    # An instance's __dict__ gets a heavy edge so the attribute link stays
    # short in the layout.
    if isinstance(target, dict) and target is getattr(source, '__dict__', None):
        return ' [label="__dict__",weight=10]'
    elif isinstance(source, dict):
        # Label dict edges with the key under which the target is stored.
        # iteritems/basestring are Python 2 only.
        for k, v in source.iteritems():
            if v is target:
                if isinstance(k, basestring) and k:
                    return ' [label="%s",weight=2]' % quote(k)
                else:
                    return ' [label="%s"]' % quote(safe_repr(k))
    return ''
|
kennethreitz-archive/mead | refs/heads/master | mead/plugins/index/controllers/about.py | 1 | # Part of Mead. See LICENSE file for full copyright and licensing details.
from mead.plugins.index import mead_index
from flask import render_template
@mead_index.route('/about')
def about():
    """Render the static "About" page for the index plugin."""
    return render_template('about.html')
|
ddimensia/RaceCapture_App | refs/heads/master | test/__init__.py | 12133432 | |
dydek/django | refs/heads/master | django/conf/locale/pt_BR/__init__.py | 12133432 | |
zasdfgbnm/tensorflow | refs/heads/master | tensorflow/contrib/framework/python/ops/critical_section_test.py | 6 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""critical section tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import critical_section_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
# TODO(ebrevdo): Re-enable once CriticalSection is in core.
# from tensorflow.python.training import saver as saver_lib
class CriticalSectionTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testCreateCriticalSection(self):
cs = critical_section_ops.CriticalSection(name="cs")
v = resource_variable_ops.ResourceVariable(0.0, name="v")
def fn(a, b):
c = v.read_value()
with ops.control_dependencies([c]):
nv = v.assign_add(a * b)
with ops.control_dependencies([nv]):
return array_ops.identity(c)
num_concurrent = 1000
r = [cs.execute(fn, 1.0, 2.0) for _ in range(num_concurrent)]
self.evaluate(v.initializer)
r_value = self.evaluate(r)
self.assertAllClose([2.0 * i for i in range(num_concurrent)],
sorted(r_value))
@test_util.run_in_graph_and_eager_modes()
def testCreateCriticalSectionFnReturnsOp(self):
cs = critical_section_ops.CriticalSection(name="cs")
v = resource_variable_ops.ResourceVariable(0.0, name="v")
def fn_return_op(a, b):
c = v.read_value()
with ops.control_dependencies([c]):
nv = v.assign_add(a * b)
with ops.control_dependencies([nv]):
return ()
num_concurrent = 100
r = [cs.execute(fn_return_op, 1.0, 2.0) for _ in range(num_concurrent)]
self.evaluate(v.initializer)
self.evaluate(r)
final_v = self.evaluate(v)
self.assertAllClose(2.0 * num_concurrent, final_v)
def testCreateCriticalSectionRaw(self):
cs = critical_section_ops.CriticalSection(name="cs")
v = resource_variable_ops.ResourceVariable(0.0, name="v")
@function.Defun(dtypes.float32, dtypes.float32)
def fn(a, b):
c = v.read_value()
with ops.control_dependencies([c]):
nv = v.assign_add(a * b)
with ops.control_dependencies([nv]):
return array_ops.identity(c)
def execute(fn, *args):
output_args = fn.definition.signature.output_arg
return resource_variable_ops.execute_in_critical_section(
critical_section=cs._handle,
arguments=list(args) + fn.captured_inputs,
f=fn,
output_types=[out.type for out in output_args],
output_shapes=[tensor_shape.TensorShape(None) for _ in output_args])
num_concurrent = 1000
r = [execute(fn, 1.0, 2.0)[0] for _ in range(num_concurrent)]
self.evaluate(v.initializer)
r_value = self.evaluate(r)
self.assertAllClose([2.0 * i for i in range(num_concurrent)],
sorted(r_value))
def testCollection(self):
cs = critical_section_ops.CriticalSection(name="cs")
self.assertIn(
cs, ops.get_collection(critical_section_ops.CRITICAL_SECTIONS))
execute_op = cs.execute(lambda x: x + 1, 1.0).op
self.assertIn(
execute_op,
[signature.op for signature in
ops.get_collection(critical_section_ops.CRITICAL_SECTION_EXECUTIONS)])
@test_util.run_in_graph_and_eager_modes()
def testRecursiveCriticalSectionAccessIsIllegal(self):
cs = critical_section_ops.CriticalSection(name="cs")
def fn(x):
return cs.execute(lambda x: x+1, x)
with self.assertRaisesRegexp(
ValueError,
r"attempts to access the CriticalSection in which it would be running"):
cs.execute(fn, 1.0)
def testMultipleCSExecutionsRequestSameResource(self):
cs0 = critical_section_ops.CriticalSection()
cs1 = critical_section_ops.CriticalSection()
v = resource_variable_ops.ResourceVariable(0.0, name="v")
cs0.execute(lambda: v + 1)
# It's OK for the same CriticalSection to access this resource.
cs0.execute(lambda: v - 1)
# It's *not* OK for a different CriticalSection to access it by
# default.
with self.assertRaisesRegexp(
ValueError, "requested exclusive resource access"):
cs1.execute(lambda: v + 1)
# It's not even OK if the second call doesn't request exclusive access.
with self.assertRaisesRegexp(
ValueError, "requested exclusive resource access"):
cs1.execute(lambda: v + 1, exclusive_resource_access=False)
v2 = resource_variable_ops.ResourceVariable(0.0, name="v2")
cs0.execute(lambda: v2 + 1, exclusive_resource_access=False)
# It's OK if neither requests exclusive resource access.
cs1.execute(lambda: v2 + 1, exclusive_resource_access=False)
# It's not OK if the second request requires exlusive resource
# access.
with self.assertRaisesRegexp(
ValueError, "requested exclusive resource access"):
cs1.execute(lambda: v2 + 1)
# TODO(ebrevdo): Re-enable once CriticalSection is in core.
#
# def testCriticalSectionAndExecuteOpSaverRoundTrip(self):
# cs = critical_section_ops.CriticalSection()
# r = cs.execute(lambda x: x + 1, 1.0)
# graph = ops.get_default_graph()
# meta_graph = saver_lib.export_meta_graph(
# graph=graph, collection_list=graph.get_all_collection_keys())
# graph_copy = ops.Graph()
# with graph_copy.as_default():
# _ = saver_lib.import_meta_graph(meta_graph, import_scope="imported")
# restored_cs = ops.get_collection(critical_section_ops.CRITICAL_SECTIONS)
# restored_exec = ops.get_collection(
# critical_section_ops.CRITICAL_SECTION_EXECUTIONS)
# self.assertEqual(1, len(restored_cs))
# self.assertEqual(1, len(restored_exec))
# self.assertEqual(restored_cs[0].name, "imported/%s" % cs.name)
# self.assertEqual(restored_exec[0].op.name, "imported/%s" % r.op.name)
# def testToProto(self):
# cs = critical_section_ops.CriticalSection(name="cs")
# proto = cs.to_proto()
# self.assertEqual(proto.critical_section_name, cs._handle.name)
# cs_copy = critical_section_ops.CriticalSection.from_proto(proto)
# self.assertEqual(cs_copy._handle, cs._handle)
if __name__ == "__main__":
test.main()
|
loic/django | refs/heads/master | tests/template_tests/templatetags/subpackage/echo.py | 580 | from django import template
register = template.Library()
@register.simple_tag
def echo2(arg):
    """Template tag that returns its single argument unchanged."""
    return arg
|
jmgonzalez00449/Arduino | refs/heads/master | arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/util.py | 189 | # urllib3/util.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from base64 import b64encode
from collections import namedtuple
from socket import error as SocketError
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
try: # Test for SSL features
SSLContext = None
HAS_SNI = False
import ssl
from ssl import wrap_socket, CERT_NONE, SSLError, PROTOCOL_SSLv23
from ssl import SSLContext # Modern SSL?
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
from .packages import six
from .exceptions import LocationParseError
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`.
    """
    # Bug fix: this was previously ``slots = ()`` -- a typo that had no
    # effect, silently giving every instance a per-instance ``__dict__``.
    # ``__slots__ = ()`` is required to keep namedtuple subclasses
    # lightweight and attribute-frozen.
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # Redeclared only to give every field a keyword argument with a
        # ``None`` default; the positional order matches the declaration.
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'
        if self.query is not None:
            uri += '?' + self.query
        return uri
def split_first(s, delims):
    """
    Split ``s`` on whichever delimiter from ``delims`` occurs earliest.

    Returns ``(head, tail, delimiter)``.  When none of the delimiters is
    present, the whole string comes back as the head: ``(s, '', None)``.

    Example: ::
        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with the number of delims; not ideal for many of them.
    """
    best = None  # (position, delimiter) of the earliest hit so far
    for delim in delims:
        pos = s.find(delim)
        if pos >= 0 and (best is None or pos < best[0]):
            best = (pos, delim)
    if best is None:
        return s, '', None
    pos, delim = best
    return s[:pos], s[pos + 1:], delim
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.
    Partly backwards-compatible with :mod:`urlparse`.
    Example: ::
        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """
    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementation does silly things to be optimal
    # on CPython.
    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None
    # Scheme: everything before '://'; the scheme name is not validated.
    if '://' in url:
        scheme, url = url.split('://', 1)
    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])
    if delim:
        # Reassemble the path, re-attaching the terminator ('/', '?' or '#')
        # so the fragment/query splits at the bottom see the full remainder.
        path = delim + path_
    # Auth: credentials before the first '@' are split off verbatim.
    if '@' in url:
        auth, url = url.split('@', 1)
    # IPv6: bracketed literal; brackets are stripped from `host` here.
    if url and url[0] == '[':
        host, url = url[1:].split(']', 1)
    # Port: must be all digits, otherwise the whole url is rejected.
    if ':' in url:
        _host, port = url.split(':', 1)
        if not host:
            # Host was not set by the IPv6 branch above.
            host = _host
        if not port.isdigit():
            raise LocationParseError("Failed to parse: %s" % url)
        port = int(port)
    elif not host and url:
        host = url
    if not path:
        # Nothing after the authority: fragment and query stay None.
        return Url(scheme, auth, host, port, path, query, fragment)
    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)
    # Query
    if '?' in path:
        path, query = path.split('?', 1)
    return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
    """
    Deprecated. Use :func:`.parse_url` instead.
    """
    parsed = parse_url(url)
    scheme = parsed.scheme or 'http'
    return scheme, parsed.hostname, parsed.port
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
                 basic_auth=None):
    """
    Build a dict of common request headers.

    :param keep_alive:
        If truthy, adds a 'connection: keep-alive' header.
    :param accept_encoding:
        ``True`` selects the default 'gzip,deflate'; a list is joined with
        commas; a string is used verbatim.
    :param user_agent:
        Value for the 'user-agent' header, e.g. "python-urllib3/0.6".
    :param basic_auth:
        Colon-separated 'username:password' string, emitted base64-encoded
        as an 'authorization: basic ...' header.
    """
    headers = {}
    if accept_encoding:
        if isinstance(accept_encoding, list):
            accept_encoding = ','.join(accept_encoding)
        elif not isinstance(accept_encoding, str):
            # Any other truthy value (e.g. True) selects the defaults.
            accept_encoding = 'gzip,deflate'
        headers['accept-encoding'] = accept_encoding
    if user_agent:
        headers['user-agent'] = user_agent
    if keep_alive:
        headers['connection'] = 'keep-alive'
    if basic_auth:
        headers['authorization'] = 'Basic ' + \
            b64encode(six.b(basic_auth)).decode('utf-8')
    return headers
def is_connection_dropped(conn):
    """
    Returns True if the connection is dropped and should be closed.
    :param conn:
        :class:`httplib.HTTPConnection` object.
    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    # A connection without a socket cannot be probed; treat it as alive.
    sock = getattr(conn, 'sock', False)
    if not sock: # Platform-specific: AppEngine
        return False
    if not poll: # Platform-specific
        if not select: # Platform-specific: AppEngine
            return False
        try:
            # Zero-timeout select: a readable idle socket means either
            # buffered data (unexpected) or EOF; the returned list is
            # truthy in both cases, so truthy == "dropped".
            return select([sock], [], [], 0.0)[0]
        except SocketError:
            return True
    # This version is better on platforms that support it.
    p = poll()
    p.register(sock, POLLIN)
    for (fno, ev) in p.poll(0.0):
        if fno == sock.fileno():
            # Either data is buffered (bad), or the connection is dropped.
            return True
    # NOTE: falls off the end with an implicit None (falsy) when no event
    # is pending, i.e. the connection is considered alive.
def resolve_cert_reqs(candidate):
    """
    Resolve *candidate* to a numeric constant usable as the ``cert_reqs``
    argument of :func:`ssl.wrap_socket`.

    ``None`` maps to :data:`ssl.CERT_NONE`.  A string is looked up as an
    attribute of the :mod:`ssl` module, first verbatim and then with a
    ``CERT_`` prefix (so 'REQUIRED' resolves like 'CERT_REQUIRED').
    Anything else is assumed to already be a numeric constant and is
    returned unchanged.
    """
    if candidate is None:
        return CERT_NONE
    if not isinstance(candidate, str):
        return candidate
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        # Allow the abbreviated spelling without the CERT_ prefix.
        resolved = getattr(ssl, 'CERT_' + candidate)
    return resolved
def resolve_ssl_version(candidate):
    """
    Resolve *candidate* to an :mod:`ssl` protocol constant; mirrors
    :func:`resolve_cert_reqs`.

    ``None`` maps to :data:`ssl.PROTOCOL_SSLv23`.  A string is looked up as
    an ssl-module attribute, first verbatim and then with a ``PROTOCOL_``
    prefix.  Any other value is returned unchanged.
    """
    if candidate is None:
        return PROTOCOL_SSLv23
    if not isinstance(candidate, str):
        return candidate
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        # Allow the abbreviated spelling without the PROTOCOL_ prefix.
        resolved = getattr(ssl, 'PROTOCOL_' + candidate)
    return resolved
if SSLContext is not None: # Python 3.2+
    def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                        ca_certs=None, server_hostname=None,
                        ssl_version=None):
        """
        All arguments except `server_hostname` have the same meaning as for
        :func:`ssl.wrap_socket`
        :param server_hostname:
            Hostname of the expected certificate
        """
        context = SSLContext(ssl_version)
        context.verify_mode = cert_reqs
        if ca_certs:
            try:
                context.load_verify_locations(ca_certs)
            # Py32 raises IOError
            # Py33 raises FileNotFoundError
            except Exception as e: # Reraise as SSLError
                raise SSLError(e)
        if certfile:
            # FIXME: This block needs a test.
            context.load_cert_chain(certfile, keyfile)
        if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
            return context.wrap_socket(sock, server_hostname=server_hostname)
        return context.wrap_socket(sock)
else: # Python 3.1 and earlier
    def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                        ca_certs=None, server_hostname=None,
                        ssl_version=None):
        # Fallback without SSLContext: ``server_hostname`` is accepted for
        # signature compatibility but unused -- no SNI support here.
        return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
                           ca_certs=ca_certs, cert_reqs=cert_reqs,
                           ssl_version=ssl_version)
|
pombredanne/pants | refs/heads/master | src/python/pants/backend/jvm/tasks/nailgun_task.py | 4 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.base.exceptions import TaskError
from pants.java import util
from pants.java.executor import SubprocessExecutor
from pants.java.jar.jar_dependency import JarDependency
from pants.java.nailgun_executor import NailgunExecutor, NailgunProcessGroup
from pants.pantsd.subsystem.subprocess import Subprocess
from pants.task.task import Task, TaskBase
class NailgunTaskBase(JvmToolTaskMixin, TaskBase):
ID_PREFIX = 'ng'
@classmethod
def register_options(cls, register):
super(NailgunTaskBase, cls).register_options(register)
register('--use-nailgun', type=bool, default=True,
help='Use nailgun to make repeated invocations of this task quicker.')
register('--nailgun-timeout-seconds', advanced=True, default=10, type=float,
help='Timeout (secs) for nailgun startup.')
register('--nailgun-connect-attempts', advanced=True, default=5, type=int,
help='Max attempts for nailgun connects.')
cls.register_jvm_tool(register,
'nailgun-server',
classpath=[
JarDependency(org='com.martiansoftware',
name='nailgun-server',
rev='0.9.1'),
])
@classmethod
def subsystem_dependencies(cls):
return super(NailgunTaskBase, cls).subsystem_dependencies() + (Subprocess.Factory,)
def __init__(self, *args, **kwargs):
"""
:API: public
"""
super(NailgunTaskBase, self).__init__(*args, **kwargs)
id_tuple = (self.ID_PREFIX, self.__class__.__name__)
self._identity = '_'.join(id_tuple)
self._executor_workdir = os.path.join(self.context.options.for_global_scope().pants_workdir,
*id_tuple)
def create_java_executor(self):
"""Create java executor that uses this task's ng daemon, if allowed.
Call only in execute() or later. TODO: Enforce this.
"""
if self.get_options().use_nailgun:
classpath = os.pathsep.join(self.tool_classpath('nailgun-server'))
return NailgunExecutor(self._identity,
self._executor_workdir,
classpath,
self.dist,
connect_timeout=self.get_options().nailgun_timeout_seconds,
connect_attempts=self.get_options().nailgun_connect_attempts)
else:
return SubprocessExecutor(self.dist)
def runjava(self, classpath, main, jvm_options=None, args=None, workunit_name=None,
workunit_labels=None, workunit_log_config=None):
"""Runs the java main using the given classpath and args.
If --no-use-nailgun is specified then the java main is run in a freshly spawned subprocess,
otherwise a persistent nailgun server dedicated to this Task subclass is used to speed up
amortized run times.
:API: public
"""
executor = self.create_java_executor()
# Creating synthetic jar to work around system arg length limit is not necessary
# when `NailgunExecutor` is used because args are passed through socket, therefore turning off
# creating synthetic jar if nailgun is used.
create_synthetic_jar = not self.get_options().use_nailgun
try:
return util.execute_java(classpath=classpath,
main=main,
jvm_options=jvm_options,
args=args,
executor=executor,
workunit_factory=self.context.new_workunit,
workunit_name=workunit_name,
workunit_labels=workunit_labels,
workunit_log_config=workunit_log_config,
create_synthetic_jar=create_synthetic_jar,
synthetic_jar_dir=self._executor_workdir)
except executor.Error as e:
raise TaskError(e)
# TODO(John Sirois): This just prevents ripple - maybe inline
class NailgunTask(NailgunTaskBase, Task):
"""
:API: public
"""
pass
class NailgunKillall(Task):
"""Kill running nailgun servers."""
@classmethod
def register_options(cls, register):
super(NailgunKillall, cls).register_options(register)
register('--everywhere', type=bool,
help='Kill all nailguns servers launched by pants for all workspaces on the system.')
def execute(self):
NailgunProcessGroup().killall(everywhere=self.get_options().everywhere)
|
xhat/micropython | refs/heads/master | tests/basics/subclass_native5.py | 117 | # Subclass from 2 bases explicitly subclasses from object
class Base1(object):
    pass
class Base2(object):
    pass
# Regression target: multiple inheritance where both bases explicitly derive
# from object (no diamond beyond object itself); instantiation must succeed.
class Sub(Base1, Base2):
    pass
o = Sub()
|
joeryan/web-data | refs/heads/master | websock1.py | 1 | # simple web socket access
# get the data at a specific uri and print to std out
import socket, sys, re
websock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print sys.argv
if len(sys.argv) > 1:
optionalurl = sys.argv[1]
urlmatch = re.match(r"(http://.+?/) (.+)", optionalurl)
print urlmatch
if (urlmatch):
website = urlmatch.group(1)
page = urlmatch.group(2)
print (website +'/' + page)
else:
print "Bad url passed as command line option"
sys.exit()
else:
website = "www.py4inf.com"
page = 'code/romeo.txt'
print website
websock.connect((website, 80))
websock.send('GET http://' + website + '/' + page + ' 1.1 \n\n')
while True:
data = websock.recv(512)
if len(data) < 1:
break
print data
websock.close()
|
kreopt/aioweb | refs/heads/master | tests/wyrm/files/insert_in_python_r9.py | 2 | from orator.migrations import Migration
class AcidTest(Migration):
    def up(self):
        """
        Run the migrations.
        """
        with self.schema.table('shits') as table:
            # Bug fix: this previously called ``self.integer(...)`` -- the
            # Migration object has no such method; column definitions must
            # be made on the ``table`` blueprint yielded by the context.
            table.integer("lsd_quality")

    def down(self):
        """
        Revert the migrations.
        """
        with self.schema.table('shits') as table:
            # Actually revert up(): drop the column it added (the body was
            # previously an empty ``pass``).
            table.drop_column("lsd_quality")
|
h-utkuunlu/tubitak-uav | refs/heads/master | gorev_yazilim/gorev_1_otonom.py | 1 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Mission 1 autonomous flight script.
#
# Bug fixes relative to the original:
#   * ``from __future__ import print_function`` must be the first statement;
#     the original printed (with py2 statement syntax) before it, which is a
#     SyntaxError twice over.  All prints now use the function form.
#   * ``skeep(10)`` was a typo for ``sleep(10)`` (NameError).
#   * ``datetime`` is used for capture filenames but was never imported.
#   * The mission timeout compared ``gorev_baslangic - time()`` (always
#     negative), so the 4-minute abort could never trigger.
from __future__ import print_function
from dronekit import connect, VehicleMode, LocationGlobalRelative, LocationGlobal
from time import sleep, time
import datetime
import numpy as np
import cv2
import imutils
import math
import picamera
import tanimlar as t

print("Görev 1 Otonom")
###############################################################################
### Variable setup
## Initial values
sayac = 0  # counter of matrix states written to the SD card
#hava_hiz = 10
## Target definitions
# Altitude derivation: irtifa = ((matrix width)/2)/(arctan(24.4 deg)).
# For a 5 m matrix width h = 5.51, so anything between 6 and 10 should work.
# Measured from the matrix surface.
irtifa = 8
matris_konum = LocationGlobalRelative(40.087013, 32.594787, irtifa)
teslim_konum = LocationGlobalRelative(40.084942, 32.593614, irtifa)
dogrultu = 230
sleep(10)  # fixed: was ``skeep(10)``
### Communication with the drone
drone = connect('/dev/ttyACM0', wait_ready=True)
### Drone movements
## Take-off & record the home position
(ev_konum, ev_dogrultu) = t.hazirlik_ve_kalkis(drone, irtifa)
## After take-off, fly over the matrix
drone.simple_goto(matris_konum)
while t.hedef_varis(drone, matris_konum):
    sleep(1)
t.dogrultu_duzelt(drone, ev_dogrultu)
# Start the camera and process captured frames
kamera = picamera.PiCamera()
yeni_set = True
gorev_baslangic = time()
while True:
    dosya_adi = datetime.datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H-%M-%S') + '.jpg'
    kamera.capture(dosya_adi)
    goruntu = cv2.imread(dosya_adi, -1)
    matris = t.goruntu_isleme(goruntu)
    if matris != 'Hata':  # all 16 squares were found
        if (matris == 'k'*16 or matris == 's'*16 or matris == 'm'*16):
            # Uniform colour: the matrix is mid-transition; wait it out.
            print("Kareler değişim düzeninde")
            yeni_set = True
            sleep(2)
        elif yeni_set:
            # A freshly settled pattern: record it once.
            print("Kareler belirlendi. Yazılıyor")
            yeni_set = False
            t.sd_kart(matris)
            sayac += 1
            sleep(2)
        else:
            print("Kareler henüz değişmedi")
            sleep(1)
    else:
        sleep(0.05)
    if sayac >= 3:  # TODO: number of required captures may change
        break
    if time() - gorev_baslangic > 240:
        # Mission has run for 4 minutes; assume something is wrong and recall
        # the drone.  (Fixed: operands were reversed, so this never fired.)
        break
### Mission complete
print('Görev tamam')
kamera.close()
## Return to the launch position
drone.mode = VehicleMode("RTL")
while t.hedef_varis(drone, ev_konum):
    sleep(1)
print("Kalkış noktasına ulaşıldı")
## Landing
drone.mode = VehicleMode("LAND")
while drone.location.global_relative_frame.alt > 0.25:
    print(drone.location.global_relative_frame.alt)
    sleep(0.5)
print("İniş gerçekleşti")
## Device shutdown commands
print("Kapatılıyor")
cv2.destroyAllWindows()
drone.close()
print("Kapatma başarılı. Program sona erdi")
|
linearregression/socorro | refs/heads/master | socorro/unittest/database/createSchema.py | 11 | #! /usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# XXX Set to be deprecated in favor of socorro/external/postgresql/models.py
"""
Just set up the database and exit. Assume we can get config details from the test config file, but allow sys.argv to override
"""
import logging
import sys
import socorro.lib.ConfigurationManager as configurationManager
from socorro.unittest.testlib.testDB import TestDB
import dbTestconfig as testConfig
def help():
print """Usage: (python) createSchema.py [config-options] [--help]
First removes all the known socorro tables, then creates an instance of
the current socorro schema in an existing database. Does NOT drop tables
other than the ones known to this schema.
Default: use current unittest config for host, database, user and password.
--help: print this message and exit
config-options: You may pass any of the following:
[--]host=someHostName
[--]dbname=someDatabaseName
[--]user=someUserName
[--]password=somePassword
"""
def main():
logger = logging.getLogger("topcrashes_summary")
logger.setLevel(logging.WARNING)
stderrLog = logging.StreamHandler()
stderrLog.setLevel(logging.WARNING)
stderrLogFormatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')
stderrLog.setFormatter(stderrLogFormatter)
logger.addHandler(stderrLog)
kwargs = {}
for i in sys.argv[1:]:
if i.startswith('-h') or i.startswith('--he'):
help()
sys.exit(0)
j = i
if i.startswith('-'):
j = i.lstrip('-')
if '=' in j:
name,value = (s.strip() for s in j.split('='))
kwargs[name] = value
else:
print >> sys.stderr,"Ignoring unkown argument '%s'"%(i)
sys.argv = sys.argv[:1]
config = configurationManager.newConfiguration(configurationModule = testConfig, applicationName='Create Database')
config.update(kwargs)
testDB = TestDB()
testDB.removeDB(config,logger)
testDB.createDB(config,logger)
if __name__ == '__main__':
main()
|
polyaxon/polyaxon | refs/heads/master | core/tests/test_transports/test_http_transport.py | 1 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from polyaxon.client.transport import Transport
from tests.test_transports.utils import BaseTestCaseTransport
class TestHttpTransport(BaseTestCaseTransport):
    """Checks that the Transport HTTP session is created lazily."""
    # pylint:disable=protected-access
    def setUp(self):
        super().setUp()
        self.transport = Transport()
    def test_session(self):
        # No session attribute until the property is first accessed.
        assert hasattr(self.transport, "_session") is False
        # Accessing the property creates and caches a requests.Session.
        assert isinstance(self.transport.session, requests.Session)
        assert isinstance(self.transport._session, requests.Session)
|
zincumyx/Mammoth | refs/heads/master | mammoth-src/build/contrib/hod/hodlib/Common/allocationManagerUtil.py | 182 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""defines Allocation Manager Utilities"""
# -*- python -*-
from hodlib.allocationManagers.goldAllocationManager import goldAllocationManager
class allocationManagerUtil:
    """Factory helpers for obtaining concrete AllocationManager instances."""

    @staticmethod
    def getAllocationManager(name, cfg, log):
        """Return a concrete instance of the specified AllocationManager.

        Only 'gold' is recognised; any other name falls through and yields
        an implicit None.
        """
        if name == 'gold':
            return goldAllocationManager(cfg, log)
|
frank-tancf/scikit-learn | refs/heads/master | examples/model_selection/plot_validation_curve.py | 141 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)

import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve

# Score an SVC over a log-spaced range of gamma values with 10-fold CV.
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
    SVC(), X, y, param_name="gamma", param_range=param_range,
    cv=10, scoring="accuracy", n_jobs=1)
# Mean +/- std across folds, per gamma value.
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)

plt.title("Validation Curve with SVM")
# Bug fix: raw string -- "\g" is an invalid escape sequence and raises a
# warning (an error on future Pythons); the rendered TeX label is unchanged.
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
             color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
                 train_scores_mean + train_scores_std, alpha=0.2,
                 color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
             color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
                 test_scores_mean + test_scores_std, alpha=0.2,
                 color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
|
weisongchen/flaskapp | refs/heads/master | venv/lib/python2.7/site-packages/setuptools/__init__.py | 130 | """Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
import distutils.core
import distutils.filelist
from distutils.util import convert_path
from fnmatch import fnmatchcase
from six.moves import filter, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature
from setuptools.depends import Require
from . import monkey
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages',
]
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder(object):
"""
Generate a list of all Python packages found within a directory
"""
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' is the root directory which will be searched for packages. It
should be supplied as a "cross-platform" (i.e. URL-style) path; it will
be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
"""
return list(cls._find_packages_iter(
convert_path(where),
cls._build_filter('ez_setup', '*__pycache__', *exclude),
cls._build_filter(*include)))
@classmethod
def _find_packages_iter(cls, where, exclude, include):
"""
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
"""
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
# Skip directory trees that are not valid packages
if ('.' in dir or not cls._looks_like_package(full_path)):
continue
# Should this package be included?
if include(package) and not exclude(package):
yield package
# Keep searching subdirectories, as there may be more packages
# down there, even if the parent was excluded.
dirs.append(dir)
@staticmethod
def _looks_like_package(path):
"""Does a directory look like a package?"""
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
setup = distutils.core.setup
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
"""
Construct the command for dist, updating
vars(self) with any keyword parameters.
"""
_Command.__init__(self, dist)
vars(self).update(kw)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
vars(cmd).update(kw)
return cmd
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
    """
    Find all files under ``dir`` and return the list of full filenames.
    Unless ``dir`` is '.', the returned names keep ``dir`` prepended.
    """
    results = _find_all_simple(dir)
    if dir == os.curdir:
        # Relative names when scanning the current directory.
        results = (os.path.relpath(path, start=dir) for path in results)
    return list(results)
monkey.patch_all()
|
zenodo/invenio | refs/heads/zenodo-master | invenio/modules/bulletin/format_elements/bfe_webjournal_widget_featureRecord.py | 13 | # -*- coding: utf-8 -*-
# $Id: bfe_webjournal_widget_forTheEyes.py,v 1.7 2008/06/03 10:04:16 jerome Exp $
#
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal widget - List the featured records
"""
from invenio.modules.formatter.engine import BibFormatObject
from invenio.config import CFG_SITE_URL, CFG_SITE_RECORD
from invenio.legacy.webjournal.utils import \
parse_url_string, \
get_featured_records
def format_element(bfo):
    """
    Display the journal's featured records as linked thumbnails.

    Each featured record becomes an image + title anchor pointing at the
    record page, localized to the user's language when possible.
    """
    args = parse_url_string(bfo.user_info['uri'])
    journal_name = args["journal_name"]
    entries = []
    for (recid, img_url) in get_featured_records(journal_name):
        record = BibFormatObject(recid)
        if bfo.lang == 'fr':
            title = record.field('246_1a')
            if not title:
                # No French translation available: fall back to English
                title = record.field('245__a')
        else:
            title = record.field('245__a')
        entries.append('''
    <a href="%s/%s/%s?ln=%s" style="display:block">
        <img src="%s" alt="" width="100" class="phr" />
        %s
    </a>
    ''' % (CFG_SITE_URL, CFG_SITE_RECORD, recid, bfo.lang, img_url, title))
    return '<br/><br/>'.join(entries)
def escape_values(bfo):
    """
    Tell BibFormat whether this element's output needs HTML-escaping.

    :param bfo: BibFormatObject (unused)
    :return: 0, meaning the HTML produced by this element must not be escaped
    """
    return 0
|
smjurcak/csm | refs/heads/master | csmserver/parsers/platforms/IOS_XR.py | 1 | # =============================================================================
# Copyright (c) 2015, Cisco Systems, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
import re
from models import Package
from constants import PackageState
from base import BaseSoftwarePackageParser, BaseInventoryParser
from models import get_db_session_logger
class IOSXRSoftwarePackageParser(BaseSoftwarePackageParser):
    """Parses 'show install ...' CLI captures for classic IOS-XR platforms."""

    def set_host_packages_from_cli(self, ctx):
        """Populate ``ctx.host.packages`` from the cached CLI captures.

        Reads the stored ``show install inactive/active/committed`` output,
        folds the commit state into the active/inactive package sets, and
        stores the merged result on the host.

        :param ctx: plugin context carrying cached CLI data and the host model.
        :return: True if at least one package was found, otherwise False.
        """
        inactive_packages = {}
        active_packages = {}
        committed_packages = {}
        host_packages = []

        cli_show_install_inactive = ctx.load_data('cli_show_install_inactive')
        cli_show_install_active = ctx.load_data('cli_show_install_active')
        cli_show_install_committed = ctx.load_data('cli_show_install_committed')

        # Each cached capture is stored as a list whose first element is the
        # raw CLI text; anything else means the capture is missing.
        if isinstance(cli_show_install_inactive, list):
            inactive_packages = self.parseContents(cli_show_install_inactive[0], PackageState.INACTIVE)
        if isinstance(cli_show_install_active, list):
            active_packages = self.parseContents(cli_show_install_active[0], PackageState.ACTIVE)
        if isinstance(cli_show_install_committed, list):
            committed_packages = self.parseContents(cli_show_install_committed[0], PackageState.ACTIVE_COMMITTED)

        if committed_packages:
            for package in active_packages.values():
                if package.name in committed_packages:
                    package.state = PackageState.ACTIVE_COMMITTED
            for package in inactive_packages.values():
                if package.name in committed_packages:
                    # A committed package that is currently inactive has
                    # just been deactivated.
                    package.state = PackageState.INACTIVE_COMMITTED

        host_packages.extend(active_packages.values())
        host_packages.extend(inactive_packages.values())

        if host_packages:
            ctx.host.packages = host_packages
            return True
        return False

    def parseContents(self, lines, package_state):
        """Parse one 'show install ...' capture into Package objects.

        Packages are listed after a header line containing 'Packages', one
        per line as ``location:name`` (or bare ``name``), terminated by the
        first blank line.

        :param lines: raw CLI output (str) or None.
        :param package_state: PackageState applied to every package found.
        :return: dict mapping package name -> Package.
        """
        package_dict = {}
        if lines is None:
            return package_dict

        found = False
        for line in lines.splitlines():
            if found:
                line = line.strip()
                # The package list ends at the first blank line; check this
                # before attempting to split the line.
                if len(line) == 0:
                    break
                # Split only on the first ':' so a package name that itself
                # contains a colon does not raise ValueError on unpacking.
                if ':' in line:
                    location, name = line.split(':', 1)
                else:
                    location = ''
                    name = line
                package_dict[name] = Package(location=location, name=name, state=package_state)
            elif 'Packages' in line:
                found = True

        return package_dict
class ASR9KInventoryParser(BaseInventoryParser):
    """Inventory parser for ASR9K IOS-XR devices."""

    REGEX_SATELLITE_CHASSIS = re.compile(r'satellite chassis', flags=re.IGNORECASE)

    def parse_inventory_output(self, output):
        """
        Get everything except for the Generic Fan inventories from the
        inventory data.

        :param output: raw 'show inventory' text.
        :return: list of dicts, one per inventory entry.
        """
        return [m.groupdict() for m in self.REGEX_BASIC_PATTERN.finditer(output)
                if 'Generic Fan' not in m.group('description')]

    def process_inventory(self, ctx):
        """
        For ASR9K IOS-XR.
        There is only one chassis in this case. It most likely shows up last
        in the output of "admin show inventory".
        Example:
        NAME: "chassis ASR-9006-AC", DESCR: "ASR 9006 4 Line Card Slot Chassis with V1 AC PEM"
        PID: ASR-9006-AC, VID: V01, SN: FOX1523H7HA
        """
        if not ctx.load_data('cli_show_inventory'):
            return
        inventory_output = ctx.load_data('cli_show_inventory')[0]
        inventory_data = self.parse_inventory_output(inventory_output)

        chassis_indices = []
        for idx, entry in enumerate(inventory_data):
            # A chassis entry must match in both name and description, and
            # must not be a satellite chassis (satellites are not the host).
            if self.REGEX_CHASSIS.search(entry['name']) and \
                    not self.REGEX_SATELLITE_CHASSIS.search(entry['name']) and \
                    self.REGEX_CHASSIS.search(entry['description']):
                chassis_indices.append(idx)

        if chassis_indices:
            return self.store_inventory(ctx, inventory_data, chassis_indices)

        logger = get_db_session_logger(ctx.db_session)
        # logger.error, not logger.exception: there is no active exception
        # here, so exception() would append a spurious "NoneType: None"
        # traceback to the log record.
        logger.error('Failed to find chassis in inventory output for host {}.'.format(ctx.host.hostname))
        return
class CRSInventoryParser(BaseInventoryParser):
    """Inventory parser for CRS IOS-XR devices."""

    def process_inventory(self, ctx):
        """
        For CRS.
        There can be more than one chassis in this case.
        Example for CRS:
        NAME: "Rack 0 - Chassis", DESCR: "CRS 16 Slots Line Card Chassis for CRS-16/S-B"
        PID: CRS-16-LCC-B, VID: V03, SN: FXS1804Q576
        """
        if not ctx.load_data('cli_show_inventory'):
            return
        inventory_output = ctx.load_data('cli_show_inventory')[0]
        inventory_data = self.parse_inventory_output(inventory_output)

        chassis_indices = []
        for idx, entry in enumerate(inventory_data):
            # A chassis entry must match in both name and description.
            if self.REGEX_CHASSIS.search(entry['name']) and \
                    self.REGEX_CHASSIS.search(entry['description']):
                chassis_indices.append(idx)

        if chassis_indices:
            return self.store_inventory(ctx, inventory_data, chassis_indices)

        logger = get_db_session_logger(ctx.db_session)
        # logger.error, not logger.exception: no exception is in flight here,
        # so exception() would log a spurious "NoneType: None" traceback.
        logger.error('Failed to find chassis in inventory output for host {}.'.format(ctx.host.hostname))
        return
|
MatthewJWalls/FlaskStrap | refs/heads/master | core/__init__.py | 12133432 | |
Laurawly/tvm-1 | refs/heads/master | tests/python/unittest/test_auto_scheduler_loop_state.py | 5 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test loop state and schedule primitives"""
import numpy as np
import tvm
from tvm import auto_scheduler, te
from tvm import topi
from test_auto_scheduler_common import (
matmul_auto_scheduler_test,
conv2d_nchw_bn_relu_auto_scheduler_test,
)
def test_split_fuse_reorder_annotation():
    """Exercise split/fuse/reorder and every iterator annotation
    (bind/parallel/unroll/vectorize) on a 512x512x512 matmul state."""
    A, B, C = matmul_auto_scheduler_test(N=512, M=512, K=512)
    dag = auto_scheduler.ComputeDAG([A, B, C])
    s0 = dag.get_init_state()
    i, j, k = s0[C].iters
    assert i.range.extent == 512
    # Split i (512) into 32 x 16; the new iterators replace i in order.
    io, ii = s0.split(C, i, [16])
    assert s0[C].iters[0] == io
    assert s0[C].iters[1] == ii
    assert io.range.extent == 32
    assert ii.range.extent == 16
    jo, ji = s0.split(C, j, [8])
    assert jo.range.extent == 64
    assert ji.range.extent == 8
    s0.reorder(C, [io, jo, k, ji, ii])
    assert s0[C].iters[2].range.extent == 512
    # Fusing 32 x 64 outer iterators yields one iterator of extent 2048.
    fused_it = s0.fuse(C, [io, jo])
    assert fused_it.range.extent == 2048
    s1 = dag.get_init_state()
    i, j, _ = s1[C].iters
    # inner_to_outer=True (default) vs False flips how the factor list is applied.
    i1, i2, i3 = s1.split(C, i, [8, 2])
    j1, j2, j3 = s1.split(C, j, [32, 8], False)
    assert s1[C].iters[0].range.extent == 32
    assert s1[C].iters[1].range.extent == 8
    assert s1[C].iters[2].range.extent == 2
    assert s1[C].iters[3].range.extent == 32
    assert s1[C].iters[4].range.extent == 8
    assert s1[C].iters[5].range.extent == 2
    # Each annotation call returns the annotated iterator in place.
    res = s1.bind(C, i1, "blockIdx.x")
    assert res == s1[C].iters[0]
    assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["blockIdx.x"]
    res = s1.bind(C, i2, "vthread")
    assert res == s1[C].iters[1]
    assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["vthread"]
    res = s1.bind(C, i3, "threadIdx.y")
    assert res == s1[C].iters[2]
    assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["threadIdx.y"]
    res = s1.parallel(C, j1)
    assert res == s1[C].iters[3]
    assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["parallel"]
    res = s1.unroll(C, j2)
    assert res == s1[C].iters[4]
    assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["unroll"]
    res = s1.vectorize(C, j3)
    assert res == s1[C].iters[5]
    assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["vectorize"]
def test_compute_at_root_inline():
    """Check compute_inline / compute_at / compute_root transitions and the
    iterator bounds they imply on a conv2d+bn+relu DAG."""
    dag = auto_scheduler.ComputeDAG(
        conv2d_nchw_bn_relu_auto_scheduler_test(
            N=1, H=224, W=224, CI=3, CO=64, kernel_size=7, strides=2, padding=3
        )
    )
    s0 = dag.get_init_state()
    # Stage indices in the DAG topological order:
    # data, padding, kernel = 0, 1, 2
    conv = s0.stage_ops[3]
    # bias = 4
    bias_add = s0.stage_ops[5]
    # bn_scale = 6
    bn_mul = s0.stage_ops[7]
    # bn_offset = 8
    bn_add = s0.stage_ops[9]
    relu = s0.stage_ops[10]
    # compute_at == 1 marks an inlined stage.
    s0.compute_inline(bn_add)
    assert s0[bn_add].compute_at == 1
    s0.compute_inline(bn_mul)
    assert s0[bn_mul].compute_at == 1
    s0.compute_inline(bias_add)
    assert s0[bias_add].compute_at == 1
    assert s0[conv].iters[0].range.extent == 1
    assert s0[conv].iters[1].range.extent == 64
    assert s0[conv].iters[2].range.extent == 112
    assert s0[conv].iters[3].range.extent == 112
    assert s0[conv].iters[4].range.extent == 3
    assert s0[conv].iters[5].range.extent == 7
    assert s0[conv].iters[6].range.extent == 7
    # compute_at == 2 marks a stage attached to a consumer iterator; bounds
    # shrink after infer_bound_from_state.
    s0.compute_at(conv, relu, s0[relu].iters[2])
    assert s0[conv].compute_at == 2
    s0 = dag.infer_bound_from_state(s0)
    assert s0[conv].iters[0].range.extent == 1
    assert s0[conv].iters[1].range.extent == 1
    assert s0[conv].iters[2].range.extent == 1
    assert s0[conv].iters[3].range.extent == 112
    assert s0[conv].iters[4].range.extent == 3
    assert s0[conv].iters[5].range.extent == 7
    assert s0[conv].iters[6].range.extent == 7
    # compute_root (compute_at == 0) restores the full root bounds.
    s0.compute_root(bn_mul)
    assert s0[bn_mul].compute_at == 0
    s0.compute_root(conv)
    assert s0[conv].compute_at == 0
    s0 = dag.infer_bound_from_state(s0)
    assert s0[conv].iters[0].range.extent == 1
    assert s0[conv].iters[1].range.extent == 64
    assert s0[conv].iters[2].range.extent == 112
    assert s0[conv].iters[3].range.extent == 112
    assert s0[conv].iters[4].range.extent == 3
    assert s0[conv].iters[5].range.extent == 7
    assert s0[conv].iters[6].range.extent == 7
def test_cache_read_write():
    """Exercise cache_read / cache_write in five scenarios: simple write,
    simple read, two-level read, multi-reader read, and multi-output write
    (the last one documents a known TVM cache_write quirk)."""
    N, H, W, CO, CI, KH, KW, strides, padding = 4, 7, 7, 512, 512, 3, 3, (1, 1), (1, 1)
    data = te.placeholder((N, CI, H, W), name="Data")
    kernel_data = te.placeholder((CO, CI, KH, KW), name="Kernel_data")
    # Multi-output compute: Kernel_split produces two tensors from one op.
    k0, k1 = te.compute(
        kernel_data.shape,
        lambda *i: (kernel_data(*i) + 1, kernel_data(*i) / 2),
        name="Kernel_split",
    )
    kernel = te.compute(kernel_data.shape, lambda *i: k0(*i) + k1(*i), name="Kernel")
    conv = topi.nn.conv2d_nchw(data, kernel, strides, padding, dilation=1)
    relu = topi.nn.relu(conv)
    add = topi.add(data, relu)
    dag = auto_scheduler.ComputeDAG([data, kernel_data, add])
    s0 = dag.get_init_state()
    pad_temp = s0.stage_ops[1]
    kernel_split = s0.stage_ops[3]
    # 0: init state
    ori_its = s0[add].iters
    its = s0.split(add, s0[add].iters[0], [2])
    s0.reorder(add, [its[0], ori_its[1], its[1], ori_its[2], ori_its[3]])
    s0.compute_inline(relu)
    # 1: simple cache_write with compute_at
    conv_global = s0.cache_write(conv, "global")
    s0.compute_at(conv_global, conv, s0[conv].iters[3])
    # 2: simple cache_read with compute_at
    kernel_global = s0.cache_read(kernel, "global", [conv_global])
    s0.compute_at(kernel_global, conv_global, s0[conv_global].iters[4])
    """
    Placeholder: Data, Kernel_data
    for i0 (0,4)
      for i1 (0,512)
        for i2 (0,9)
          for i3 (0,9)
            pad_temp = ...
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel_split = ...
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel = ...
    for nn (0,4)
      for ff (0,512)
        for yy (0,7)
          for xx (0,7)
            for nn_c (None)
              for ff_c (None)
                for yy_c (None)
                  for xx_c (None)
                    for rc (None)
                      for ax0 (None)
                        for ax1 (None)
                          for ax2 (None)
                            for ax3 (None)
                              Kernel.global = ...
                      for ry (None)
                        for rx (None)
                          compute.global = ...
            compute = ...
    for ax0.0 (0,2)
      for ax1 (0,512)
        for ax0.1 (0,2)
          for ax2 (0,7)
            for ax3 (0,7)
              T_add = ...
    """
    s1 = dag.infer_bound_from_state(s0)
    assert s1[conv].iters[0].range.extent == 4
    assert s1[conv].iters[1].range.extent == 512
    assert s1[conv].iters[2].range.extent == 7
    assert s1[conv].iters[3].range.extent == 7
    assert s1[kernel_global].iters[0].range.extent == 1
    assert s1[kernel_global].iters[1].range.extent == 1
    assert s1[kernel_global].iters[2].range.extent == 3
    assert s1[kernel_global].iters[3].range.extent == 3
    assert s1[conv_global].iters[0].range.extent == 1
    assert s1[conv_global].iters[1].range.extent == 1
    assert s1[conv_global].iters[2].range.extent == 1
    assert s1[conv_global].iters[3].range.extent == 1
    assert s1[conv_global].iters[4].range.extent == 512
    assert s1[conv_global].iters[5].range.extent == 3
    assert s1[conv_global].iters[6].range.extent == 3
    # 3: two level cache_read with compute_at
    # preparing for GPU's shared memory & local memory
    pad_temp_global = s0.cache_read(pad_temp, "global", [conv_global])
    pad_temp_shared = s0.cache_read(pad_temp_global, "shared", [conv_global])
    s0.compute_at(pad_temp_global, conv_global, s0[conv_global].iters[2])
    s0.compute_at(pad_temp_shared, conv_global, s0[conv_global].iters[4])
    # 4: cache_read with multi readers
    # This stage cannot be compute at to its consumer
    s0.cache_read(data, "global", [pad_temp, add])
    """
    Placeholder: Data, Kernel_data
    for ax0 (0,4)
      for ax1 (0,512)
        for ax2 (0,7)
          for ax3 (0,7)
            Data.global = ...
    for i0 (0,4)
      for i1 (0,512)
        for i2 (0,9)
          for i3 (0,9)
            pad_temp = ...
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel_split = ...
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel = ...
    for nn (0,4)
      for ff (0,512)
        for yy (0,7)
          for xx (0,7)
            for nn_c (None)
              for ff_c (None)
                for yy_c (None)
                  for ax0 (None)
                    for ax1 (None)
                      for ax2 (None)
                        for ax3 (None)
                          pad_temp.global = ...
                  for xx_c (None)
                    for rc (None)
                      for ax0 (None)
                        for ax1 (None)
                          for ax2 (None)
                            for ax3 (None)
                              Kernel.global = ...
                      for ax0 (None)
                        for ax1 (None)
                          for ax2 (None)
                            for ax3 (None)
                              pad_temp.global.shared = ...
                      for ry (None)
                        for rx (None)
                          compute.global = ...
            compute = ...
    for ax0.0 (0,2)
      for ax1 (0,512)
        for ax0.1 (0,2)
          for ax2 (0,7)
            for ax3 (0,7)
              T_add = ...
    """
    s1 = dag.infer_bound_from_state(s0)
    assert s1[conv].iters[0].range.extent == 4
    assert s1[conv].iters[1].range.extent == 512
    assert s1[conv].iters[2].range.extent == 7
    assert s1[conv].iters[3].range.extent == 7
    assert s1[kernel_global].iters[0].range.extent == 1
    assert s1[kernel_global].iters[1].range.extent == 1
    assert s1[kernel_global].iters[2].range.extent == 3
    assert s1[kernel_global].iters[3].range.extent == 3
    assert s1[conv_global].iters[0].range.extent == 1
    assert s1[conv_global].iters[1].range.extent == 1
    assert s1[conv_global].iters[2].range.extent == 1
    assert s1[conv_global].iters[3].range.extent == 1
    assert s1[conv_global].iters[4].range.extent == 512
    assert s1[conv_global].iters[5].range.extent == 3
    assert s1[conv_global].iters[6].range.extent == 3
    assert s1[pad_temp_global].iters[0].range.extent == 1
    assert s1[pad_temp_global].iters[1].range.extent == 512
    assert s1[pad_temp_global].iters[2].range.extent == 3
    assert s1[pad_temp_global].iters[3].range.extent == 3
    assert s1[pad_temp_shared].iters[0].range.extent == 1
    assert s1[pad_temp_shared].iters[1].range.extent == 1
    assert s1[pad_temp_shared].iters[2].range.extent == 3
    assert s1[pad_temp_shared].iters[3].range.extent == 3
    # 5: cache_write with multi outputs
    # TVM's cache_write actually has a bug with this case:
    #
    # After schedule.cache_write, TVM generate one new stage:
    #   From: kernel_data -> kernel_split -> kernel
    #   To:   kernel_data -> kernel_split_global -> kernel_split -> kernel
    #
    # But with topo sort analyse, we get:
    #  //   kernel_data -> kernel_split_global -> kernel_split -> kernel
    #         \                                                /
    #          ----------------> kernel_split ---------------->
    #
    # TODO(jcf94): Seems there's bug with the input/output tensor. Such multi outputs case
    # should be unusual, so we make some hack on DoCacheWrite. This should be fixed later.
    kernel_split_global = s0.cache_write(kernel_split, "global")
    """
    Placeholder: Data, Kernel_data
    for ax0 (0,4)
      for ax1 (0,512)
        for ax2 (0,7)
          for ax3 (0,7)
            Data.global = ...
    for i0 (0,4)
      for i1 (0,512)
        for i2 (0,9)
          for i3 (0,9)
            pad_temp = ...
    for i0_c (0,512)
      for i1_c (0,512)
        for i2_c (0,3)
          for i3_c (0,3)
            Kernel_split.global = ...
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel_split = ...
    (******* Bug here, there should not be two kernel_split stage *******)
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel_split = ...
    (******* Bug here, there should not be two kernel_split stage *******)
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel = ...
    for nn (0,4)
      for ff (0,512)
        for yy (0,7)
          for xx (0,7)
            for nn_c (None)
              for ff_c (None)
                for yy_c (None)
                  for ax0 (None)
                    for ax1 (None)
                      for ax2 (None)
                        for ax3 (None)
                          pad_temp.global = ...
                  for xx_c (None)
                    for rc (None)
                      for ax0 (None)
                        for ax1 (None)
                          for ax2 (None)
                            for ax3 (None)
                              Kernel.global = ...
                      for ax0 (None)
                        for ax1 (None)
                          for ax2 (None)
                            for ax3 (None)
                              pad_temp.global.shared = ...
                      for ry (None)
                        for rx (None)
                          compute.global = ...
            compute = ...
    for ax0.0 (0,2)
      for ax1 (0,512)
        for ax0.1 (0,2)
          for ax2 (0,7)
            for ax3 (0,7)
              T_add = ...
    """
    assert len(s0[kernel_split].iters) == len(s0[kernel_split_global].iters)
    for it0, it1 in zip(s0[kernel_split].iters, s0[kernel_split_global].iters):
        assert it0.range == it1.range
def test_follow_split_follow_fused_split():
    """Check follow_split / follow_fused_split: a cached stage's split must
    mirror the consumer's recorded split (and fused-split) transform steps."""
    A, B, C = matmul_auto_scheduler_test(512, 512, 512)
    dag = auto_scheduler.ComputeDAG([A, B, C])
    s0 = dag.get_init_state()
    C_global = s0.cache_write(C, "global")
    its0 = s0.split(C, s0[C].iters[0], [4, 2, 8, 4], True)
    # Index of the split step just recorded in the transform history.
    split_step0 = len(s0.transform_steps) - 1
    for level in range(1, 6):
        tmp = s0.copy()
        tmp.follow_split(C_global, tmp[C_global].iters[0], split_step0, level)
        # The first `level` iterators must match the followed split exactly.
        for i in range(0, level):
            assert tmp[C].iters[i].range.extent == tmp[C_global].iters[i].range.extent
    its1 = s0.split(C, s0[C].iters[5], [2, 2, 4, 8])
    split_step1 = len(s0.transform_steps) - 1
    # Interleave the two split results, then fuse adjacent pairs.
    its = []
    for i0, i1 in zip(its0, its1):
        its.append(i0)
        its.append(i1)
    s0.reorder(C, its)
    for i in range(0, 5):
        s0.fuse(C, [s0[C].iters[i], s0[C].iters[i + 1]])
    # factor_or_nparts=False: follow with the split factor.
    for level in range(0, 4):
        tmp = s0.copy()
        tmp.follow_fused_split(
            C_global, tmp[C_global].iters[0], [split_step0, split_step1], level, False
        )
        assert tmp[C].iters[level + 1].range.extent == tmp[C_global].iters[0].range.extent
    # factor_or_nparts=True: follow with the number of parts instead.
    for level in range(0, 4):
        tmp = s0.copy()
        tmp.follow_fused_split(
            C_global, tmp[C_global].iters[0], [split_step0, split_step1], level, True
        )
        assert tmp[C].iters[level + 1].range.extent == tmp[C_global].iters[1].range.extent
def test_rfactor():
    """Check rfactor on both the outer and inner halves of a split
    reduction axis of an 8x8x512 matmul."""
    A, B, C = matmul_auto_scheduler_test(8, 8, 512)
    dag = auto_scheduler.ComputeDAG([A, B, C])
    s0 = dag.get_init_state()
    ko, ki = s0.split(C, s0[C].iters[2], [16])
    s1 = s0.copy()
    # rfactor over the outer reduction iterator ko.
    C_r = s1.rfactor(C, ko, 2)
    """
    Placeholder: A, B
    for i (0,8)
      for j (0,8)
        for k_o (0,32)
          for k_i (0,16)
            C.rf = ...
    for ax0 (0,8)
      for ax1 (0,8)
        for k_o_v (0,32)
          C.repl = ...
    """
    assert s1[C_r].iters[0].range.extent == 8
    assert s1[C_r].iters[1].range.extent == 8
    assert s1[C_r].iters[2].range.extent == 32
    assert s1[C_r].iters[3].range.extent == 16
    assert s1[C].iters[0].range.extent == 8
    assert s1[C].iters[1].range.extent == 8
    assert s1[C].iters[2].range.extent == 32
    s2 = s0.copy()
    # rfactor over the inner reduction iterator ki.
    C_r = s2.rfactor(C, ki, 2)
    """
    Placeholder: A, B
    for i (0,8)
      for j (0,8)
        for k_i (0,16)
          for k_o (0,32)
            C.rf = ...
    for ax0 (0,8)
      for ax1 (0,8)
        for k_i_v (0,16)
          C.repl = ...
    """
    assert s2[C_r].iters[0].range.extent == 8
    assert s2[C_r].iters[1].range.extent == 8
    assert s2[C_r].iters[2].range.extent == 16
    assert s2[C_r].iters[3].range.extent == 32
    assert s2[C].iters[0].range.extent == 8
    assert s2[C].iters[1].range.extent == 8
    assert s2[C].iters[2].range.extent == 16
if __name__ == "__main__":
    # Allow running the whole suite directly as a script (without pytest).
    test_split_fuse_reorder_annotation()
    test_compute_at_root_inline()
    test_cache_read_write()
    test_follow_split_follow_fused_split()
    test_rfactor()
|
AlpineNow/python-alpine-api | refs/heads/master | tests/api/test_job.py | 1 | import os, time
import pytz
from datetime import datetime,timedelta
from alpine import APIClient
from alpine.exception import *
from alpine.job import *
from alpine.datasource import *
from .alpineunittest import AlpineTestCase
class TestJob(AlpineTestCase):
def setUp(self):
super(TestJob, self).setUp()
# Creating Alpine Client in setUp Function for tests
global alpine_client
global login_info
alpine_client = APIClient(self.host, self.port)
login_info = alpine_client.login(self.username, self.password)
global workspace_id
global job_id
global task_id
global workflow_id
gpdb_datasource_name = "API_Test_GPDB"
hadoop_datasource_name = "API_Test_Hadoop"
database_name = "miner_demo"
# Creating a Workspace for Job tests
db_datasource_id = alpine_client.datasource.get_id(gpdb_datasource_name, "Database")
hadoop_datasource_id = alpine_client.datasource.get_id(hadoop_datasource_name, "Hadoop")
try:
workspace_id = alpine_client.workspace.get_id("Workspace for Job Tests")
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info = alpine_client.workspace.create("Workspace for Job Tests")
workspace_id = workspace_info['id']
workspace_name = workspace_info['name']
# Upload a DB flow
base_dir = os.getcwd()
afm_path = "{0}/data/afm/db_row_fil_with_variable.afm".format(base_dir)
# afm_path = "db_bat_row_fil.afm"
try:
workfile_id = alpine_client.workfile.get_id("db_row_fil_with_variable", workspace_id)
alpine_client.workfile.delete(workfile_id)
except WorkfileNotFoundException:
pass
data_source_id = alpine_client.datasource.get_id(gpdb_datasource_name, "Database")
database_list = alpine_client.datasource.get_database_list(data_source_id)
for database in database_list:
if database['name'] == "miner_demo":
database_id = database['id']
datasource_info = [{"data_source_type": DataSource.DSType.GreenplumDatabase,
"data_source_id": db_datasource_id,
"database_id": database_id
}]
workfile_info = alpine_client.workfile.upload(workspace_id, afm_path, datasource_info)
workflow_id = workfile_info['id']
workfile_name = workfile_info['file_name']
# Creating a Workspace for Job tests
job_info = alpine_client.job.create(workspace_id, "Job for Test")
job_id = job_info['id']
task_info = alpine_client.job.task.create(workspace_id, job_id, workflow_id)
task_id = task_info['id']
def tearDown(self):
try:
alpine_client.workspace.delete(workspace_id)
alpine_client.logout()
except:
pass
def test_get_jobs_list(self):
jobs_list = alpine_client.job.get_list(workspace_id)
self.assertIsNotNone(jobs_list)
def test_get_job_detail(self):
job_detail = alpine_client.job.get(workspace_id, job_id)
self.assertEqual(job_detail['name'], "Job for Test")
def test_get_job_id(self):
job_name = "test"
try:
job_id = alpine_client.job.get_id(workspace_id, job_name)
alpine_client.job.delete(workspace_id, job_id)
except JobNotFoundException:
pass
job_info=alpine_client.job.create(workspace_id, job_name)
job_id = alpine_client.job.get_id(workspace_id, job_name)
self.assertEqual(job_id, job_info['id'])
def test_create_job_on_demand(self):
job_name = "test_job_schedule_on_demand"
try:
job_id = alpine_client.job.get_id(workspace_id, job_name)
alpine_client.job.delete(workspace_id, job_id)
except JobNotFoundException:
pass
job_info = alpine_client.job.create(workspace_id, job_name, Job.ScheduleType.OnDemand, 0, "")
self.assertIsNotNone(job_info)
def test_create_job_on_monthly(self):
job_name = "test_job_schedule_monthly"
next_run_datetime_format = "%Y-%m-%dT%H:%M:%S"
job_interval_unit = Job.ScheduleType.Monthly
job_interval_value = 1
try:
job_id = alpine_client.job.get_id(workspace_id, job_name)
alpine_client.job.delete(workspace_id, job_id)
except JobNotFoundException:
pass
start_time = (datetime.today().now(pytz.utc) + timedelta(minutes = 1)).strftime(next_run_datetime_format)
print(start_time)
job_info = alpine_client.job.create(workspace_id, job_name,
job_interval_unit, job_interval_value, start_time)
print(job_info)
self.assertIsNotNone(job_info)
self.assertEqual(job_info['name'], job_name)
self.assertEqual(job_info['interval_unit'],job_interval_unit )
self.assertEqual(job_info['interval_value'],job_interval_value)
next_run_info_utc = datetime.strptime("".join(job_info['next_run'].rsplit('Z', 1)),next_run_datetime_format).\
replace(tzinfo = pytz.utc)
self.assertEqual(next_run_info_utc.strftime(next_run_datetime_format),
start_time,next_run_datetime_format)
def test_create_job_on_weekly(self):
job_name = "test_job_schedule_weekly"
next_run_datetime_format = "%Y-%m-%dT%H:%M:%S"
job_interval_unit = Job.ScheduleType.Weekly
job_interval_value = 1
try:
job_id = alpine_client.job.get_id(workspace_id, job_name)
alpine_client.job.delete(workspace_id, job_id)
except JobNotFoundException:
pass
start_time = (datetime.today().now(pytz.utc) + timedelta(minutes = 1)).strftime(next_run_datetime_format)
print(start_time)
job_info = alpine_client.job.create(workspace_id, job_name,
job_interval_unit, job_interval_value, start_time)
print(job_info)
self.assertIsNotNone(job_info)
self.assertEqual(job_info['name'], job_name)
self.assertEqual(job_info['interval_unit'],job_interval_unit )
self.assertEqual(job_info['interval_value'],job_interval_value)
next_run_info_utc = datetime.strptime("".join(job_info['next_run'].rsplit('Z', 1)),next_run_datetime_format).\
replace(tzinfo = pytz.utc)
self.assertEqual(next_run_info_utc.strftime(next_run_datetime_format),
start_time,next_run_datetime_format)
def test_create_job_on_daily(self):
job_name = "test_job_schedule_daily"
next_run_datetime_format = "%Y-%m-%dT%H:%M:%S"
job_interval_unit = Job.ScheduleType.Daily
job_interval_value = 1
try:
job_id = alpine_client.job.get_id(workspace_id, job_name)
alpine_client.job.delete(workspace_id, job_id)
except JobNotFoundException:
pass
#start_time = (datetime.today().now(timezone.utc) + timedelta(minutes = 1)).strftime(next_run_datetime_format)
job_info = alpine_client.job.create(workspace_id, job_name,
job_interval_unit, job_interval_value)
print(job_info)
self.assertIsNotNone(job_info)
self.assertEqual(job_info['name'], job_name)
self.assertEqual(job_info['interval_unit'],job_interval_unit )
self.assertEqual(job_info['interval_value'],job_interval_value)
next_run_info_utc = datetime.strptime("".join(job_info['next_run'].rsplit('Z', 1)),next_run_datetime_format).\
replace(tzinfo = pytz.utc)
self.assertIsNotNone(next_run_info_utc.strftime(next_run_datetime_format))
def test_create_job_on_hourly(self):
job_name = "test_job_schedule_hourly"
next_run_datetime_format = "%Y-%m-%dT%H:%M:%S"
job_interval_unit = Job.ScheduleType.Hourly
job_interval_value = 1
try:
job_id = alpine_client.job.get_id(workspace_id, job_name)
alpine_client.job.delete(workspace_id, job_id)
except JobNotFoundException:
pass
#start_time = (datetime.today().now(timezone.utc) + timedelta(minutes = 1)).strftime(next_run_datetime_format)
start_time = datetime.today().now(pytz.utc) + timedelta(minutes=1)
print(start_time)
job_info = alpine_client.job.create(workspace_id, job_name,
job_interval_unit, job_interval_value, start_time)
print(job_info)
self.assertIsNotNone(job_info)
self.assertEqual(job_info['name'], job_name)
self.assertEqual(job_info['interval_unit'],job_interval_unit )
self.assertEqual(job_info['interval_value'],job_interval_value)
next_run_info_utc = datetime.strptime("".join(job_info['next_run'].rsplit('Z', 1)),next_run_datetime_format).\
replace(tzinfo = pytz.utc)
self.assertEqual(next_run_info_utc.strftime(next_run_datetime_format),
start_time.strftime(next_run_datetime_format))
def test_delete_job_from_workspace(self):
job_name = "test_job_to_be_deleted"
try:
job_id = alpine_client.job.get_id(workspace_id, job_name)
alpine_client.job.delete(workspace_id, job_id)
except JobNotFoundException:
pass
job_info = alpine_client.job.create(workspace_id, job_name)
response = alpine_client.job.delete(workspace_id, job_info['id'])
# Verify the Job is successfully deleted
try:
alpine_client.job.get(workspace_id, job_info['id'])
except JobNotFoundException:
pass
else:
self.fail("Failed to Delete the Job {0}".format(job_name))
def test_get_tasks_on_a_job(self):
tasks_list = alpine_client.job.task.get_list(workspace_id, job_id)
self.assertNotEqual(0, len(tasks_list))
self.assertEqual(tasks_list[0]['id'], task_id)
def test_add_workflow_task(self):
job_name = "test_job_with_workflow_tasks"
next_run_datetime_format = "%Y-%m-%dT%H:%M:%S"
#next_run_datetime_format = "%Y-%m-%dT%H:%M:%S"
job_interval_unit = Job.ScheduleType.Weekly
job_interval_value = 2
try:
job_id = alpine_client.job.get_id(workspace_id, job_name)
alpine_client.job.delete(workspace_id, job_id)
except JobNotFoundException:
pass
#start_time = (datetime.today().now(pytz.timezone('US/Pacific')) + timedelta(minutes = 1)).strftime(next_run_datetime_format)
start_time = datetime.today().now(pytz.utc) + timedelta(minutes = 1)
print(start_time)
job_info = alpine_client.job.create(workspace_id, job_name, job_interval_unit,job_interval_value,
start_time, pytz.utc)
task_info = alpine_client.job.task.create(workspace_id, job_info['id'], workflow_id)
self.assertIsNotNone(task_info)
self.assertTrue(task_info['is_valid'])
wait_interval_max = 300
print("Wait for max to {0} seconds for the scheduled job run finished".format(wait_interval_max))
for i in range(0, wait_interval_max):
job_info_new = alpine_client.job.get(workspace_id, job_info['id'])
if job_info_new['last_run'] is None:
time.sleep(1)
else:
break
self.assertIsNotNone(job_info_new['last_run'])
next_run_info_new = datetime.strptime("".join(job_info_new['next_run'].rsplit('Z', 1)),
next_run_datetime_format).replace(tzinfo=pytz.utc)
self.assertEqual(next_run_info_new.strftime(next_run_datetime_format),
(start_time + timedelta(weeks=2)).strftime(next_run_datetime_format))
def test_get_task_info(self):
task_info = alpine_client.job.task.get(workspace_id, job_id, task_id)
self.assertEqual(task_info['id'], task_id)
def test_get_task_id(self):
task_name = alpine_client.job.task.get(workspace_id, job_id, task_id)['name']
task_id_get = alpine_client.job.task.get_id(workspace_id, job_id, task_name)
self.assertEqual(task_id,task_id_get)
def test_delete_task(self):
job_info = alpine_client.job.create(workspace_id, "Job for Test Delete Tasks")
task_info = alpine_client.job.task.create(workspace_id, job_info['id'], workflow_id)
tasks = alpine_client.job.task.get_list(workspace_id, job_info['id'])
self.assertEqual(1, len(tasks))
time.sleep(3)
alpine_client.job.task.delete(workspace_id, job_info['id'], task_info['id'])
time.sleep(3)
new_tasks = alpine_client.job.task.get_list(workspace_id, job_info['id'])
self.assertEqual(0, len(new_tasks))
try:
alpine_client.job.task.get(workspace_id, job_info['id'], task_info['id'])
except TaskNotFoundException:
pass
else:
self.fail("Failed to Delete the Task {0}".format(task_info['name']))
    def test_run_workflow_task(self):
        """Create an on-demand job with a workflow task, trigger it manually,
        then poll until the run is recorded as finished."""
        job_name = "test_job_with_workflow_tasks"
        #next_run_datetime_format = "%Y-%m-%dT%H:%M:%S"
        #next_run_datetime_format = "%Y-%m-%dT%H:%M:%S%z"
        job_interval_unit = Job.ScheduleType.OnDemand
        job_interval_value = 0
        # Delete any leftover job from a previous run so creation below starts clean.
        try:
            job_id = alpine_client.job.get_id(workspace_id, job_name)
            alpine_client.job.delete(workspace_id, job_id)
        except JobNotFoundException:
            pass
        #start_time = (datetime.today().now(pytz.timezone('US/Pacific')) + timedelta(minutes = 1)).strftime(next_run_datetime_format)
        #start_time = datetime.today().now(pytz.utc) + timedelta(minutes = 1)
        #print(start_time)
        job_info = alpine_client.job.create(workspace_id, job_name, job_interval_unit,job_interval_value)
        task_info = alpine_client.job.task.create(workspace_id, job_info['id'], workflow_id)
        self.assertIsNotNone(task_info)
        self.assertTrue(task_info['is_valid'])
        time.sleep(1)
        # On-demand jobs never run on a schedule; kick this one off by hand.
        s = alpine_client.job.run(job_info['id'])
        # Poll (up to wait_interval_max seconds) until the run completes.
        wait_interval_max = 300
        print("Wait for max to {0} seconds for the scheduled job run finished".format(wait_interval_max))
        for i in range(0, wait_interval_max):
            job_info_new = alpine_client.job.get(workspace_id, job_info['id'])
            if job_info_new['last_run'] is None or job_info_new['status'] == "running":
                time.sleep(1)
            else:
                break
        self.assertIsNotNone(job_info_new['last_run'])
|
alexcuellar/odoo | refs/heads/8.0 | addons/procurement_jit/__openerp__.py | 312 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Just In Time Scheduling',
'version': '1.0',
'category': 'Base',
'description': """
This module allows Just In Time computation of procurement orders.
==================================================================
If you install this module, you will not have to run the regular procurement
scheduler anymore (but you still need to run the minimum order point rule
scheduler, or for example let it run daily).
All procurement orders will be processed immediately, which could in some
cases entail a small performance impact.
It may also increase your stock size because products are reserved as soon
as possible and the scheduler time range is not taken into account anymore.
In that case, you can not use priorities any more on the different picking.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/manufacturing',
'depends': ['procurement'],
'data': [],
'demo': [],
'test': ['test/procurement_jit.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dmitry-r/incubator-airflow | refs/heads/master | tests/dags/test_double_trigger.py | 39 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
DEFAULT_DATE = datetime(2016, 1, 1)

args = {
    'owner': 'airflow',
    'start_date': DEFAULT_DATE,
}

# Minimal fixture DAG: a single no-op task. Presumably referenced by tests
# via its dag_id/task_id (see filename) — verify against the test suite.
dag = DAG(dag_id='test_localtaskjob_double_trigger', default_args=args)
task = DummyOperator(
    task_id='test_localtaskjob_double_trigger_task',
    dag=dag)
|
youprofit/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/QueueStatusServer/handlers/showresults.py | 146 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp
from model.queuestatus import QueueStatus
class ShowResults(webapp.RequestHandler):
    """Serves the stored results file of a QueueStatus record as plain text."""

    def get(self, status_id):
        queue_status = QueueStatus.get_by_id(int(status_id))
        if not queue_status:
            # Unknown id: respond 404 and stop.
            self.error(404)
            return
        self.response.headers["Content-Type"] = "text/plain; charset=utf-8"
        self.response.out.write(queue_status.results_file)
|
arantebillywilson/python-snippets | refs/heads/master | microblog/flask/lib/python3.5/site-packages/pip/utils/hashes.py | 517 | from __future__ import absolute_import
import hashlib
from pip.exceptions import HashMismatch, HashMissing, InstallationError
from pip.utils import read_chunks
from pip._vendor.six import iteritems, iterkeys, itervalues
# The recommended hash algo of the moment. Change this whenever the state of
# the art changes; it won't hurt backward compatibility.
FAVORITE_HASH = 'sha256'
# Names of hashlib algorithms allowed by the --hash option and ``pip hash``
# Currently, those are the ones at least as collision-resistant as sha256.
STRONG_HASHES = ['sha256', 'sha384', 'sha512']
class Hashes(object):
    """A wrapper that builds multiple hashes at once and checks them against
    known-good values
    """
    def __init__(self, hashes=None):
        """
        :param hashes: A dict of algorithm names pointing to lists of allowed
            hex digests
        """
        if hashes is None:
            self._allowed = {}
        else:
            self._allowed = hashes

    def check_against_chunks(self, chunks):
        """Check good hashes against ones built from iterable of chunks of
        data.

        Raise HashMismatch if none match.
        """
        computed = {}
        for hash_name in iterkeys(self._allowed):
            try:
                computed[hash_name] = hashlib.new(hash_name)
            except (ValueError, TypeError):
                raise InstallationError('Unknown hash name: %s' % hash_name)

        # Feed every chunk through every hasher in one pass over the data.
        for chunk in chunks:
            for hasher in itervalues(computed):
                hasher.update(chunk)

        # Any single matching digest is enough.
        for hash_name, hasher in iteritems(computed):
            if hasher.hexdigest() in self._allowed[hash_name]:
                return
        self._raise(computed)

    def _raise(self, gots):
        raise HashMismatch(self._allowed, gots)

    def check_against_file(self, file):
        """Check good hashes against a file-like object

        Raise HashMismatch if none match.
        """
        return self.check_against_chunks(read_chunks(file))

    def check_against_path(self, path):
        with open(path, 'rb') as fileobj:
            return self.check_against_file(fileobj)

    def __nonzero__(self):
        """Return whether I know any known-good hashes."""
        return bool(self._allowed)

    def __bool__(self):
        return self.__nonzero__()
class MissingHashes(Hashes):
    """A workalike for Hashes used when we're missing a hash for a requirement

    It computes the actual hash of the requirement and raises a HashMissing
    exception showing it to the user.
    """
    def __init__(self):
        """Don't offer the ``hashes`` kwarg."""
        # Pass our favorite hash in to generate a "gotten hash". With the
        # empty list, it will never match, so an error will always raise.
        super(MissingHashes, self).__init__(hashes={FAVORITE_HASH: []})

    def _raise(self, gots):
        # Surface the digest that was actually computed so the user can
        # copy it into their requirements file.
        raise HashMissing(gots[FAVORITE_HASH].hexdigest())
|
FederatedAI/FATE | refs/heads/master | python/fate_arch/session/__init__.py | 1 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.computing import is_table
from fate_arch.session._parties import PartiesInfo
from fate_arch.session._session import Session, get_latest_opened, computing_session, runtime_parties
__all__ = ['is_table', 'Session', 'PartiesInfo', 'get_latest_opened', 'computing_session', 'runtime_parties']
|
jnerin/ansible | refs/heads/devel | lib/ansible/modules/database/proxysql/proxysql_manage_config.py | 42 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxysql_manage_config
version_added: "2.3"
author: "Ben Mildren (@bmildren)"
short_description: Writes the proxysql configuration settings between layers.
description:
- The M(proxysql_global_variables) module writes the proxysql configuration
settings between layers. Currently this module will always report a
changed state, so should typically be used with WHEN however this will
change in a future version when the CHECKSUM table commands are available
for all tables in proxysql.
options:
action:
description:
- The supplied I(action) combines with the supplied I(direction) to
provide the semantics of how we want to move the I(config_settings)
between the I(config_layers).
choices: [ "LOAD", "SAVE" ]
required: True
config_settings:
description:
- The I(config_settings) specifies which configuration we're writing.
choices: [ "MYSQL USERS", "MYSQL SERVERS", "MYSQL QUERY RULES",
"MYSQL VARIABLES", "ADMIN VARIABLES", "SCHEDULER" ]
required: True
direction:
description:
- FROM - denotes we're reading values FROM the supplied I(config_layer)
and writing to the next layer.
TO - denotes we're reading from the previous layer and writing TO the
supplied I(config_layer)."
choices: [ "FROM", "TO" ]
required: True
config_layer:
description:
- RUNTIME - represents the in-memory data structures of ProxySQL used by
the threads that are handling the requests.
MEMORY - (sometimes also referred as main) represents the in-memory
SQLite3 database.
DISK - represents the on-disk SQLite3 database.
CONFIG - is the classical config file. You can only LOAD FROM the
config file.
choices: [ "MEMORY", "DISK", "RUNTIME", "CONFIG" ]
required: True
extends_documentation_fragment:
- proxysql.connectivity
'''
EXAMPLES = '''
---
# This example saves the mysql users config from memory to disk. It uses
# supplied credentials to connect to the proxysql admin interface.
- proxysql_global_variables:
login_user: 'admin'
login_password: 'admin'
action: "SAVE"
config_settings: "MYSQL USERS"
direction: "FROM"
config_layer: "MEMORY"
# This example loads the mysql query rules config from memory to to runtime. It
# uses supplied credentials to connect to the proxysql admin interface.
- proxysql_global_variables:
config_file: '~/proxysql.cnf'
action: "LOAD"
config_settings: "MYSQL QUERY RULES"
direction: "TO"
config_layer: "RUNTIME"
'''
RETURN = '''
stdout:
description: Simply reports whether the action reported a change.
returned: Currently the returned value with always be changed=True.
type: dict
"sample": {
"changed": true
}
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.mysql import mysql_connect
from ansible.module_utils._text import to_native
# MySQLdb is required to talk to the ProxySQL admin interface; record its
# availability so perform_checks() can fail with a friendly message instead
# of an ImportError at call time.
try:
    import MySQLdb
except ImportError:
    MYSQLDB_FOUND = False
else:
    MYSQLDB_FOUND = True
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
    """Fail fast on invalid module parameters or a missing MySQLdb driver.

    Validates the admin port range, rejects any CONFIG-layer request other
    than "LOAD ... FROM CONFIG", and confirms the MySQLdb bindings loaded.
    """
    port = module.params["login_port"]
    if port < 0 or port > 65535:
        module.fail_json(
            msg="login_port must be a valid unix port number (0-65535)"
        )

    action = module.params["action"]
    direction = module.params["direction"]
    # The classical config file is read-only: only "LOAD ... FROM CONFIG" is
    # allowed. Name whichever part(s) of the request were invalid.
    if module.params["config_layer"] == 'CONFIG' and \
            not (action == 'LOAD' and direction == 'FROM'):
        if action == 'LOAD':
            # Only the direction is wrong.
            module.fail_json(
                msg=("The direction \"%s\" is not a valid combination"
                     " with the CONFIG config_layer") % direction
            )
        elif direction == 'FROM':
            # Only the action is wrong.
            module.fail_json(
                msg=("The action \"%s\" is not a valid combination"
                     " with the CONFIG config_layer") % action
            )
        else:
            # Both the action and the direction are wrong.
            module.fail_json(
                msg=("Neither the action \"%s\" nor the direction"
                     " \"%s\" are valid combination with the CONFIG"
                     " config_layer") % (action, direction)
            )

    if not MYSQLDB_FOUND:
        module.fail_json(
            msg="the python mysqldb module is required"
        )
def manage_config(manage_config_settings, cursor):
    """Execute the assembled admin command against the ProxySQL cursor.

    :param manage_config_settings: sequence of command words, e.g.
        ``["LOAD", "MYSQL USERS", "TO", "RUNTIME"]``; joined with spaces
        to form the admin statement.
    :param cursor: open DB-API cursor on the ProxySQL admin interface.
    :returns: always ``True`` — ProxySQL offers no cheap way to detect
        whether anything actually changed (see module DOCUMENTATION).
    """
    # The redundant `"%s" % ' '.join(...)` formatting was dropped; the join
    # already produces the final query string.
    query_string = ' '.join(manage_config_settings)

    cursor.execute(query_string)

    return True
# ===========================================
# Module execution.
#
def main():
    """Entry point: parse parameters, connect to the ProxySQL admin
    interface, issue the LOAD/SAVE command and report the result."""
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default=None, type='str'),
            login_password=dict(default=None, no_log=True, type='str'),
            login_host=dict(default="127.0.0.1"),
            login_unix_socket=dict(default=None),
            # 6032 is ProxySQL's default admin port.
            login_port=dict(default=6032, type='int'),
            config_file=dict(default="", type='path'),
            action=dict(required=True, choices=['LOAD',
                                                'SAVE']),
            config_settings=dict(required=True, choices=['MYSQL USERS',
                                                         'MYSQL SERVERS',
                                                         'MYSQL QUERY RULES',
                                                         'MYSQL VARIABLES',
                                                         'ADMIN VARIABLES',
                                                         'SCHEDULER']),
            direction=dict(required=True, choices=['FROM',
                                                   'TO']),
            config_layer=dict(required=True, choices=['MEMORY',
                                                      'DISK',
                                                      'RUNTIME',
                                                      'CONFIG'])
        ),
        supports_check_mode=True
    )

    # Validates port range, CONFIG-layer combinations and MySQLdb presence;
    # calls fail_json (which exits) on any violation.
    perform_checks(module)

    login_user = module.params["login_user"]
    login_password = module.params["login_password"]
    config_file = module.params["config_file"]
    action = module.params["action"]
    config_settings = module.params["config_settings"]
    direction = module.params["direction"]
    config_layer = module.params["config_layer"]

    cursor = None
    try:
        cursor = mysql_connect(module,
                               login_user,
                               login_password,
                               config_file)
    except MySQLdb.Error as e:
        module.fail_json(
            msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
        )

    result = {}

    # Command words in execution order, e.g. "LOAD MYSQL USERS TO RUNTIME".
    manage_config_settings = \
        [action, config_settings, direction, config_layer]

    try:
        result['changed'] = manage_config(manage_config_settings,
                                          cursor)
    except MySQLdb.Error as e:
        module.fail_json(
            msg="unable to manage config.. %s" % to_native(e)
        )

    module.exit_json(**result)

if __name__ == '__main__':
    main()
|
antoviaque/edx-platform | refs/heads/master | common/djangoapps/contentserver/__init__.py | 12133432 | |
ayu-mushi/memoru | refs/heads/master | lib/memoru/__init__.py | 12133432 | |
dionyself/candela | refs/heads/master | queue/__init__.py | 12133432 | |
tcwicklund/django | refs/heads/master | django/contrib/flatpages/templatetags/flatpages.py | 472 | from django import template
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.shortcuts import get_current_site
register = template.Library()
class FlatpageNode(template.Node):
    """Template node that fetches visible flatpages into a context variable.

    ``starts_with`` and ``user`` are stored as template Variables so they
    can be resolved against the rendering context.
    """

    def __init__(self, context_name, starts_with=None, user=None):
        self.context_name = context_name
        self.starts_with = template.Variable(starts_with) if starts_with else None
        self.user = template.Variable(user) if user else None

    def render(self, context):
        # Scope the queryset to the current site (from the request when
        # available, otherwise the configured SITE_ID).
        if 'request' in context:
            site_pk = get_current_site(context['request']).pk
        else:
            site_pk = settings.SITE_ID
        pages = FlatPage.objects.filter(sites__id=site_pk)

        # Optional URL-prefix restriction.
        if self.starts_with:
            pages = pages.filter(
                url__startswith=self.starts_with.resolve(context))

        # Without a user — or with an anonymous one — hide pages that
        # require registration.
        if self.user:
            user = self.user.resolve(context)
            if not user.is_authenticated():
                pages = pages.filter(registration_required=False)
        else:
            pages = pages.filter(registration_required=False)

        context[self.context_name] = pages
        return ''
@register.tag
def get_flatpages(parser, token):
    """
    Retrieves all flatpage objects available for the current site and
    visible to the specific user (or visible to all users if no user is
    specified). Populates the template context with them in a variable
    whose name is defined by the ``as`` clause.

    An optional ``for`` clause can be used to control the user whose
    permissions are to be used in determining which flatpages are visible.

    An optional argument, ``starts_with``, can be applied to limit the
    returned flatpages to those beginning with a particular base URL.
    This argument can be passed as a variable or a string, as it resolves
    from the template context.

    Syntax::

        {% get_flatpages ['url_starts_with'] [for user] as context_name %}

    Example usage::

        {% get_flatpages as flatpages %}
        {% get_flatpages for someuser as flatpages %}
        {% get_flatpages '/about/' as about_pages %}
        {% get_flatpages prefix as about_pages %}
        {% get_flatpages '/about/' for someuser as about_pages %}
    """
    bits = token.split_contents()
    syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
                      "['url_starts_with'] [for user] as context_name" %
                      dict(tag_name=bits[0]))
    # Valid invocations have between 3 and 6 tokens (tag name included).
    if not 3 <= len(bits) <= 6:
        raise template.TemplateSyntaxError(syntax_message)
    # An even token count means a prefix argument directly follows the tag
    # name; an odd count means no prefix was supplied.
    prefix = bits[1] if len(bits) % 2 == 0 else None
    # The tag must end with "as <context_name>".
    if bits[-2] != 'as':
        raise template.TemplateSyntaxError(syntax_message)
    context_name = bits[-1]
    user = None
    # With five or more tokens a "for <user>" clause precedes the "as" clause.
    if len(bits) >= 5:
        if bits[-4] != 'for':
            raise template.TemplateSyntaxError(syntax_message)
        user = bits[-3]
    return FlatpageNode(context_name, starts_with=prefix, user=user)
hi2017teamB/ChatAppProject | refs/heads/master | websocket-client/websocket/_http.py | 15 | """
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import errno
import os
import socket
import sys
import six
from ._exceptions import *
from ._logging import *
from ._socket import*
from ._ssl_compat import *
from ._url import *
if six.PY3:
from base64 import encodebytes as base64encode
else:
from base64 import encodestring as base64encode
__all__ = ["proxy_info", "connect", "read_headers"]
class proxy_info(object):
    """HTTP proxy configuration extracted from keyword options.

    Recognized keys: http_proxy_host, http_proxy_port, http_proxy_auth,
    http_no_proxy. Without a host, port/auth/no_proxy fall back to
    0/None/None and no proxy is used.
    """

    def __init__(self, **options):
        self.host = options.get("http_proxy_host", None)
        if not self.host:
            self.port = 0
            self.auth = None
            self.no_proxy = None
        else:
            self.port = options.get("http_proxy_port", 0)
            self.auth = options.get("http_proxy_auth", None)
            self.no_proxy = options.get("http_no_proxy", None)
def connect(url, options, proxy, socket):
    """Establish (or reuse) the transport socket for the websocket at *url*.

    Returns ``(socket, (hostname, port, resource))``. A caller-supplied
    *socket* is returned untouched; otherwise a new connection is opened,
    tunnelled through an HTTP proxy when one applies, and wrapped in SSL
    for secure URLs.
    """
    hostname, port, resource, is_secure = parse_url(url)

    # Caller already has a connected socket: just pair it with the parsed
    # target so the handshake code knows the host/port/resource.
    if socket:
        return socket, (hostname, port, resource)

    addrinfo_list, need_tunnel, auth = _get_addrinfo_list(
        hostname, port, is_secure, proxy)
    if not addrinfo_list:
        raise WebSocketException(
            "Host not found.: " + hostname + ":" + str(port))

    sock = None
    try:
        sock = _open_socket(addrinfo_list, options.sockopt, options.timeout)
        if need_tunnel:
            sock = _tunnel(sock, hostname, port, auth)

        if is_secure:
            if HAVE_SSL:
                sock = _ssl_socket(sock, options.sslopt, hostname)
            else:
                raise WebSocketException("SSL not available.")

        return sock, (hostname, port, resource)
    except:
        # Close the half-open socket before re-raising so it doesn't leak.
        if sock:
            sock.close()
        raise
def _get_addrinfo_list(hostname, port, is_secure, proxy):
    """Resolve the address to connect to, honouring any applicable proxy.

    Returns ``(addrinfo_list, need_tunnel, auth)``: when a proxy applies,
    the proxy host is resolved instead of the target and need_tunnel is True.
    """
    phost, pport, pauth = get_proxy_info(
        hostname, is_secure, proxy.host, proxy.port, proxy.auth, proxy.no_proxy)
    if phost:
        # Connect to the proxy (default port 80) and tunnel to the target.
        proxy_port = pport or 80
        proxy_addrinfo = socket.getaddrinfo(phost, proxy_port, 0, 0, socket.SOL_TCP)
        return proxy_addrinfo, True, pauth
    # No proxy: resolve and connect to the target host directly.
    direct_addrinfo = socket.getaddrinfo(hostname, port, 0, 0, socket.SOL_TCP)
    return direct_addrinfo, False, None
def _open_socket(addrinfo_list, sockopt, timeout):
    """Try each address in *addrinfo_list* until one connects.

    Connection-refused errors are remembered and the next address is tried;
    any other socket error propagates immediately. If every address refuses,
    the last refusal is re-raised.
    """
    err = None
    for addrinfo in addrinfo_list:
        family = addrinfo[0]
        sock = socket.socket(family)
        sock.settimeout(timeout)
        # Apply library defaults first, then caller-supplied options.
        for opts in DEFAULT_SOCKET_OPTION:
            sock.setsockopt(*opts)
        for opts in sockopt:
            sock.setsockopt(*opts)

        address = addrinfo[4]
        try:
            sock.connect(address)
        except socket.error as error:
            # Tag the error with the peer IP for friendlier diagnostics.
            error.remote_ip = str(address[0])
            if error.errno in (errno.ECONNREFUSED, ):
                err = error
                continue
            else:
                raise
        else:
            break
    else:
        # NOTE(review): with an empty addrinfo_list, err is still None and
        # this raises TypeError; callers currently guarantee a non-empty
        # list — confirm before relying on it.
        raise err

    return sock
def _can_use_sni():
    """Return True when the interpreter supports SNI (2.7.9+ or 3.2+)."""
    if six.PY2:
        return sys.version_info >= (2, 7, 9)
    return sys.version_info >= (3, 2)
def _wrap_sni_socket(sock, sslopt, hostname, check_hostname):
    """Wrap *sock* in an SSLContext built from *sslopt*, sending SNI.

    Only used when the interpreter supports SSLContext/SNI (see
    _can_use_sni); *check_hostname* toggles handshake-time hostname
    verification where the ssl module supports it.
    """
    context = ssl.SSLContext(sslopt.get('ssl_version', ssl.PROTOCOL_SSLv23))

    if sslopt.get('cert_reqs', ssl.CERT_NONE) != ssl.CERT_NONE:
        # Only load CA material when certificates will actually be verified.
        context.load_verify_locations(cafile=sslopt.get('ca_certs', None))
    if sslopt.get('certfile', None):
        # Client certificate via separate certfile/keyfile/password options.
        context.load_cert_chain(
            sslopt['certfile'],
            sslopt.get('keyfile', None),
            sslopt.get('password', None),
        )
    # see
    # https://github.com/liris/websocket-client/commit/b96a2e8fa765753e82eea531adb19716b52ca3ca#commitcomment-10803153
    context.verify_mode = sslopt['cert_reqs']
    if HAVE_CONTEXT_CHECK_HOSTNAME:
        context.check_hostname = check_hostname
    if 'ciphers' in sslopt:
        context.set_ciphers(sslopt['ciphers'])
    if 'cert_chain' in sslopt:
        # Alternative client-cert spelling: (certfile, keyfile, password).
        certfile, keyfile, password = sslopt['cert_chain']
        context.load_cert_chain(certfile, keyfile, password)

    return context.wrap_socket(
        sock,
        do_handshake_on_connect=sslopt.get('do_handshake_on_connect', True),
        suppress_ragged_eofs=sslopt.get('suppress_ragged_eofs', True),
        server_hostname=hostname,
    )
def _ssl_socket(sock, user_sslopt, hostname):
    """Wrap *sock* with SSL, merging user options over safe defaults."""
    sslopt = dict(cert_reqs=ssl.CERT_REQUIRED)
    sslopt.update(user_sslopt)

    # Prefer an explicitly provided CA bundle (env var), falling back to the
    # cacert.pem shipped alongside this module.
    if os.environ.get('WEBSOCKET_CLIENT_CA_BUNDLE'):
        certPath = os.environ.get('WEBSOCKET_CLIENT_CA_BUNDLE')
    else:
        certPath = os.path.join(
            os.path.dirname(__file__), "cacert.pem")
    if os.path.isfile(certPath) and user_sslopt.get('ca_certs', None) is None:
        sslopt['ca_certs'] = certPath

    # Hostname checking only makes sense when certificates are verified at
    # all; pop the flag so it is not forwarded to wrap_socket below.
    check_hostname = sslopt["cert_reqs"] != ssl.CERT_NONE and sslopt.pop(
        'check_hostname', True)

    if _can_use_sni():
        sock = _wrap_sni_socket(sock, sslopt, hostname, check_hostname)
    else:
        sslopt.pop('check_hostname', True)
        sock = ssl.wrap_socket(sock, **sslopt)

    if not HAVE_CONTEXT_CHECK_HOSTNAME and check_hostname:
        # Older Pythons cannot verify the hostname during the handshake,
        # so do it manually after the wrap.
        match_hostname(sock.getpeercert(), hostname)

    return sock
def _tunnel(sock, host, port, auth):
    """Issue an HTTP CONNECT through an already-open proxy socket.

    Returns the same socket once the proxy acknowledges the tunnel with a
    200 response; raises WebSocketProxyException otherwise.
    """
    debug("Connecting proxy...")
    connect_header = "CONNECT %s:%d HTTP/1.0\r\n" % (host, port)
    # TODO: support digest auth.
    if auth and auth[0]:
        # Basic auth: base64("user") or base64("user:pass").
        auth_str = auth[0]
        if auth[1]:
            auth_str += ":" + auth[1]
        encoded_str = base64encode(auth_str.encode()).strip().decode()
        connect_header += "Proxy-Authorization: Basic %s\r\n" % encoded_str
    connect_header += "\r\n"
    dump("request header", connect_header)

    send(sock, connect_header)

    try:
        status, resp_headers = read_headers(sock)
    except Exception as e:
        raise WebSocketProxyException(str(e))

    if status != 200:
        raise WebSocketProxyException(
            "failed CONNECT via proxy status: %r" % status)

    return sock
def read_headers(sock):
    """Read an HTTP response status line and headers from *sock*.

    Returns ``(status_code, headers)`` with header names lower-cased;
    raises WebSocketException on a malformed header line.
    """
    status = None
    headers = {}
    trace("--- response header ---")

    while True:
        line = recv_line(sock).decode('utf-8').strip()
        if not line:
            # A blank line terminates the header section.
            break
        trace(line)
        if not status:
            # First line is the status line, e.g. "HTTP/1.1 101 Switching".
            status_info = line.split(" ", 2)
            status = int(status_info[1])
        else:
            key, sep, value = line.partition(":")
            if not sep:
                raise WebSocketException("Invalid header")
            headers[key.lower()] = value.strip()

    trace("-----------------------")
    return status, headers
|
stannynuytkens/youtube-dl | refs/heads/master | youtube_dl/extractor/embedly.py | 71 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class EmbedlyIE(InfoExtractor):
    # Matches embedly widget/"card" iframe URLs; the wrapped media URL is
    # percent-encoded in the `url` query parameter (captured as `id`).
    _VALID_URL = r'https?://(?:www|cdn\.)?embedly\.com/widgets/media\.html\?(?:[^#]*?&)?url=(?P<id>[^#&]+)'
    _TESTS = [{
        'url': 'https://cdn.embedly.com/widgets/media.html?src=http%3A%2F%2Fwww.youtube.com%2Fembed%2Fvideoseries%3Flist%3DUUGLim4T2loE5rwCMdpCIPVg&url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DSU4fj_aEMVw%26list%3DUUGLim4T2loE5rwCMdpCIPVg&image=http%3A%2F%2Fi.ytimg.com%2Fvi%2FSU4fj_aEMVw%2Fhqdefault.jpg&key=8ee8a2e6a8cc47aab1a5ee67f9a178e0&type=text%2Fhtml&schema=youtube&autoplay=1',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Decode the wrapped URL and hand it back to the framework, which
        # re-dispatches it to the appropriate site extractor.
        return self.url_result(compat_urllib_parse_unquote(self._match_id(url)))
|
WorldBank-Transport/open-transit-indicators | refs/heads/master | python/django/datasources/tasks/osm.py | 2 | """Handles downloading and importing OSM Data"""
import os
import subprocess
import tempfile
import requests
from celery.utils.log import get_task_logger
from django.conf import settings
from django.db import connection
from datasources.models import OSMData, OSMDataProblem
from datasources.tasks.shapefile import ErrorFactory
# Note: The download is done using the overpass API
# (see:http://wiki.openstreetmap.org/wiki/Overpass_API) because
# we may be downloading large files and these endpoints are optimized
# for downloads/reads unlike the main openstreetmap API endpoint
OSM_API_URL = 'http://www.overpass-api.de/api/xapi?way[bbox=%s,%s,%s,%s][highway=*]'
# set up shared task logger
logger = get_task_logger(__name__)
def run_osm_import(osmdata_id):
    """Download and run import step for OSM data

    Downloads and stores raw OSM highway data within a bounding box defined
    by imported GTFS data, then imports it with osm2pgsql. Uses the SRID
    defined on the gtfs_stops table to pick the UTM projection for import.

    Uses raw SQL to
    - get the extent from GTFS data, since we do not have models tracking
      GTFS data
    - get the UTM projection so OSM data is imported in the right SRID
    """
    logger.debug('Starting OSM import')
    osm_data = OSMData.objects.get(pk=osmdata_id)
    osm_data.status = OSMData.Statuses.PROCESSING
    error_factory = ErrorFactory(OSMDataProblem, osm_data, 'osmdata')

    def handle_error(title, description):
        """Record a problem row and mark this import as failed.

        Note: this only records state — callers must `return` afterwards
        to actually abort the import.
        """
        error_factory.error(title, description)
        osm_data.status = OSMData.Statuses.ERROR
        osm_data.save()

    with connection.cursor() as c:
        try:
            # Get the bounding box for gtfs data; select the four components
            # separately to make the SQL response easier to unpack.
            bbox_query = """
            SELECT MIN(ST_Xmin(the_geom)),
                   MIN(ST_Ymin(the_geom)),
                   MAX(ST_Xmax(the_geom)),
                   MAX(ST_Ymax(the_geom))
            FROM gtfs_stops;"""
            logger.debug('Making query for bounding box from gtfs stops')
            c.execute(bbox_query)
            bbox = c.fetchone()
        except Exception as e:
            err_msg = 'Error obtaining bounding box from gtfs_stops table'
            logger.exception(err_msg)
            handle_error(err_msg, e.message)
            # BUG FIX: previously execution fell through here with `bbox`
            # undefined, causing a NameError further down; abort instead.
            return
        try:
            logger.debug('Making query for UTM projection srid from gtfs_stops table (geom field)')
            utm_projection_query = "SELECT FIND_SRID('', 'gtfs_stops', 'geom');"
            c.execute(utm_projection_query)
            utm_projection = c.fetchone()[0]
        except Exception as e:
            err_msg = 'Error obtaining SRID from gtfs_stops table'
            logger.exception(err_msg)
            handle_error(err_msg, e.message)
            # BUG FIX: abort rather than continuing with `utm_projection`
            # undefined.
            return

    _, temp_filename = tempfile.mkstemp()
    logger.debug('Generated tempfile %s to download osm data into', temp_filename)
    osm_data.source_file = temp_filename
    osm_data.status = OSMData.Statuses.DOWNLOADING
    osm_data.save()

    try:
        logger.debug('Downloading OSM data from overpass/OSM api')
        response = requests.get(OSM_API_URL % bbox, stream=True)
        # Stream the (potentially large) response to disk in 1 KiB chunks.
        with open(temp_filename, 'wb') as fh:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:  # skip keep-alive chunks
                    fh.write(chunk)
                    fh.flush()
        logger.debug('Finished downloading OSM data')
        osm_data.status = OSMData.Statuses.IMPORTING
        osm_data.save()
    except Exception as e:
        err_msg = 'Error downloading data'
        logger.exception(err_msg)
        handle_error(err_msg, e.message)
        # BUG FIX: do not run osm2pgsql against a missing/partial download;
        # clean up the temp file and abort.
        os.remove(temp_filename)
        return

    # Database connection settings for osm2pgsql; the password is passed
    # via the environment so it does not appear on the command line.
    db_host = settings.DATABASES['default']['HOST']
    db_password = settings.DATABASES['default']['PASSWORD']
    db_user = settings.DATABASES['default']['USER']
    db_name = settings.DATABASES['default']['NAME']
    env = os.environ.copy()
    env['PGPASSWORD'] = db_password

    # Insert OSM data into the database with the osm2pgsql command
    osm2pgsql_command = ['osm2pgsql',
                         '-U', db_user,
                         '-H', db_host,
                         '-d', db_name,
                         '-s',  # use slim mode to cache to DB rather than in-memory
                         '-E', str(utm_projection),
                         temp_filename]
    try:
        logger.debug('Running OSM import command %s', ' '.join(osm2pgsql_command))
        subprocess.check_call(osm2pgsql_command, env=env)
        osm_data.status = OSMData.Statuses.COMPLETE
    except subprocess.CalledProcessError as e:
        osm_data.status = OSMData.Statuses.ERROR
        err_msg = 'Error running osm2pgsql command'
        logger.exception(err_msg)
        error_factory.error(err_msg, e.message)
    finally:
        osm_data.save()
        os.remove(temp_filename)
|
tuxology/bcc | refs/heads/master | tools/old/statsnoop.py | 9 | #!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# statsnoop Trace stat() syscalls.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: statsnoop [-h] [-t] [-x] [-p PID]
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 08-Feb-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
import argparse
# arguments
examples = """examples:
    ./statsnoop # trace all stat() syscalls
    ./statsnoop -t # include timestamps
    ./statsnoop -x # only show failed stats
    ./statsnoop -p 181 # only trace PID 181
"""
parser = argparse.ArgumentParser(
    description="Trace stat() syscalls",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
    help="include timestamp on output")
parser.add_argument("-x", "--failed", action="store_true",
    help="only show failed stats")
parser.add_argument("-p", "--pid",
    help="trace this PID only")
args = parser.parse_args()
# set to 1 to dump the generated BPF C source before it is loaded
debug = 0
# define BPF program
# The entry probe stashes the filename pointer keyed by PID; the matching
# return probe pairs it with the syscall return value and emits one event
# line via bpf_trace_printk.
bpf_text = """
#include <uapi/linux/ptrace.h>
BPF_HASH(args_filename, u32, const char *);
int trace_entry(struct pt_regs *ctx, const char __user *filename)
{
    u32 pid = bpf_get_current_pid_tgid();
    FILTER
    args_filename.update(&pid, &filename);
    return 0;
};
int trace_return(struct pt_regs *ctx)
{
    const char **filenamep;
    int ret = ctx->ax;
    u32 pid = bpf_get_current_pid_tgid();
    filenamep = args_filename.lookup(&pid);
    if (filenamep == 0) {
        // missed entry
        return 0;
    }
    bpf_trace_printk("%d %s\\n", ret, *filenamep);
    args_filename.delete(&pid);
    return 0;
}
"""
# Substitute the PID filter: drop other PIDs in-kernel when -p is given,
# otherwise compile the filter out entirely.
if args.pid:
    bpf_text = bpf_text.replace('FILTER',
        'if (pid != %s) { return 0; }' % args.pid)
else:
    bpf_text = bpf_text.replace('FILTER', '')
if debug:
    print(bpf_text)
# initialize BPF
b = BPF(text=bpf_text)
# stat() has several kernel entry points; instrument them all with the
# same entry/return handlers.
b.attach_kprobe(event="sys_stat", fn_name="trace_entry")
b.attach_kprobe(event="sys_statfs", fn_name="trace_entry")
b.attach_kprobe(event="sys_newstat", fn_name="trace_entry")
b.attach_kretprobe(event="sys_stat", fn_name="trace_return")
b.attach_kretprobe(event="sys_statfs", fn_name="trace_return")
b.attach_kretprobe(event="sys_newstat", fn_name="trace_return")
# header
if args.timestamp:
    print("%-14s" % ("TIME(s)"), end="")
print("%-6s %-16s %4s %3s %s" % ("PID", "COMM", "FD", "ERR", "PATH"))
start_ts = 0
# format output
while 1:
    # msg is "<ret> <filename>" as emitted by bpf_trace_printk above
    (task, pid, cpu, flags, ts, msg) = b.trace_fields()
    (ret_s, filename) = msg.split(" ", 1)
    ret = int(ret_s)
    if (args.failed and (ret >= 0)):
        continue
    # split return value into FD and errno columns
    if ret >= 0:
        fd_s = ret
        err = 0
    else:
        fd_s = "-1"
        err = - ret
    # print columns
    if args.timestamp:
        # timestamps are printed relative to the first event seen
        if start_ts == 0:
            start_ts = ts
        print("%-14.9f" % (ts - start_ts), end="")
    print("%-6d %-16s %4s %3s %s" % (pid, task, fd_s, err, filename))
|
ThinkOpen-Solutions/odoo | refs/heads/stable | addons/sale_stock/__init__.py | 376 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_stock
import report
import company
import res_config
|
anthrotype/freetype-py | refs/heads/master | freetype/ft_enums/tt_mac_ids.py | 4 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011-2015 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
"""
A list of valid values for the 'encoding_id' for TT_PLATFORM_MACINTOSH
charmaps and name entries.
TT_MAC_ID_ROMAN
TT_MAC_ID_TELUGU
TT_MAC_ID_GURMUKHI
TT_MAC_ID_TIBETAN
TT_MAC_ID_SIMPLIFIED_CHINESE
TT_MAC_ID_SINDHI
TT_MAC_ID_SINHALESE
TT_MAC_ID_RUSSIAN
TT_MAC_ID_KANNADA
TT_MAC_ID_VIETNAMESE
TT_MAC_ID_MONGOLIAN
TT_MAC_ID_DEVANAGARI
TT_MAC_ID_HEBREW
TT_MAC_ID_TAMIL
TT_MAC_ID_THAI
TT_MAC_ID_BURMESE
TT_MAC_ID_MALDIVIAN
TT_MAC_ID_TRADITIONAL_CHINESE
TT_MAC_ID_JAPANESE
TT_MAC_ID_GREEK
TT_MAC_ID_LAOTIAN
TT_MAC_ID_KHMER
TT_MAC_ID_UNINTERP
TT_MAC_ID_ORIYA
TT_MAC_ID_RSYMBOL
TT_MAC_ID_MALAYALAM
TT_MAC_ID_GEEZ
TT_MAC_ID_KOREAN
TT_MAC_ID_GUJARATI
TT_MAC_ID_BENGALI
TT_MAC_ID_ARABIC
TT_MAC_ID_GEORGIAN
TT_MAC_ID_ARMENIAN
TT_MAC_ID_SLAVIC
"""
# Mapping of FreeType TT_MAC_ID_* constant names to their numeric
# 'encoding_id' values for TT_PLATFORM_MACINTOSH charmaps and name entries.
TT_MAC_IDS = {
    'TT_MAC_ID_ROMAN' : 0,
    'TT_MAC_ID_JAPANESE' : 1,
    'TT_MAC_ID_TRADITIONAL_CHINESE' : 2,
    'TT_MAC_ID_KOREAN' : 3,
    'TT_MAC_ID_ARABIC' : 4,
    'TT_MAC_ID_HEBREW' : 5,
    'TT_MAC_ID_GREEK' : 6,
    'TT_MAC_ID_RUSSIAN' : 7,
    'TT_MAC_ID_RSYMBOL' : 8,
    'TT_MAC_ID_DEVANAGARI' : 9,
    'TT_MAC_ID_GURMUKHI' : 10,
    'TT_MAC_ID_GUJARATI' : 11,
    'TT_MAC_ID_ORIYA' : 12,
    'TT_MAC_ID_BENGALI' : 13,
    'TT_MAC_ID_TAMIL' : 14,
    'TT_MAC_ID_TELUGU' : 15,
    'TT_MAC_ID_KANNADA' : 16,
    'TT_MAC_ID_MALAYALAM' : 17,
    'TT_MAC_ID_SINHALESE' : 18,
    'TT_MAC_ID_BURMESE' : 19,
    'TT_MAC_ID_KHMER' : 20,
    'TT_MAC_ID_THAI' : 21,
    'TT_MAC_ID_LAOTIAN' : 22,
    'TT_MAC_ID_GEORGIAN' : 23,
    'TT_MAC_ID_ARMENIAN' : 24,
    # NOTE: 25 appears twice; this mirrors FreeType's ttnameid.h, where
    # MALDIVIAN and SIMPLIFIED_CHINESE share the same Apple script code.
    'TT_MAC_ID_MALDIVIAN' : 25,
    'TT_MAC_ID_SIMPLIFIED_CHINESE' : 25,
    'TT_MAC_ID_TIBETAN' : 26,
    'TT_MAC_ID_MONGOLIAN' : 27,
    'TT_MAC_ID_GEEZ' : 28,
    'TT_MAC_ID_SLAVIC' : 29,
    'TT_MAC_ID_VIETNAMESE' : 30,
    'TT_MAC_ID_SINDHI' : 31,
    'TT_MAC_ID_UNINTERP' : 32}
# Export every TT_MAC_ID_* name as a module-level constant.
globals().update(TT_MAC_IDS)
|
shadowwalkersb/conda-smithy | refs/heads/master | tests/recipes/click-test-feedstock/recipe/upload_or_check_non_existence.py | 2 | print('it works')
|
MoritzS/django | refs/heads/master | tests/admin_scripts/simple_app/management/commands/duplicate.py | 554 | from django.core.management.base import BaseCommand
class Command(BaseCommand):
    # Minimal management command used by the admin_scripts tests; its file
    # name ("duplicate") presumably clashes with a command of the same name
    # in another test app -- confirm against the test suite.
    def handle(self, **options):
        # Report which app's command actually ran.
        self.stdout.write('simple_app')
|
iddqd1/django-cms | refs/heads/develop | menus/templatetags/__init__.py | 12133432 | |
binarydud/django-oscar | refs/heads/master | tests/integration/__init__.py | 12133432 | |
ojengwa/django-1 | refs/heads/master | tests/max_lengths/__init__.py | 12133432 | |
lindsay-stevens/odk_tools | refs/heads/master | tests/__init__.py | 12133432 | |
suneeth51/neutron | refs/heads/master | neutron/plugins/ml2/drivers/brocade/__init__.py | 12133432 | |
xfournet/intellij-community | refs/heads/master | python/testData/refactoring/move/function/before/src/a.py | 83 | from lib1 import urlopen
def f(url):
    '''Return the representation available at the URL.
    '''
    # urlopen comes from lib1, imported at module top.
    return urlopen(url).read()
def f_usage():
    # NOTE(review): passes an int where f() expects a URL; this module is
    # IDE refactoring test data, so the call is only analyzed, not run.
    return f(14)
class C(object):
    # Identity helper; base class for D below.
    def g(self, x):
        return x
class D(C):
    def g(self, x, y):
        # NOTE(review): super(D, self).f does not exist on C (which only
        # defines g); presumably intentional fixture content for the
        # move-refactoring test -- do not "fix".
        return super(D, self).g(x) + y
class E(object):
def g(self):
return -1 |
VitalPet/account-financial-tools | refs/heads/8.0 | currency_rate_update/services/update_service_PL_NBP.py | 41 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 CamptoCamp. All rights reserved.
# @author Nicolas Bessi
#
# Abstract class to fetch rates from National Bank of Poland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from .currency_getter_interface import Currency_getter_interface
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
import logging
_logger = logging.getLogger(__name__)
class PL_NBP_getter(Currency_getter_interface):
    """Implementation of Currency_getter_factory interface
    for PL NBP service (National Bank of Poland exchange rates)
    """
    def rate_retrieve(self, dom, ns, curr):
        """Parse the NBP "tabela A" XML dom to retrieve one currency's data.

        Returns {'rate_currency': <kurs_sredni>, 'rate_ref': <przelicznik>}
        for the given ISO currency code.
        """
        res = {}
        xpath_rate_currency = ("/tabela_kursow/pozycja[kod_waluty='%s']/"
                               "kurs_sredni/text()") % (curr.upper())
        xpath_rate_ref = ("/tabela_kursow/pozycja[kod_waluty='%s']/"
                          "przelicznik/text()") % (curr.upper())
        # NBP uses a comma as decimal separator; normalize before float()
        res['rate_currency'] = float(
            dom.xpath(xpath_rate_currency, namespaces=ns)[0].replace(',', '.')
        )
        res['rate_ref'] = float(dom.xpath(xpath_rate_ref, namespaces=ns)[0])
        return res
    def get_updated_currency(self, currency_array, main_currency,
                             max_delta_days):
        """implementation of abstract method of Curreny_getter_interface"""
        # LastA.xml is always the most recent one
        url = 'http://www.nbp.pl/kursy/xml/LastA.xml'
        # We do not want to update the main currency
        if main_currency in currency_array:
            currency_array.remove(main_currency)
        # Move to new XML lib cf Launchpad bug #645263
        from lxml import etree
        _logger.debug("NBP.pl currency rate service : connecting...")
        rawfile = self.get_url(url)
        dom = etree.fromstring(rawfile)
        ns = {} # Cool, there are no namespaces !
        _logger.debug("NBP.pl sent a valid XML file")
        rate_date = dom.xpath('/tabela_kursow/data_publikacji/text()',
                              namespaces=ns)[0]
        rate_date_datetime = datetime.strptime(rate_date,
                                               DEFAULT_SERVER_DATE_FORMAT)
        # Abort if the published table is older than the allowed delta.
        self.check_rate_date(rate_date_datetime, max_delta_days)
        # We dynamically update supported currencies from the table itself
        self.supported_currency_array = dom.xpath(
            '/tabela_kursow/pozycja/kod_waluty/text()',
            namespaces=ns
        )
        self.supported_currency_array.append('PLN')
        _logger.debug("Supported currencies = %s" %
                      self.supported_currency_array)
        self.validate_cur(main_currency)
        if main_currency != 'PLN':
            main_curr_data = self.rate_retrieve(dom, ns, main_currency)
            # 1 MAIN_CURRENCY = main_rate PLN
            main_rate = (main_curr_data['rate_currency'] /
                         main_curr_data['rate_ref'])
        for curr in currency_array:
            self.validate_cur(curr)
            if curr == 'PLN':
                # NBP quotes everything against PLN, so this is main_rate
                rate = main_rate
            else:
                curr_data = self.rate_retrieve(dom, ns, curr)
                # 1 MAIN_CURRENCY = rate CURR
                if main_currency == 'PLN':
                    rate = curr_data['rate_ref'] / curr_data['rate_currency']
                else:
                    rate = (main_rate * curr_data['rate_ref'] /
                            curr_data['rate_currency'])
            self.updated_currency[curr] = rate
            _logger.debug("Rate retrieved : %s = %s %s" %
                          (main_currency, rate, curr))
        return self.updated_currency, self.log_info
|
saga-project/bliss | refs/heads/master | examples/advanced/mandelbrot/run_mandel.py | 1 | #!/usr/bin/env python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
"""
This example script runs the Mandelbrot fractal generator sequentially.
In this example, we run the Mandelbrot generator for a 2048x2048 pixel
Mandelbrot fractal. We split it up in 4x4 tiles which we run sequentially, one
after another on the local machine, simply by calling the 'makemandel()'
function directly.
Try to change the values for the dimension (imgx, imgy) and the number of
tiles (tilesx, tilesy). You will see that the bigger the image is, the longer
it will take to compute. Changing the number of tiles doesn't change the total
compute time for the whole set, because compute time per tile will increase
with more tiles and decrease with fewer tiles.
A 2048x2048 Mandelbrot fractal takes about one minute to generate on a
3.0GHz CPU. A 32768x32768 fractal takes more than two hours to finish!
Different tiles seem to need different amounts of time to generate.
Do you know why?
"""
__author__ = "Ole Christian Weidner"
__copyright__ = "Copyright 2012, Ole Christian Weidner"
__license__ = "MIT"
import sys, time, Image
from mandelbrot import makemandel
# the dimension (in pixel) of the whole fractal
imgx = 2048
imgy = 2048
# the number of tiles in X and Y direction to split-up
# the fractal into.
tilesx = 4
tilesy = 4
################################################################################
##
if __name__ == "__main__":
    # the total time to calculate all tiles:
    t_total = 0.0
    # the 'master' image that we assemble from the individual tiles.
    # WARNING: for large images, this will consume a substantial amount
    # of memory!
    fullimage = Image.new("RGB", (imgx, imgy))
    try:
        # Generate the tiles sequentially, left-to-right, top-to-bottom.
        for x in range(0, tilesx):
            for y in range(0, tilesy):
                print "Creating Mandelbrot tile [X:%s Y:%s]" % (x+1, y+1)
                # this is where we call the Mandelbrot fractal generator
                # directly via its module function makemandel().
                t0 = time.time()
                partimage = makemandel(imgx, imgy, (imgx/tilesx*x), (imgx/tilesx*(x+1)),
                                       (imgy/tilesy*y), (imgy/tilesy*(y+1)))
                dt = time.time() - t0
                t_total += dt
                print "...took %s seconds." % dt
                # paste the finished tile into its slot of the master image
                fullimage.paste(partimage, (imgx/tilesx*x, imgy/tilesy*y, imgx/tilesx*(x+1), imgy/tilesy*(y+1)) )
        print "Total execution time to generate a %sx%s Mandelbrot fractal: %s" % (imgx, imgy, t_total)
        print "Saving master image as 'mandel_full.png (this might take a while)'..."
        fullimage.save("mandel_full.png", "PNG")
        sys.exit(0)
    except KeyboardInterrupt:
        # Ctrl-C caught. Try to save what we already have and exit.
        print "Total execution time until Ctrl-C was hit: %s" % (t_total)
        print "Saving partial master image as 'mandel_full.png' (this might take a while)..."
        fullimage.save("mandel_full.png", "PNG")
        sys.exit(-1)
|
blckshrk/DummySMA | refs/heads/master | src/particles/ParticlesFrame.py | 1 | '''
Created on 19 janv. 2014
@author: Alexandre Bonhomme
'''
from Tkinter import ALL
from gui.FrameTk import FrameTk
from particles.agents.ParticleAgent import ParticleAgent
from core.agents.AgentWall import AgentWall
class ExplorerFrame(FrameTk):
    # Tk window that renders the particle SMA each tick: one borderless oval
    # per ParticleAgent and one filled square per AgentWall.
    def __init__(self, height, width, box_size, sma):
        FrameTk.__init__(self, height, width, box_size, title = 'Particles', bg = 'white')
        # sma: the simulation whose environment grid is drawn by _draw()
        self.sma = sma
    def drawParticle(self, x, y, color = 'Blue'):
        # (x, y) is the top-left corner in canvas pixels, not a grid cell.
        self.canvas.create_oval(x, \
                                y, \
                                x + self.BOX_SIZE, \
                                y + self.BOX_SIZE, \
                                width = 0, \
                                fill = color)
    def _drawWall(self, x, y, color = 'Black'):
        # Walls are drawn as outlined rectangles in canvas pixels.
        self.canvas.create_rectangle(x, \
                                     y, \
                                     x + self.BOX_SIZE, \
                                     y + self.BOX_SIZE, \
                                     fill = color)
    def _draw(self):
        # Redraw the whole scene from scratch for the current tick and
        # refresh the window title with tick/particle counters.
        title = "Particles - Tick " + str(self.sma.ticksCounter) + \
                " Particles " + str(self.sma.particlesCounter)
        self.main.wm_title(title)
        self.canvas.delete(ALL)
        grid = self.sma.env.grid
        rows, cols = self.sma.env.rows, self.sma.env.cols
        for x in xrange(0, rows):
            for y in xrange(0, cols):
                element = grid[x][y]
                if isinstance(element, ParticleAgent):
                    # particles carry their own display color
                    self.drawParticle(x * self.BOX_SIZE, y * self.BOX_SIZE, element.color)
                elif isinstance(element, AgentWall):
                    self._drawWall(x * self.BOX_SIZE, y * self.BOX_SIZE)
        self.canvas.update_idletasks()
|
orb-framework/orb | refs/heads/master | orb/core/connection_types/sql/postgres/statements/add_column.py | 2 | from projex.lazymodule import lazy_import
from ..psqlconnection import PSQLStatement
orb = lazy_import('orb')
class ADD_COLUMN(PSQLStatement):
    """Renders the PostgreSQL ``ADD COLUMN`` clause for an orb column."""
    def __call__(self, column):
        """Return ``(sql, data)`` for adding *column* to a table."""
        # Collect the SQL fragment registered for every flag set on this
        # column, skipping flags with no registered statement.
        flag_statements = []
        for flag_name, flag_bit in orb.Column.Flags.items():
            if not (column.flags() & flag_bit):
                continue
            statement = PSQLStatement.byName('Flag::{0}'.format(flag_name))
            if statement:
                flag_statements.append(statement)
        clause = u'ADD COLUMN "{0}" {1} {2}'.format(
            column.field(),
            column.dbType('Postgres'),
            ' '.join(flag_statements),
        )
        # No bound parameters are needed for this clause.
        return clause.strip(), {}
PSQLStatement.registerAddon('ADD COLUMN', ADD_COLUMN())
|
beaufortfrancois/chromium-dashboard | refs/heads/master | customtags/templatetags/verbatim.py | 4 | """
jQuery templates use constructs like:
{{if condition}} print something{{/if}}
This, of course, completely screws up Django templates,
because Django thinks {{ and }} mean something.
Wrap {% verbatim %} and {% endverbatim %} around those
blocks of jQuery templates and this will try its best
to output the contents with no changes.
"""
from django import template
register = template.Library()
class VerbatimNode(template.Node):
    # Node that simply replays the raw template text captured between
    # {% verbatim %} and {% endverbatim %}.
    def __init__(self, text):
        self.text = text
    def render(self, context):
        # The context is ignored: the captured text is emitted unchanged.
        return self.text
@register.tag
def verbatim(parser, token):
    """Capture everything up to {% endverbatim %} and emit it untouched.

    Variable and block tokens are re-wrapped in their original {{ ... }}
    and {% ... %} delimiters so jQuery-template syntax survives Django's
    template parser.
    """
    captured = []
    while True:
        tok = parser.tokens.pop(0)
        if tok.contents == 'endverbatim':
            break
        if tok.token_type == template.TOKEN_VAR:
            captured.append('{{%s}}' % tok.contents)
        elif tok.token_type == template.TOKEN_BLOCK:
            captured.append('{%%%s%%}' % tok.contents)
        else:
            captured.append(tok.contents)
    return VerbatimNode(''.join(captured))
|
eneldoserrata/marcos_openerp | refs/heads/master | addons/report_geraldo/lib/geraldo/site/newsite/django_1_0/tests/regressiontests/templates/models.py | 12133432 | |
hickford/youtube-dl | refs/heads/master | youtube_dl/extractor/videoweed.py | 133 | from __future__ import unicode_literals
from .novamov import NovaMovIE
class VideoWeedIE(NovaMovIE):
    # VideoWeed is a NovaMov-based host: only host-specific constants and
    # regexes differ from the NovaMov base extractor.
    IE_NAME = 'videoweed'
    IE_DESC = 'VideoWeed'
    # Accept both videoweed.es and videoweed.com URLs.
    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'videoweed\.(?:es|com)'}
    _HOST = 'www.videoweed.es'
    _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
    _TITLE_REGEX = r'<h1 class="text_shadow">([^<]+)</h1>'
    # Integration-test fixture consumed by the youtube-dl test harness.
    _TEST = {
        'url': 'http://www.videoweed.es/file/b42178afbea14',
        'md5': 'abd31a2132947262c50429e1d16c1bfd',
        'info_dict': {
            'id': 'b42178afbea14',
            'ext': 'flv',
            'title': 'optical illusion dissapeared image magic illusion',
            'description': ''
        },
    }
|
taedori81/wagtail | refs/heads/master | wagtail/wagtailadmin/migrations/0001_create_admin_access_permissions.py | 27 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django
from django.db import migrations
def create_admin_access_permissions(apps, schema_editor):
    """Create the 'Can access Wagtail admin' permission and grant it to the
    Editors and Moderators groups."""
    ContentType = apps.get_model('contenttypes.ContentType')
    Permission = apps.get_model('auth.Permission')
    Group = apps.get_model('auth.Group')
    # Add a fake content type to hang the 'can access Wagtail admin'
    # permission off. It does not correspond to any real model.
    content_type_kwargs = {
        'app_label': 'wagtailadmin',
        'model': 'admin',
    }
    # Django 1.7 and below additionally require a content type name.
    if django.VERSION < (1, 8):
        content_type_kwargs['name'] = 'Wagtail admin'
    wagtailadmin_content_type = ContentType.objects.create(**content_type_kwargs)
    # Create the admin-access permission itself.
    admin_permission = Permission.objects.create(
        content_type=wagtailadmin_content_type,
        codename='access_admin',
        name='Can access Wagtail admin'
    )
    # Grant it to the default editorial groups created by wagtailcore.
    for group in Group.objects.filter(name__in=['Editors', 'Moderators']):
        group.permissions.add(admin_permission)
class Migration(migrations.Migration):
    # Data migration: installs the synthetic 'access_admin' permission.
    dependencies = [
        # Need to run wagtailcores initial data migration to make sure the groups are created
        ('wagtailcore', '0002_initial_data'),
    ]
    operations = [
        migrations.RunPython(create_admin_access_permissions),
    ]
|
KanchanChauhan/erpnext | refs/heads/develop | erpnext/docs/assets/img/human-resources/__init__.py | 12133432 | |
douglas-treadwell/marshmallow-models | refs/heads/master | tests/__init__.py | 12133432 | |
andrewmchen/incubator-airflow | refs/heads/master | tests/contrib/hooks/test_aws_hook.py | 42 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import boto3
from airflow import configuration
from airflow.contrib.hooks.aws_hook import AwsHook
try:
from moto import mock_emr
except ImportError:
mock_emr = None
class TestAwsHook(unittest.TestCase):
    """Tests for AwsHook.get_client_type.

    Bug fix: the original applied ``@mock_emr`` unconditionally to setUp and
    to the test method. When the optional ``moto`` package is missing,
    ``mock_emr`` is None, so decorating raised ``TypeError: 'NoneType'
    object is not callable`` at class-definition time -- before the
    ``skipIf`` guard could ever take effect. The mock is now entered as a
    context manager inside the (skippable) test instead, and setUp drops
    the decorator since it performs no AWS calls.
    """
    def setUp(self):
        # Only loads the Airflow test configuration; no AWS interaction.
        configuration.load_test_config()

    @unittest.skipIf(mock_emr is None, 'mock_emr package not present')
    def test_get_client_type_returns_a_boto3_client_of_the_requested_type(self):
        # moto mocks work both as decorators and as context managers; the
        # context-manager form is only evaluated when the test runs.
        with mock_emr():
            client = boto3.client('emr', region_name='us-east-1')
            if len(client.list_clusters()['Clusters']):
                raise ValueError('AWS not properly mocked')
            hook = AwsHook(aws_conn_id='aws_default')
            client_from_hook = hook.get_client_type('emr')
            self.assertEqual(client_from_hook.list_clusters()['Clusters'], [])
# Allow running this test module directly with `python test_aws_hook.py`.
if __name__ == '__main__':
    unittest.main()
|
donglin-zhang/testLinkHelper | refs/heads/master | ui_option.py | 1 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'options.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
|
z01nl1o02/tests | refs/heads/master | mxnet/emotion/symbol/ninnet.py | 5 | import os,sys,pdb
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
class NIN(nn.Block):
    # "Network in Network" unit: one spatial convolution followed by two
    # 1x1 convolutions (a per-pixel MLP), optionally ending with a
    # 3x3/stride-2 max-pooling downsample.
    def __init__(self, channels, kernel_size, padding, strides=1, max_pooling=True, **kwargs):
        super(NIN, self).__init__(**kwargs)
        self.conv1 = nn.Conv2D(channels=channels,kernel_size=kernel_size, strides=strides, padding=padding,
                               activation ='relu')
        self.conv2 = nn.Conv2D(channels=channels, kernel_size=1, strides=1, padding=0, activation='relu')
        self.conv3 = nn.Conv2D(channels=channels, kernel_size=1, strides=1, padding=0, activation='relu')
        self.pool4 = None
        if max_pooling:
            self.pool4 = nn.MaxPool2D(pool_size=3,strides=2)
        return
    def forward(self,x):
        # conv stack first, then the optional downsampling
        out = self.conv3( self.conv2( self.conv1(x) ) )
        if self.pool4 is not None:
            out = self.pool4(out)
        return out
class NINNet(nn.Block):
    # NIN-style classifier: three NIN stages, dropout, then a final NIN
    # stage with num_classes channels in place of a Dense head, followed by
    # global average pooling and flattening.
    def __init__(self, num_classes, **kwargs):
        super(NINNet, self).__init__(**kwargs)
        with self.name_scope():
            b1 = NIN(96,11,0,strides=4)
            b2 = NIN(256,5,2)
            b3 = NIN(384,3,1)
            b4 = nn.Dropout(0.5)
            #replace Dense()
            b5 = NIN(num_classes, 3, 1, max_pooling=False)
            b6 = nn.GlobalAvgPool2D()
            b7 = nn.Flatten()
            self.net = nn.Sequential()
            self.net.add(b1,b2,b3,b4,b5,b6,b7)
        return
    def forward(self, x):
        # run the input through each stage in declaration order
        out = x
        for i, blk in enumerate(self.net):
            out = blk(out)
        return out
def get_symbol(num_class,ctx,**kwargs):
    # Factory used by the training script: builds a NINNet with num_class
    # outputs and initializes its parameters on the given context(s).
    net = NINNet(num_class)
    net.initialize(ctx = ctx)
    return net
|
jaymin-panchal/zang-python | refs/heads/master | zang/domain/base_resource.py | 1 | import sys
import datetime
from zang.helpers.helpers import to_python
from enum import Enum
if sys.version_info > (3, 0):
basestring = (str, bytes)
class BaseResource(object):
    # Base class for Zang REST resources. Subclasses list their attribute
    # names in the buckets below; new_from_dict() uses the buckets to
    # deserialize an API response dict into typed attributes.
    _strs = []    # string-valued attribute names
    _ints = []    # integer-valued attribute names
    _reals = []   # float-valued attribute names
    _dates = []   # datetime-valued attribute names
    _bools = []   # boolean-valued attribute names
    _dicts = []   # plain-dict-valued attribute names
    _map = {}     # attribute name -> nested resource class
    _arrays = {}  # attribute name -> list-element resource class
    _enums = {}   # attribute name -> Enum class
    def __init__(self):
        # Pre-create all declared attributes as None-valued private slots.
        self._bootstrap()
        super(BaseResource, self).__init__()
    def __str__(self):
        # Render every non-None private attribute as "name: value", one per
        # line, in sorted attribute order.
        s = ''
        varNames = self._sortedVarsNames()
        for varName in varNames:
            value = vars(self)[varName]
            if value is not None:
                valueStr = self._valueString(value)
                s += varName[1:] + ': ' + valueStr + '\n'
        return s
    def _sortedVarsNames(self):
        """Return sorted self variable names"""
        keys = vars(self).keys()
        # Python 3 dict views cannot be sorted in place; Python 2 lists can.
        if sys.version_info >= (3, 0):
            keys = sorted(keys)
        else:
            keys.sort()
        return keys
    def _valueString(self, value):
        """If the value is not a primitive type, indent it's content"""
        valueStr = str(value)
        if not isinstance(
                value,
                (int, float, bool, basestring, datetime.datetime, Enum)):
            valueStr = '\n\t' + valueStr.replace('\n', '\n\t')
            if valueStr.count('\n') > 1:  # remove last \n\t
                valueStr = valueStr[:-2]
        return valueStr
    def _bootstrap(self):
        """Bootstraps the model object based on configured values."""
        for attr in self._keys():
            privateName = '_' + attr
            setattr(self, privateName, None)
    def _keys(self):
        # NOTE(review): _reals, _dicts and _enums are not included here, so
        # those attributes get no auto-created slot in _bootstrap() --
        # presumably they are always set via new_from_dict(); confirm.
        return self._strs + self._ints + self._dates + self._bools + \
            list(self._map.keys()) + list(self._arrays.keys())
    @classmethod
    def new_from_dict(cls, d, **kwargs):
        # Deserialize *d* into a fresh instance using the class' type
        # buckets; extra keyword arguments are set directly on the instance.
        d = to_python(
            obj=cls(),
            in_dict=d,
            str_keys=cls._strs,
            int_keys=cls._ints,
            real_keys=cls._reals,
            date_keys=cls._dates,
            bool_keys=cls._bools,
            dict_keys=cls._dicts,
            object_map=cls._map,
            array_map=cls._arrays,
            enums=cls._enums,
        )
        d.__dict__.update(kwargs)
        return d
|
matthagy/rtchemstats | refs/heads/master | setupegg.py | 3 | #!/usr/bin/env python
# Copyright (C) 2012 Matt Hagy <hagy@gatech.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Wrapper to run setup.py using setuptools."""
import setuptools
# Importing setuptools first monkey-patches distutils, so re-running the
# plain setup.py in this process gains egg/develop commands (Python 2 only).
execfile('setup.py')
|
elastic-coders/graphqllib | refs/heads/master | tests/core_language/test_parser.py | 3 | from pytest import raises
from graphql.core.language.error import LanguageError
from graphql.core.language.source import Source
from graphql.core.language.parser import parse
from graphql.core.language import ast
from fixtures import KITCHEN_SINK
def test_parse_provides_useful_errors():
    # Each failure mode should raise a LanguageError whose message pins
    # down the source position and what was expected.
    with raises(LanguageError) as excinfo:
        parse("""{ ...MissingOn }
fragment MissingOn Type
""")
    assert 'Syntax Error GraphQL (2:20) Expected "on", found Name "Type"' in str(excinfo.value)
    with raises(LanguageError) as excinfo:
        parse('{ field: {} }')
    assert 'Syntax Error GraphQL (1:10) Expected Name, found {' in str(excinfo.value)
    with raises(LanguageError) as excinfo:
        parse('notanoperation Foo { field }')
    assert 'Syntax Error GraphQL (1:1) Unexpected Name "notanoperation"' in str(excinfo.value)
    with raises(LanguageError) as excinfo:
        parse('...')
    assert 'Syntax Error GraphQL (1:1) Unexpected ...' in str(excinfo.value)
def test_parse_provides_useful_error_when_using_source():
    # Errors should report the Source's name instead of the generic
    # "GraphQL" label.
    with raises(LanguageError) as excinfo:
        parse(Source('query', 'MyQuery.graphql'))
    assert 'Syntax Error MyQuery.graphql (1:6) Expected Name, found EOF' in str(excinfo.value)
def test_parses_variable_inline_values():
    # Variables are allowed inside non-constant input object literals.
    parse('{ field(complex: { a: { b: [ $var ] } }) }')
def test_parses_constant_default_values():
    # Default values must be constant, so a nested $var is rejected.
    with raises(LanguageError) as excinfo:
        parse('query Foo($x: Complex = { a: { b: [ $var ] } }) { field }')
    assert 'Syntax Error GraphQL (1:37) Unexpected $' in str(excinfo.value)
def test_duplicate_keys_in_input_object_is_syntax_error():
    # Repeating a key inside an input object literal fails at parse time.
    with raises(LanguageError) as excinfo:
        parse('{ field(arg: { a: 1, a: 2 }) }')
    assert 'Syntax Error GraphQL (1:22) Duplicate input object field a.' in str(excinfo.value)
def test_parses_kitchen_sink():
    # The full kitchen-sink document from the fixtures must parse cleanly.
    parse(KITCHEN_SINK)
def test_parse_creates_ast():
    # Parsing a small document must produce exactly this AST, including the
    # loc (character-offset span) bookkeeping on every node.
    source = Source("""{
  node(id: 4) {
    id,
    name
  }
}
""")
    result = parse(source)
    assert result == \
        ast.Document(
            loc={'start': 0, 'end': 41, 'source': source},
            definitions=
            [ast.OperationDefinition(
                loc={'start': 0, 'end': 40, 'source': source},
                operation='query',
                name=None,
                variable_definitions=None,
                directives=[],
                selection_set=ast.SelectionSet(
                    loc={'start': 0, 'end': 40, 'source': source},
                    selections=
                    [ast.Field(
                        loc={'start': 4, 'end': 38, 'source': source},
                        alias=None,
                        name=ast.Name(
                            loc={'start': 4, 'end': 8, 'source': source},
                            value='node'),
                        arguments=[ast.Argument(
                            name=ast.Name(loc={'start': 9, 'end': 11, 'source': source},
                                          value='id'),
                            value=ast.IntValue(
                                loc={'start': 13, 'end': 14, 'source': source},
                                value='4'),
                            loc={'start': 9, 'end': 14, 'source': source})],
                        directives=[],
                        selection_set=ast.SelectionSet(
                            loc={'start': 16, 'end': 38, 'source': source},
                            selections=
                            [ast.Field(
                                loc={'start': 22, 'end': 24, 'source': source},
                                alias=None,
                                name=ast.Name(
                                    loc={'start': 22, 'end': 24, 'source': source},
                                    value='id'),
                                arguments=[],
                                directives=[],
                                selection_set=None),
                             ast.Field(
                                loc={'start': 30, 'end': 34, 'source': source},
                                alias=None,
                                name=ast.Name(
                                    loc={'start': 30, 'end': 34, 'source': source},
                                    value='name'),
                                arguments=[],
                                directives=[],
                                selection_set=None)]))]))])
|
dynius/p2pool | refs/heads/master | SOAPpy/GSIServer.py | 289 | from __future__ import nested_scopes
"""
GSIServer - Contributed by Ivan R. Judson <judson@mcs.anl.gov>
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: GSIServer.py 1468 2008-05-24 01:55:33Z warnes $'
from version import __version__
#import xml.sax
import re
import socket
import sys
import SocketServer
from types import *
import BaseHTTPServer
# SOAPpy modules
from Parser import parseSOAPRPC
from Config import SOAPConfig
from Types import faultType, voidType, simplify
from NS import NS
from SOAPBuilder import buildSOAP
from Utilities import debugHeader, debugFooter
try: from M2Crypto import SSL
except: pass
#####
from Server import *
from pyGlobus.io import GSITCPSocketServer, ThreadingGSITCPSocketServer
from pyGlobus import ioc
def GSIConfig():
    # Build a SOAPConfig preconfigured for GSI: wrapped secure channel with
    # full proxy delegation. authMethod names the authorization callback
    # ("_authorize") -- presumably resolved on the server object; confirm.
    config = SOAPConfig()
    config.channel_mode = ioc.GLOBUS_IO_SECURE_CHANNEL_MODE_GSI_WRAP
    config.delegation_mode = ioc.GLOBUS_IO_SECURE_DELEGATION_MODE_FULL_PROXY
    config.tcpAttr = None
    config.authMethod = "_authorize"
    return config
# Module-level default configuration used by the server classes below.
Config = GSIConfig()
class GSISOAPServer(GSITCPSocketServer, SOAPServerBase):
    # Single-connection SOAP server over a GSI-secured TCP socket.
    def __init__(self, addr = ('localhost', 8000),
        RequestHandler = SOAPRequestHandler, log = 0,
        encoding = 'UTF-8', config = Config, namespace = None):
        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)
        self.namespace = namespace
        self.objmap = {}     # registered object dispatch table
        self.funcmap = {}    # registered function dispatch table
        self.encoding = encoding
        self.config = config
        self.log = log
        self.allow_reuse_address= 1
        GSITCPSocketServer.__init__(self, addr, RequestHandler,
                                    self.config.channel_mode,
                                    self.config.delegation_mode,
                                    tcpAttr = self.config.tcpAttr)
    def get_request(self):
        # Delegates to the GSI socket server; kept as an explicit hook point.
        sock, addr = GSITCPSocketServer.get_request(self)
        return sock, addr
class ThreadingGSISOAPServer(ThreadingGSITCPSocketServer, SOAPServerBase):
    # Thread-per-request variant of GSISOAPServer.
    def __init__(self, addr = ('localhost', 8000),
        RequestHandler = SOAPRequestHandler, log = 0,
        encoding = 'UTF-8', config = Config, namespace = None):
        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)
        self.namespace = namespace
        self.objmap = {}     # registered object dispatch table
        self.funcmap = {}    # registered function dispatch table
        self.encoding = encoding
        self.config = config
        self.log = log
        self.allow_reuse_address= 1
        ThreadingGSITCPSocketServer.__init__(self, addr, RequestHandler,
                                             self.config.channel_mode,
                                             self.config.delegation_mode,
                                             tcpAttr = self.config.tcpAttr)
    def get_request(self):
        # Delegates to the threading GSI socket server; explicit hook point.
        sock, addr = ThreadingGSITCPSocketServer.get_request(self)
        return sock, addr
|
akosyakov/intellij-community | refs/heads/master | python/testData/resolve/Global.py | 83 | xx = 1
def f():
global x<ref>x
print xx
|
HalcyonChimera/osf.io | refs/heads/develop | addons/forward/migrations/0003_auto_20170713_1125.py | 22 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-13 16:25
from __future__ import unicode_literals
import pytz
import datetime
from django.db import migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
    # Auto-generated migration: adds the django-extensions timestamp
    # fields (created/modified) to the forward add-on's NodeSettings.

    dependencies = [
        ('addons_forward', '0002_nodesettings_owner'),
    ]

    operations = [
        migrations.AddField(
            model_name='nodesettings',
            name='created',
            # Existing rows are backfilled with the Unix epoch (UTC); new
            # rows get auto_now_add.  preserve_default=False drops the
            # epoch default once the column exists.
            field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, default=datetime.datetime(1970, 1, 1, 0, 0, tzinfo=pytz.utc), verbose_name='created'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='nodesettings',
            name='modified',
            field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
        ),
    ]
|
Universal-Model-Converter/UMC3.0a | refs/heads/master | data/Python/x86/Lib/binhex.py | 216 | """Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
import sys
import os
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
    """Raised for all binhex/hexbin format and state errors."""
# States (what have we written)
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
# Various constants
REASONABLY_LARGE=32768 # Minimal amount we pass the rle-coder
LINELEN=64
RUNCHAR=chr(0x90) # run-length introducer
#
# This code is no longer byte-order dependent
#
# Workarounds for non-mac machines.
try:
    from Carbon.File import FSSpec, FInfo
    from MacOS import openrf

    def getfileinfo(name):
        """Return (basename, FInfo, data-fork size, resource-fork size)."""
        finfo = FSSpec(name).FSpGetFInfo()
        dir, file = os.path.split(name)
        # XXX Get resource/data sizes
        fp = open(name, 'rb')
        fp.seek(0, 2)
        dlen = fp.tell()
        fp.close()  # bug fix: don't leak the data-fork handle
        fp = openrf(name, '*rb')
        fp.seek(0, 2)
        rlen = fp.tell()
        fp.close()  # bug fix: don't leak the resource-fork handle
        return file, finfo, dlen, rlen

    def openrsrc(name, *mode):
        if not mode:
            mode = '*rb'
        else:
            mode = '*' + mode[0]
        return openrf(name, mode)

except ImportError:
    #
    # Glue code for non-macintosh usage
    #
    class FInfo:
        # Minimal stand-in for the Carbon FInfo record.
        def __init__(self):
            self.Type = '????'
            self.Creator = '????'
            self.Flags = 0

    def getfileinfo(name):
        """Return (basename, FInfo, data size, 0) for a plain file."""
        finfo = FInfo()
        # Quick check for textfile
        fp = open(name)
        # Bug fix: the original opened the file a second time here
        # ("open(name).read(256)") and leaked that handle; reuse fp.
        data = fp.read(256)
        for c in data:
            if not c.isspace() and (c < ' ' or ord(c) > 0x7f):
                break
        else:
            finfo.Type = 'TEXT'
        fp.seek(0, 2)
        dsize = fp.tell()
        fp.close()
        dir, file = os.path.split(name)
        file = file.replace(':', '-', 1)
        return file, finfo, dsize, 0

    class openrsrc:
        # Dummy resource fork: reads empty, writes are discarded.
        def __init__(self, *args):
            pass

        def read(self, *args):
            return ''

        def write(self, *args):
            pass

        def close(self):
            pass
class _Hqxcoderengine:
    """Write data to the coder in 3-byte chunks"""

    def __init__(self, ofp):
        self.ofp = ofp             # underlying output file object
        self.data = ''             # raw bytes not yet encoded (< 3 bytes)
        self.hqxdata = ''          # encoded chars not yet written out
        self.linelen = LINELEN-1   # first line is one short: ':' is already out

    def write(self, data):
        # Buffer input, encode whole 3-byte groups, flush complete lines.
        self.data = self.data + data
        datalen = len(self.data)
        todo = (datalen//3)*3      # b2a_hqx wants a multiple of 3 bytes
        data = self.data[:todo]
        self.data = self.data[todo:]
        if not data:
            return
        self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
        self._flush(0)

    def _flush(self, force):
        # Emit every complete output line; with force also emit the final
        # partial line followed by the closing ':' terminator.
        first = 0
        while first <= len(self.hqxdata)-self.linelen:
            last = first + self.linelen
            self.ofp.write(self.hqxdata[first:last]+'\n')
            self.linelen = LINELEN
            first = last
        self.hqxdata = self.hqxdata[first:]
        if force:
            self.ofp.write(self.hqxdata + ':\n')

    def close(self):
        # Encode any trailing bytes, flush everything, close the output.
        if self.data:
            self.hqxdata = \
                self.hqxdata + binascii.b2a_hqx(self.data)
        self._flush(1)
        self.ofp.close()
        del self.ofp
class _Rlecoderengine:
    """Buffer writes and feed the RLE-coder in suitably large chunks."""

    def __init__(self, ofp):
        self.ofp = ofp    # downstream (hqx) coder
        self.data = ''    # bytes not yet run-length encoded

    def write(self, data):
        self.data += data
        if len(self.data) >= REASONABLY_LARGE:
            self.ofp.write(binascii.rlecode_hqx(self.data))
            self.data = ''

    def close(self):
        if self.data:
            self.ofp.write(binascii.rlecode_hqx(self.data))
        self.ofp.close()
        del self.ofp
class BinHex:
def __init__(self, name_finfo_dlen_rlen, ofp):
name, finfo, dlen, rlen = name_finfo_dlen_rlen
if type(ofp) == type(''):
ofname = ofp
ofp = open(ofname, 'w')
ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
hqxer = _Hqxcoderengine(ofp)
self.ofp = _Rlecoderengine(hqxer)
self.crc = 0
if finfo is None:
finfo = FInfo()
self.dlen = dlen
self.rlen = rlen
self._writeinfo(name, finfo)
self.state = _DID_HEADER
def _writeinfo(self, name, finfo):
nl = len(name)
if nl > 63:
raise Error, 'Filename too long'
d = chr(nl) + name + '\0'
d2 = finfo.Type + finfo.Creator
# Force all structs to be packed with big-endian
d3 = struct.pack('>h', finfo.Flags)
d4 = struct.pack('>ii', self.dlen, self.rlen)
info = d + d2 + d3 + d4
self._write(info)
self._writecrc()
def _write(self, data):
self.crc = binascii.crc_hqx(data, self.crc)
self.ofp.write(data)
def _writecrc(self):
# XXXX Should this be here??
# self.crc = binascii.crc_hqx('\0\0', self.crc)
if self.crc < 0:
fmt = '>h'
else:
fmt = '>H'
self.ofp.write(struct.pack(fmt, self.crc))
self.crc = 0
def write(self, data):
if self.state != _DID_HEADER:
raise Error, 'Writing data at the wrong time'
self.dlen = self.dlen - len(data)
self._write(data)
def close_data(self):
if self.dlen != 0:
raise Error, 'Incorrect data size, diff=%r' % (self.rlen,)
self._writecrc()
self.state = _DID_DATA
def write_rsrc(self, data):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Writing resource data at the wrong time'
self.rlen = self.rlen - len(data)
self._write(data)
def close(self):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Close at the wrong time'
if self.rlen != 0:
raise Error, \
"Incorrect resource-datasize, diff=%r" % (self.rlen,)
self._writecrc()
self.ofp.close()
self.state = None
del self.ofp
def binhex(inp, out):
    """(infilename, outfilename) - Create binhex-encoded copy of a file"""
    finfo = getfileinfo(inp)
    ofp = BinHex(finfo, out)

    # Encode the data fork in 128000-byte chunks.
    ifp = open(inp, 'rb')
    # XXXX Do textfile translation on non-mac systems
    while 1:
        d = ifp.read(128000)
        if not d: break
        ofp.write(d)
    ofp.close_data()
    ifp.close()

    # Then the resource fork (reads as empty on non-mac platforms).
    ifp = openrsrc(inp, 'rb')
    while 1:
        d = ifp.read(128000)
        if not d: break
        ofp.write_rsrc(d)
    ofp.close()
    ifp.close()
class _Hqxdecoderengine:
    """Read data via the decoder in 4-byte chunks"""

    def __init__(self, ifp):
        self.ifp = ifp
        self.eof = 0   # set once a2b_hqx sees the trailing ':'

    def read(self, totalwtd):
        """Read at least wtd bytes (or until EOF)"""
        decdata = ''
        wtd = totalwtd
        #
        # The loop here is convoluted, since we don't really know how
        # much to decode: there may be newlines in the incoming data.
        while wtd > 0:
            if self.eof: return decdata
            wtd = ((wtd+2)//3)*4   # 4 encoded chars yield 3 decoded bytes
            data = self.ifp.read(wtd)
            #
            # Next problem: there may not be a complete number of
            # bytes in what we pass to a2b. Solve by yet another
            # loop.
            #
            while 1:
                try:
                    decdatacur, self.eof = \
                            binascii.a2b_hqx(data)
                    break
                except binascii.Incomplete:
                    pass
                # Top up one byte at a time until a2b_hqx is satisfied.
                newdata = self.ifp.read(1)
                if not newdata:
                    raise Error, \
                          'Premature EOF on binhex file'
                data = data + newdata
            decdata = decdata + decdatacur
            wtd = totalwtd - len(decdata)
            if not decdata and not self.eof:
                raise Error, 'Premature EOF on binhex file'
        return decdata

    def close(self):
        self.ifp.close()
class _Rledecoderengine:
    """Read data via the RLE-coder"""

    def __init__(self, ifp):
        self.ifp = ifp
        self.pre_buffer = ''    # undecoded bytes from the hqx layer
        self.post_buffer = ''   # decoded bytes not yet handed out
        self.eof = 0

    def read(self, wtd):
        if wtd > len(self.post_buffer):
            self._fill(wtd-len(self.post_buffer))
        rv = self.post_buffer[:wtd]
        self.post_buffer = self.post_buffer[wtd:]
        return rv

    def _fill(self, wtd):
        # Over-read by 4 bytes so the end-of-buffer analysis below has
        # enough lookahead to recognize a split RUNCHAR sequence.
        self.pre_buffer = self.pre_buffer + self.ifp.read(wtd+4)
        if self.ifp.eof:
            self.post_buffer = self.post_buffer + \
                binascii.rledecode_hqx(self.pre_buffer)
            self.pre_buffer = ''
            return

        #
        # Obfuscated code ahead. We have to take care that we don't
        # end up with an orphaned RUNCHAR later on. So, we keep a couple
        # of bytes in the buffer, depending on what the end of
        # the buffer looks like:
        # '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
        # '?\220' - Keep 2 bytes: repeated something-else
        # '\220\0' - Escaped \220: Keep 2 bytes.
        # '?\220?' - Complete repeat sequence: decode all
        # otherwise: keep 1 byte.
        #
        mark = len(self.pre_buffer)
        if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
            mark = mark - 3
        elif self.pre_buffer[-1] == RUNCHAR:
            mark = mark - 2
        elif self.pre_buffer[-2:] == RUNCHAR + '\0':
            mark = mark - 2
        elif self.pre_buffer[-2] == RUNCHAR:
            pass # Decode all
        else:
            mark = mark - 1

        # Decode everything up to the hold-back point; keep the rest.
        self.post_buffer = self.post_buffer + \
            binascii.rledecode_hqx(self.pre_buffer[:mark])
        self.pre_buffer = self.pre_buffer[mark:]

    def close(self):
        self.ifp.close()
class HexBin:
    """Decoder for a BinHex 4.0 (.hqx) stream.

    After construction the header has been parsed; FName, FInfo, dlen and
    rlen describe the encoded file.  Call read()/close_data() for the data
    fork, then read_rsrc()/close() for the resource fork.
    """

    def __init__(self, ifp):
        if type(ifp) == type(''):
            ifp = open(ifp)
        #
        # Find initial colon.
        #
        while 1:
            ch = ifp.read(1)
            if not ch:
                raise Error, "No binhex data found"
            # Cater for \r\n terminated lines (which show up as \n\r, hence
            # all lines start with \r)
            if ch == '\r':
                continue
            if ch == ':':
                break
            if ch != '\n':
                dummy = ifp.readline()

        # Pipeline: file -> hqx (6-bit) decoder -> RLE decoder.
        hqxifp = _Hqxdecoderengine(ifp)
        self.ifp = _Rledecoderengine(hqxifp)
        self.crc = 0
        self._readheader()

    def _read(self, len):
        # Read while updating the running CRC.
        data = self.ifp.read(len)
        self.crc = binascii.crc_hqx(data, self.crc)
        return data

    def _checkcrc(self):
        filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
        #self.crc = binascii.crc_hqx('\0\0', self.crc)
        # XXXX Is this needed??
        self.crc = self.crc & 0xffff
        if filecrc != self.crc:
            raise Error, 'CRC error, computed %x, read %x' \
                  %(self.crc, filecrc)
        self.crc = 0

    def _readheader(self):
        # Header layout: length-prefixed name, NUL, type(4), creator(4),
        # flags(2), data-fork length(4), resource-fork length(4), then CRC.
        len = self._read(1)
        fname = self._read(ord(len))
        rest = self._read(1+4+4+2+4+4)
        self._checkcrc()

        type = rest[1:5]
        creator = rest[5:9]
        flags = struct.unpack('>h', rest[9:11])[0]
        self.dlen = struct.unpack('>l', rest[11:15])[0]
        self.rlen = struct.unpack('>l', rest[15:19])[0]

        self.FName = fname
        self.FInfo = FInfo()
        self.FInfo.Creator = creator
        self.FInfo.Type = type
        self.FInfo.Flags = flags

        self.state = _DID_HEADER

    def read(self, *n):
        """Read (up to n bytes of) the data fork."""
        if self.state != _DID_HEADER:
            raise Error, 'Read data at wrong time'
        if n:
            n = n[0]
            n = min(n, self.dlen)
        else:
            n = self.dlen
        rv = ''
        while len(rv) < n:
            rv = rv + self._read(n-len(rv))
        self.dlen = self.dlen - n
        return rv

    def close_data(self):
        if self.state != _DID_HEADER:
            raise Error, 'close_data at wrong time'
        if self.dlen:
            dummy = self._read(self.dlen)   # skip unread data-fork bytes
        self._checkcrc()
        self.state = _DID_DATA

    def read_rsrc(self, *n):
        """Read (up to n bytes of) the resource fork."""
        if self.state == _DID_HEADER:
            self.close_data()
        if self.state != _DID_DATA:
            raise Error, 'Read resource data at wrong time'
        if n:
            n = n[0]
            n = min(n, self.rlen)
        else:
            n = self.rlen
        self.rlen = self.rlen - n
        return self._read(n)

    def close(self):
        if self.rlen:
            dummy = self.read_rsrc(self.rlen)   # skip unread resource bytes
        self._checkcrc()
        self.state = _DID_RSRC
        self.ifp.close()
def hexbin(inp, out):
    """(infilename, outfilename) - Decode binhexed file"""
    ifp = HexBin(inp)
    finfo = ifp.FInfo
    if not out:
        out = ifp.FName   # fall back to the name stored in the header

    # Data fork first.
    ofp = open(out, 'wb')
    # XXXX Do translation on non-mac systems
    while 1:
        d = ifp.read(128000)
        if not d: break
        ofp.write(d)
    ofp.close()
    ifp.close_data()

    # Resource fork only if non-empty (on non-mac platforms openrsrc
    # discards writes anyway).
    d = ifp.read_rsrc(128000)
    if d:
        ofp = openrsrc(out, 'wb')
        ofp.write(d)
        while 1:
            d = ifp.read_rsrc(128000)
            if not d: break
            ofp.write(d)
        ofp.close()

    ifp.close()
def _test():
    # Minimal smoke test: round-trip the file named on the command line.
    fname = sys.argv[1]
    binhex(fname, fname+'.hqx')
    hexbin(fname+'.hqx', fname+'.viahqx')
    #hexbin(fname, fname+'.unpacked')
    # NOTE(review): exits with status 1 even on success -- looks
    # unintentional, but kept as-is; confirm before changing.
    sys.exit(1)
if __name__ == '__main__':
_test()
|
ellepdesk/python-snap7 | refs/heads/master | test/test_partner.py | 1 | import logging
import unittest as unittest
import mock
import snap7.partner
from snap7.snap7exceptions import Snap7Exception
logging.basicConfig(level=logging.WARNING)
class TestPartner(unittest.TestCase):
    """Exercises snap7.partner.Partner against a live partner instance.

    setUp starts a fresh partner for every test; most tests simply call
    the wrapped API and rely on the wrapper raising on failure.
    """

    def setUp(self):
        self.partner = snap7.partner.Partner()
        self.partner.start()

    def test_as_b_send(self):
        self.partner.as_b_send()

    @unittest.skip("we don't recv something yet")
    def test_b_recv(self):
        self.partner.b_recv()

    def test_b_send(self):
        self.partner.b_send()

    def test_check_as_b_recv_completion(self):
        self.partner.check_as_b_recv_completion()

    def test_check_as_b_send_completion(self):
        self.partner.check_as_b_send_completion()

    def test_create(self):
        self.partner.create()

    def test_destroy(self):
        self.partner.destroy()

    def test_error_text(self):
        snap7.common.error_text(0, context="partner")

    def test_get_last_error(self):
        self.partner.get_last_error()

    def test_get_param(self):
        # Expected value for every readable parameter of a fresh partner.
        expected = (
            (snap7.snap7types.LocalPort, 0),
            (snap7.snap7types.RemotePort, 102),
            (snap7.snap7types.PingTimeout, 750),
            (snap7.snap7types.SendTimeout, 10),
            (snap7.snap7types.RecvTimeout, 3000),
            (snap7.snap7types.SrcRef, 256),
            (snap7.snap7types.DstRef, 0),
            (snap7.snap7types.SrcTSap, 0),
            (snap7.snap7types.PDURequest, 480),
            (snap7.snap7types.WorkInterval, 100),
            (snap7.snap7types.BSendTimeout, 3000),
            (snap7.snap7types.BRecvTimeout, 3000),
            (snap7.snap7types.RecoveryTime, 500),
            (snap7.snap7types.KeepAliveTime, 5000),
        )
        for param, value in expected:
            self.assertEqual(self.partner.get_param(param), value)
        # Reading MaxClients on a partner must raise.
        self.assertRaises(Exception, self.partner.get_param,
                          snap7.snap7types.MaxClients)

    def test_get_stats(self):
        self.partner.get_stats()

    def test_get_status(self):
        self.partner.get_status()

    def test_get_times(self):
        self.partner.get_times()

    def test_set_param(self):
        # Each of these parameters must be settable on a started partner.
        values = (
            (snap7.snap7types.PingTimeout, 800),
            (snap7.snap7types.SendTimeout, 15),
            (snap7.snap7types.RecvTimeout, 3500),
            (snap7.snap7types.WorkInterval, 50),
            (snap7.snap7types.SrcRef, 128),
            (snap7.snap7types.DstRef, 128),
            (snap7.snap7types.SrcTSap, 128),
            (snap7.snap7types.PDURequest, 470),
            (snap7.snap7types.BSendTimeout, 2000),
            (snap7.snap7types.BRecvTimeout, 2000),
            (snap7.snap7types.RecoveryTime, 400),
            (snap7.snap7types.KeepAliveTime, 4000),
        )
        for param, value in values:
            self.partner.set_param(param, value)
        # Setting RemotePort must raise.
        self.assertRaises(Exception, self.partner.set_param,
                          snap7.snap7types.RemotePort, 1)

    def test_set_recv_callback(self):
        self.partner.set_recv_callback()

    def test_set_send_callback(self):
        self.partner.set_send_callback()

    def test_start(self):
        self.partner.start()

    def test_start_to(self):
        self.partner.start_to('0.0.0.0', '0.0.0.0', 0, 0)

    def test_stop(self):
        self.partner.stop()

    def test_wait_as_b_send_completion(self):
        # No peer is connected, so the blocking wait must raise.
        self.assertRaises(Snap7Exception, self.partner.wait_as_b_send_completion)
class TestLibraryIntegration(unittest.TestCase):
    """Checks Partner creation/destruction against a mocked snap7 library."""

    def setUp(self):
        # replace the function load_library with a mock
        self.loadlib_patch = mock.patch('snap7.partner.load_library')
        self.loadlib_func = self.loadlib_patch.start()

        # have load_library return another mock
        self.mocklib = mock.MagicMock()
        self.loadlib_func.return_value = self.mocklib

        # have the Par_Create of the mock return None
        self.mocklib.Par_Create.return_value = None

    def tearDown(self):
        # restore load_library
        self.loadlib_patch.stop()

    def test_create(self):
        # Constructing a Partner must call Par_Create exactly once.
        partner = snap7.partner.Partner()
        self.mocklib.Par_Create.assert_called_once()

    def test_gc(self):
        # Garbage collection of a Partner must call Par_Destroy.
        partner = snap7.partner.Partner()
        del partner
        self.mocklib.Par_Destroy.assert_called_once()
if __name__ == '__main__':
unittest.main()
|
leighpauls/k2cro4 | refs/heads/master | third_party/WebKit/Source/ThirdParty/gtest/test/gtest_color_test.py | 3259 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Bug fix: this used to read "IS_WINDOWS = os.name = 'nt'", a chained
# assignment that overwrote os.name with 'nt' on every platform and made
# IS_WINDOWS unconditionally truthy (the string 'nt').
IS_WINDOWS = os.name == 'nt'

COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code."""
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)

  if color_flag is None:
    args = []
  else:
    args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
  p = gtest_test_utils.Subprocess([COMMAND] + args)
  # Truthy when the child used color (nonzero exit code) or when it did
  # not exit normally, so crashes read as failures in the callers' asserts.
  return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """Covers the TERM / GTEST_COLOR / --gtest_color decision matrix."""

  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    self.assert_(UsesColor('linux', None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))

  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('emacs', None, 'auto'))
    self.assert_(UsesColor('xterm', None, 'auto'))
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))

  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
    self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))

  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The flag takes precedence over the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))

  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))
    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
|
jb-boin/mariadb-10.0 | refs/heads/master | storage/tokudb/mysql-test/tokudb/t/change_column_multiple_columns.py | 56 | import sys
import itertools
cols = [ 'a', 'b', 'c', 'd', 'e' ]
old_types = [ 'VARCHAR(1)', 'VARBINARY(1)', 'INT', 'CHAR(1)', 'BINARY(1)' ]
new_types = [ 'VARCHAR(2)', 'VARBINARY(2)', 'BIGINT', 'CHAR(2)', 'BINARY(2)' ]
def main():
    # Emit a mysql-test (.test) script to stdout (Python 2 print statements).
    print "# this test generated by change_multiple_columns.py"
    print "# this test generated multiple column changes which should all fail since we support only one at a time"
    print "--disable_warnings"
    print "DROP TABLE IF EXISTS t;"
    print "--enable_warnings"
    print "SET SESSION TOKUDB_DISABLE_SLOW_ALTER=1;"
    print "SET SESSION DEFAULT_STORAGE_ENGINE='TokuDB';"

    # Build "CREATE TABLE t (a VARCHAR(1), b VARBINARY(1), ...)" from the
    # module-level cols/old_types tables.
    create_cmd = "CREATE TABLE t ("
    for i in range(len(cols)):
        create_cmd += "%s %s" % (cols[i], old_types[i])
        if i < len(cols)-1:
            create_cmd += ","
    print "%s);" % (create_cmd)

    # Every ALTER changing two or more columns at once must be rejected
    # with ER_UNSUPPORTED_EXTENSION.
    l = range(len(cols))
    for t in combinations(l, range(2,len(cols))):
        alter_cmd = gen_alter(t)
        print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/"
        print "--error ER_UNSUPPORTED_EXTENSION"
        print "%s;" % (alter_cmd)

    print "DROP TABLE t;"
    return 0
def gen_alter(t):
    """Build an ALTER TABLE statement changing every column indexed by t."""
    changes = ["CHANGE COLUMN %s %s %s" % (cols[i], cols[i], new_types[i])
               for i in t]
    return "ALTER TABLE t " + ",".join(changes)
def combinations(l, r):
    """Return the k-combinations of l for every k in r, as one flat list."""
    return [combo for k in r for combo in itertools.combinations(l, k)]
sys.exit(main())
|
eg-zhang/scikit-learn | refs/heads/master | sklearn/utils/tests/test_optimize.py | 239 | import numpy as np
from sklearn.utils.optimize import newton_cg
from scipy.optimize import fmin_ncg
from sklearn.utils.testing import assert_array_almost_equal
def test_newton_cg():
    # Test that newton_cg gives same result as scipy's fmin_ncg
    rng = np.random.RandomState(0)
    A = rng.normal(size=(10, 10))
    x0 = np.ones(10)

    def func(x):
        Ax = A.dot(x)
        return .5 * (Ax).dot(Ax)

    def grad(x):
        return A.T.dot(A.dot(x))

    def hess(x, p):
        # The Hessian of func is the constant matrix A^T A, so the
        # Hessian-vector product is simply A^T A p.  (The original used
        # "p.dot(A.T.dot(A.dot(x.all())))", which only gives the right
        # value by accident: x.all() collapses to the scalar True/1
        # whenever x has no zero entries, and A^T A is symmetric.)
        return A.T.dot(A.dot(p))

    def grad_hess(x):
        return grad(x), lambda x: A.T.dot(A.dot(x))

    assert_array_almost_equal(
        newton_cg(grad_hess, func, grad, x0, tol=1e-10)[0],
        fmin_ncg(f=func, x0=x0, fprime=grad, fhess_p=hess)
    )
|
NaturalGIS/naturalgis_qgis | refs/heads/master | python/plugins/processing/gui/HistoryDialog.py | 15 | # -*- coding: utf-8 -*-
"""
***************************************************************************
HistoryDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
import warnings
from qgis.core import QgsApplication
from qgis.gui import QgsGui, QgsHelp
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt, QCoreApplication
from qgis.PyQt.QtWidgets import QAction, QPushButton, QDialogButtonBox, QStyle, QMessageBox, QFileDialog, QMenu, QTreeWidgetItem
from qgis.PyQt.QtGui import QIcon
from processing.gui import TestTools
from processing.core.ProcessingLog import ProcessingLog, LOG_SEPARATOR
pluginPath = os.path.split(os.path.dirname(__file__))[0]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgHistory.ui'))
class HistoryDialog(BASE, WIDGET):
    """Dialog showing the Processing algorithm-execution history log."""

    def __init__(self):
        super(HistoryDialog, self).__init__(None)
        self.setupUi(self)

        QgsGui.instance().enableAutoGeometryRestore(self)

        self.groupIcon = QgsApplication.getThemeIcon('mIconFolder.svg')

        self.keyIcon = self.style().standardIcon(QStyle.SP_FileIcon)

        # Extra dialog buttons: Clear, Help, Save As.
        self.clearButton = QPushButton(self.tr('Clear'))
        self.clearButton.setToolTip(self.tr('Clear history'))
        self.buttonBox.addButton(self.clearButton, QDialogButtonBox.ActionRole)

        self.helpButton = QPushButton(self.tr('Help'))
        self.helpButton.setToolTip(self.tr('Show help'))
        self.buttonBox.addButton(self.helpButton, QDialogButtonBox.HelpRole)

        self.saveButton = QPushButton(QCoreApplication.translate('HistoryDialog', 'Save As…'))
        self.saveButton.setToolTip(self.tr('Save history'))
        self.buttonBox.addButton(self.saveButton, QDialogButtonBox.ActionRole)

        # Double-click re-runs an entry; selection updates the text pane.
        self.tree.doubleClicked.connect(self.executeAlgorithm)
        self.tree.currentItemChanged.connect(self.changeText)

        self.clearButton.clicked.connect(self.clearLog)
        self.saveButton.clicked.connect(self.saveLog)
        self.helpButton.clicked.connect(self.openHelp)

        self.tree.setContextMenuPolicy(Qt.CustomContextMenu)
        self.tree.customContextMenuRequested.connect(self.showPopupMenu)

        self.fillTree()

    def clearLog(self):
        """Ask for confirmation, then wipe the log and refresh the tree."""
        reply = QMessageBox.question(self,
                                     self.tr('Confirmation'),
                                     self.tr('Are you sure you want to clear the history?'),
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.No
                                     )
        if reply == QMessageBox.Yes:
            ProcessingLog.clearLog()
            self.fillTree()

    def saveLog(self):
        """Prompt for a file name and dump the log to it (.log enforced)."""
        fileName, filter = QFileDialog.getSaveFileName(self,
                                                       self.tr('Save File'), '.', self.tr('Log files (*.log *.LOG)'))

        if fileName == '':
            return

        if not fileName.lower().endswith('.log'):
            fileName += '.log'

        ProcessingLog.saveLog(fileName)

    def openHelp(self):
        QgsHelp.openHelp("processing/history.html")

    def fillTree(self):
        """Rebuild the tree from the log: one ALGORITHM group of entries."""
        self.tree.clear()
        entries = ProcessingLog.getLogEntries()

        groupItem = QTreeWidgetItem()
        groupItem.setText(0, 'ALGORITHM')
        groupItem.setIcon(0, self.groupIcon)
        for entry in entries:
            item = TreeLogEntryItem(entry, True)
            item.setIcon(0, self.keyIcon)
            groupItem.insertChild(0, item)   # insert at top => newest first
        self.tree.addTopLevelItem(groupItem)
        groupItem.setExpanded(True)

    def executeAlgorithm(self):
        """Re-open the algorithm dialog for the selected log entry."""
        item = self.tree.currentItem()
        if isinstance(item, TreeLogEntryItem):
            if item.isAlg:
                script = 'import processing\n'
                # adding to this list? Also update the BatchPanel.py imports!!
                script += 'from qgis.core import QgsProcessingOutputLayerDefinition, QgsProcessingFeatureSourceDefinition, QgsProperty, QgsCoordinateReferenceSystem, QgsFeatureRequest\n'
                script += 'from qgis.PyQt.QtCore import QDate, QTime, QDateTime\n'
                script += 'from qgis.PyQt.QtGui import QColor\n'
                script += item.entry.text.replace('processing.run(', 'processing.execAlgorithmDialog(')
                self.close()
                # NOTE(review): exec() of log-entry text.  Entries are
                # written locally by Processing itself, but this does treat
                # the history file as trusted input.
                exec(script)

    def changeText(self):
        # Show the full multi-field entry text in the detail pane.
        item = self.tree.currentItem()
        if isinstance(item, TreeLogEntryItem):
            self.text.setText(item.entry.text.replace(LOG_SEPARATOR, '\n'))

    def createTest(self):
        item = self.tree.currentItem()
        if isinstance(item, TreeLogEntryItem):
            if item.isAlg:
                TestTools.createTest(item.entry.text)

    def showPopupMenu(self, point):
        """Context menu offering 'Create Test…' for algorithm entries."""
        item = self.tree.currentItem()
        if isinstance(item, TreeLogEntryItem):
            if item.isAlg:
                popupmenu = QMenu()
                createTestAction = QAction(QCoreApplication.translate('HistoryDialog', 'Create Test…'), self.tree)
                createTestAction.triggered.connect(self.createTest)
                popupmenu.addAction(createTestAction)
                popupmenu.exec_(self.tree.mapToGlobal(point))
class TreeLogEntryItem(QTreeWidgetItem):
    """Tree item wrapping one log entry; isAlg marks re-runnable entries."""

    def __init__(self, entry, isAlg):
        QTreeWidgetItem.__init__(self)
        self.entry = entry
        self.isAlg = isAlg
        # Label: "[date] <first LOG_SEPARATOR field of the entry text>".
        self.setText(0, '[' + entry.date + '] ' + entry.text.split(LOG_SEPARATOR)[0])
|
MiniPlayer/log-island | refs/heads/master | logisland-plugins/logisland-scripting-processors-plugin/src/main/resources/nltk/tbl/api.py | 386048 | |
Migdalo/wd-label-builder | refs/heads/master | wdlabelbuilder/__init__.py | 386048 | |
JaDogg/__py_playground | refs/heads/master | reference/parson/eg_trees.py | 1 | """
Testing out tree parsing.
"""
from operator import add, sub
from parson import anyone, capture, delay, nest, one_of, one_that
end = ~anyone   # succeeds only at end of input

# Match pattern p against the single tree x.
def match(p, x): return (p + end)([x])

# Parser accepting any value that is an instance of type_.
def an_instance(type_): return one_that(lambda x: isinstance(x, type_))

# Like capture, but unwrap the 1-tuple to the bare value.
def capture1(p): return capture(p) >> (lambda x: x[0]) # Ouch

var = capture1(anyone)
## (var + var)(eg)
#. ('+', 2)

# Recursive evaluator over prefix trees like ['+', 2, 3].
calc = delay(lambda:
      nest(one_of('+') + calc + calc + end) >> add
    | nest(one_of('-') + calc + calc + end) >> sub
    | capture1(an_instance(int)))

eg = ['+', 2, 3]
## match(calc, eg)
#. (5,)

eg2 = ['+', ['-', 2, 4], 3]
## match(calc, eg2)
#. (1,)

# Exercise: transforming trees with generic walks

# Collect the leaves of arbitrarily nested '+' trees.
flatten1 = delay(lambda:
      nest(one_of('+') + flatten1.star() + end)
    | capture1(an_instance(int)))

## match(flatten1, ['+', ['+', ['+', 1, ['+', 2]]]])
#. (1, 2)

# Figure 2.7 in the OMeta thesis, more or less:
def walk(p, q=capture1(an_instance(int))):
    # Generic traversal: apply p inside '+'/'-' nodes, q at leaves.
    return ( nest(one_of('+') + p.star() + end) >> tag('+')
           | nest(one_of('-') + p.star() + end) >> tag('-')
           | q)

# Rebuild a node tagged with the given constant.
def tag(constant):
    return lambda *args: (constant,) + args

# Flatten nested '+' nodes into a single '+' node.
flatten2 = delay(lambda:
      nest(one_of('+') + flatten2 + end)
    | nest(one_of('+') + inside.star() + end) >> tag('+')
    | walk(flatten2))

inside = delay(lambda:
      nest(one_of('+') + inside.star() + end)
    | flatten2)

## match(flatten2, ['+', ['+', ['+', 1, ['+', 2], ['+', 3, 4]]]])
#. (('+', 1, 2, 3, 4),)
18padx08/PPTex | refs/heads/master | PPTexEnv_x86_64/lib/python2.7/site-packages/sympy/tensor/tests/test_tensor.py | 17 | from sympy import Matrix, eye
from sympy.combinatorics import Permutation
from sympy.core import S, Rational, Symbol, Basic
from sympy.core.containers import Tuple
from sympy.core.symbol import symbols
from sympy.external import import_module
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.printing.pretty.pretty import pretty
from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, \
get_symmetric_group_sgs, TensorType, TensorIndex, tensor_mul, TensAdd, \
riemann_cyclic_replace, riemann_cyclic, TensMul, tensorsymmetry, tensorhead, \
TensorManager, TensExpr, TIDS
from sympy.utilities.pytest import raises, skip
def _is_equal(arg1, arg2):
    """Compare two objects, dispatching to TensExpr.equals when available."""
    for first, second in ((arg1, arg2), (arg2, arg1)):
        if isinstance(first, TensExpr):
            return first.equals(second)
    return arg1 == arg2
#################### Tests from tensor_can.py #######################
def test_canonicalize_no_slot_sym():
    """canon_bp on products of rank-1/rank-2 tensors, varying symmetries."""
    # A_d0 * B^d0; T_c = A^d0*B_d0
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    a, b, d0, d1 = tensor_indices('a,b,d0,d1', Lorentz)
    sym1 = tensorsymmetry([1])
    S1 = TensorType([Lorentz], sym1)
    A, B = S1('A,B')
    t = A(-d0)*B(d0)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0)*B(-L_0)'

    # A^a * B^b; T_c = T
    t = A(a)*B(b)
    tc = t.canon_bp()
    assert tc == t

    # B^b * A^a
    t1 = B(b)*A(a)
    tc = t1.canon_bp()
    assert str(tc) == 'A(a)*B(b)'

    # A symmetric
    # A^{b}_{d0}*A^{d0, a}; T_c = A^{a d0}*A{b}_{d0}
    sym2 = tensorsymmetry([1]*2)
    S2 = TensorType([Lorentz]*2, sym2)
    A = S2('A')
    t = A(b, -d0)*A(d0, a)
    tc = t.canon_bp()
    assert str(tc) == 'A(a, L_0)*A(b, -L_0)'

    # A^{d1}_{d0}*B^d0*C_d1
    # T_c = A^{d0 d1}*B_d0*C_d1
    B, C = S1('B,C')
    t = A(d1, -d0)*B(d0)*C(-d1)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0, L_1)*B(-L_0)*C(-L_1)'

    # A without symmetry
    # A^{d1}_{d0}*B^d0*C_d1 ord=[d0,-d0,d1,-d1]; g = [2,1,0,3,4,5]
    # T_c = A^{d0 d1}*B_d1*C_d0; can = [0,2,3,1,4,5]
    nsym2 = tensorsymmetry([1],[1])
    NS2 = TensorType([Lorentz]*2, nsym2)
    A = NS2('A')
    B, C = S1('B, C')
    t = A(d1, -d0)*B(d0)*C(-d1)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0, L_1)*B(-L_1)*C(-L_0)'

    # A, B without symmetry
    # A^{d1}_{d0}*B_{d1}^{d0}
    # T_c = A^{d0 d1}*B_{d0 d1}
    B = NS2('B')
    t = A(d1, -d0)*B(-d1, d0)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0, L_1)*B(-L_0, -L_1)'

    # A_{d0}^{d1}*B_{d1}^{d0}
    # T_c = A^{d0 d1}*B_{d1 d0}
    t = A(-d0, d1)*B(-d1, d0)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0, L_1)*B(-L_1, -L_0)'

    # A, B, C without symmetry
    # A^{d1 d0}*B_{a d0}*C_{d1 b}
    # T_c=A^{d0 d1}*B_{a d1}*C_{d0 b}
    C = NS2('C')
    t = A(d1, d0)*B(-a, -d0)*C(-d1, -b)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0, L_1)*B(-a, -L_1)*C(-L_0, -b)'

    # A symmetric, B and C without symmetry
    # A^{d1 d0}*B_{a d0}*C_{d1 b}
    # T_c = A^{d0 d1}*B_{a d0}*C_{d1 b}
    A = S2('A')
    t = A(d1, d0)*B(-a, -d0)*C(-d1, -b)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0, L_1)*B(-a, -L_0)*C(-L_1, -b)'

    # A and C symmetric, B without symmetry
    # A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1]
    # T_c = A^{d0 d1}*B_{a d0}*C_{b d1}
    C = S2('C')
    t = A(d1, d0)*B(-a, -d0)*C(-d1, -b)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0, L_1)*B(-a, -L_0)*C(-b, -L_1)'
def test_canonicalize_no_dummies():
    """canon_bp on products with only free indices: factors are sorted,
    with signs coming from anticommutation and slot (anti)symmetry."""
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    a, b, c, d = tensor_indices('a, b, c, d', Lorentz)
    sym1 = tensorsymmetry([1])
    sym2 = tensorsymmetry([1]*2)
    sym2a = tensorsymmetry([2])
    # A commuting
    # A^c A^b A^a
    # T_c = A^a A^b A^c
    S1 = TensorType([Lorentz], sym1)
    A = S1('A')
    t = A(c)*A(b)*A(a)
    tc = t.canon_bp()
    assert str(tc) == 'A(a)*A(b)*A(c)'
    # A anticommuting
    # A^c A^b A^a
    # T_c = -A^a A^b A^c
    A = S1('A', 1)
    t = A(c)*A(b)*A(a)
    tc = t.canon_bp()
    assert str(tc) == '-A(a)*A(b)*A(c)'
    # A commuting and symmetric
    # A^{b,d}*A^{c,a}
    # T_c = A^{a c}*A^{b d}
    S2 = TensorType([Lorentz]*2, sym2)
    A = S2('A')
    t = A(b, d)*A(c, a)
    tc = t.canon_bp()
    assert str(tc) == 'A(a, c)*A(b, d)'
    # A anticommuting and symmetric
    # A^{b,d}*A^{c,a}
    # T_c = -A^{a c}*A^{b d}
    A = S2('A', 1)
    t = A(b, d)*A(c, a)
    tc = t.canon_bp()
    assert str(tc) == '-A(a, c)*A(b, d)'
    # A^{c,a}*A^{b,d}
    # T_c = A^{a c}*A^{b d}
    t = A(c, a)*A(b, d)
    tc = t.canon_bp()
    assert str(tc) == 'A(a, c)*A(b, d)'
def test_no_metric_symmetry():
    """Canonicalization when the index type has no metric (metric=None):
    upper and lower index positions cannot be exchanged, so only dummy
    renaming and factor reordering apply."""
    # no metric symmetry; A no symmetry
    # A^d1_d0 * A^d0_d1
    # T_c = A^d0_d1 * A^d1_d0
    Lorentz = TensorIndexType('Lorentz', metric=None, dummy_fmt='L')
    d0, d1, d2, d3 = tensor_indices('d:4', Lorentz)
    A = tensorhead('A', [Lorentz]*2, [[1], [1]])
    t = A(d1, -d0)*A(d0, -d1)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0, -L_1)*A(L_1, -L_0)'
    # A^d1_d2 * A^d0_d3 * A^d2_d1 * A^d3_d0
    # T_c = A^d0_d1 * A^d1_d0 * A^d2_d3 * A^d3_d2
    t = A(d1, -d2)*A(d0, -d3)*A(d2,-d1)*A(d3,-d0)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0, -L_1)*A(L_1, -L_0)*A(L_2, -L_3)*A(L_3, -L_2)'
    # A^d0_d2 * A^d1_d3 * A^d3_d0 * A^d2_d1
    # T_c = A^d0_d1 * A^d1_d2 * A^d2_d3 * A^d3_d0
    # NOTE(review): the product built below is already the canonical T_c
    # form; the first comment line describes a different (equivalent)
    # starting product -- confirm against upstream which was intended.
    t = A(d0, -d1)*A(d1, -d2)*A(d2, -d3)*A(d3,-d0)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0, -L_1)*A(L_1, -L_2)*A(L_2, -L_3)*A(L_3, -L_0)'
def test_canonicalize1():
    """canon_bp on larger products: dummy-index renaming, slot symmetries,
    (anti)commutation signs, and the antisymmetric-metric / no-metric
    index-type variants."""
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    a, a0, a1, a2, a3, b, d0, d1, d2, d3 = \
        tensor_indices('a,a0,a1,a2,a3,b,d0,d1,d2,d3', Lorentz)
    sym1 = tensorsymmetry([1])
    base3, gens3 = get_symmetric_group_sgs(3)
    sym2 = tensorsymmetry([1]*2)
    sym2a = tensorsymmetry([2])
    sym3 = tensorsymmetry([1]*3)
    sym3a = tensorsymmetry([3])
    # A_d0*A^d0; ord = [d0,-d0]
    # T_c = A^d0*A_d0
    S1 = TensorType([Lorentz], sym1)
    A = S1('A')
    t = A(-d0)*A(d0)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0)*A(-L_0)'
    # A commuting
    # A_d0*A_d1*A_d2*A^d2*A^d1*A^d0
    # T_c = A^d0*A_d0*A^d1*A_d1*A^d2*A_d2
    t = A(-d0)*A(-d1)*A(-d2)*A(d2)*A(d1)*A(d0)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0)*A(-L_0)*A(L_1)*A(-L_1)*A(L_2)*A(-L_2)'
    # A anticommuting
    # A_d0*A_d1*A_d2*A^d2*A^d1*A^d0
    # T_c 0
    A = S1('A', 1)
    t = A(-d0)*A(-d1)*A(-d2)*A(d2)*A(d1)*A(d0)
    tc = t.canon_bp()
    assert tc == 0
    # A commuting symmetric
    # A^{d0 b}*A^a_d1*A^d1_d0
    # T_c = A^{a d0}*A^{b d1}*A_{d0 d1}
    S2 = TensorType([Lorentz]*2, sym2)
    A = S2('A')
    t = A(d0, b)*A(a, -d1)*A(d1, -d0)
    tc = t.canon_bp()
    assert str(tc) == 'A(a, L_0)*A(b, L_1)*A(-L_0, -L_1)'
    # A, B commuting symmetric
    # A^{d0 b}*A^d1_d0*B^a_d1
    # T_c = A^{b d0}*A_d0^d1*B^a_d1
    B = S2('B')
    t = A(d0, b)*A(d1, -d0)*B(a, -d1)
    tc = t.canon_bp()
    assert str(tc) == 'A(b, L_0)*A(-L_0, L_1)*B(a, -L_1)'
    # A commuting symmetric
    # A^{d1 d0 b}*A^{a}_{d1 d0}; ord=[a,b, d0,-d0,d1,-d1]
    # T_c = A^{a d0 d1}*A^{b}_{d0 d1}
    S3 = TensorType([Lorentz]*3, sym3)
    A = S3('A')
    t = A(d1, d0, b)*A(a, -d1, -d0)
    tc = t.canon_bp()
    assert str(tc) == 'A(a, L_0, L_1)*A(b, -L_0, -L_1)'
    # A^{d3 d0 d2}*A^a0_{d1 d2}*A^d1_d3^a1*A^{a2 a3}_d0
    # T_c = A^{a0 d0 d1}*A^a1_d0^d2*A^{a2 a3 d3}*A_{d1 d2 d3}
    t = A(d3, d0, d2)*A(a0, -d1, -d2)*A(d1, -d3, a1)*A(a2, a3, -d0)
    tc = t.canon_bp()
    assert str(tc) == 'A(a0, L_0, L_1)*A(a1, -L_0, L_2)*A(a2, a3, L_3)*A(-L_1, -L_2, -L_3)'
    # A commuting symmetric, B antisymmetric
    # A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
    # in this example and in the next three,
    # renaming dummy indices and using symmetry of A,
    # T = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3
    # can = 0
    S2a = TensorType([Lorentz]*2, sym2a)
    A = S3('A')
    B = S2a('B')
    t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3)
    tc = t.canon_bp()
    assert tc == 0
    # A anticommuting symmetric, B anticommuting
    # A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
    # T_c = A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3}
    A = S3('A', 1)
    B = S2a('B')
    t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3)
    tc = t.canon_bp()
    assert str(tc) == 'A(L_0, L_1, L_2)*A(-L_0, -L_1, L_3)*B(-L_2, -L_3)'
    # A anticommuting symmetric, B antisymmetric commuting, antisymmetric metric
    # A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
    # T_c = -A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3}
    Spinor = TensorIndexType('Spinor', metric=1, dummy_fmt='S')
    a, a0, a1, a2, a3, b, d0, d1, d2, d3 = \
        tensor_indices('a,a0,a1,a2,a3,b,d0,d1,d2,d3', Spinor)
    S3 = TensorType([Spinor]*3, sym3)
    S2a = TensorType([Spinor]*2, sym2a)
    A = S3('A', 1)
    B = S2a('B')
    t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3)
    tc = t.canon_bp()
    assert str(tc) == '-A(S_0, S_1, S_2)*A(-S_0, -S_1, S_3)*B(-S_2, -S_3)'
    # A anticommuting symmetric, B antisymmetric anticommuting,
    # no metric symmetry
    # A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
    # T_c = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3
    Mat = TensorIndexType('Mat', metric=None, dummy_fmt='M')
    a, a0, a1, a2, a3, b, d0, d1, d2, d3 = \
        tensor_indices('a,a0,a1,a2,a3,b,d0,d1,d2,d3', Mat)
    S3 = TensorType([Mat]*3, sym3)
    S2a = TensorType([Mat]*2, sym2a)
    A = S3('A', 1)
    B = S2a('B')
    t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3)
    tc = t.canon_bp()
    assert str(tc) == 'A(M_0, M_1, M_2)*A(-M_0, -M_1, -M_3)*B(-M_2, M_3)'
    # Gamma anticommuting
    # Gamma_{mu nu} * gamma^rho * Gamma^{nu mu alpha}
    # T_c = -Gamma^{mu nu} * gamma^rho * Gamma_{alpha mu nu}
    S1 = TensorType([Lorentz], sym1)
    S2a = TensorType([Lorentz]*2, sym2a)
    S3a = TensorType([Lorentz]*3, sym3a)
    alpha, beta, gamma, mu, nu, rho = \
        tensor_indices('alpha,beta,gamma,mu,nu,rho', Lorentz)
    Gamma = S1('Gamma', 2)
    Gamma2 = S2a('Gamma', 2)
    Gamma3 = S3a('Gamma', 2)
    t = Gamma2(-mu,-nu)*Gamma(rho)*Gamma3(nu, mu, alpha)
    tc = t.canon_bp()
    assert str(tc) == '-Gamma(L_0, L_1)*Gamma(rho)*Gamma(alpha, -L_0, -L_1)'
    # Gamma_{mu nu} * Gamma^{gamma beta} * gamma_rho * Gamma^{nu mu alpha}
    # T_c = Gamma^{mu nu} * Gamma^{beta gamma} * gamma_rho * Gamma^alpha_{mu nu}
    t = Gamma2(mu, nu)*Gamma2(beta, gamma)*Gamma(-rho)*Gamma3(alpha, -mu, -nu)
    tc = t.canon_bp()
    assert str(tc) == 'Gamma(L_0, L_1)*Gamma(beta, gamma)*Gamma(-rho)*Gamma(alpha, -L_0, -L_1)'
    # f^a_{b,c} antisymmetric in b,c; A_mu^a no symmetry
    # f^c_{d a} * f_{c e b} * A_mu^d * A_nu^a * A^{nu e} * A^{mu b}
    # g = [8,11,5, 9,13,7, 1,10, 3,4, 2,12, 0,6, 14,15]
    # T_c = -f^{a b c} * f_a^{d e} * A^mu_b * A_{mu d} * A^nu_c * A_{nu e}
    Flavor = TensorIndexType('Flavor', dummy_fmt='F')
    a, b, c, d, e, ff = tensor_indices('a,b,c,d,e,f', Flavor)
    mu, nu = tensor_indices('mu,nu', Lorentz)
    sym_f = tensorsymmetry([1], [2])
    S_f = TensorType([Flavor]*3, sym_f)
    sym_A = tensorsymmetry([1], [1])
    S_A = TensorType([Lorentz, Flavor], sym_A)
    f = S_f('f')
    A = S_A('A')
    t = f(c, -d, -a)*f(-c, -e, -b)*A(-mu, d)*A(-nu, a)*A(nu, e)*A(mu, b)
    tc = t.canon_bp()
    assert str(tc) == '-f(F_0, F_1, F_2)*f(-F_0, F_3, F_4)*A(L_0, -F_1)*A(-L_0, -F_3)*A(L_1, -F_2)*A(-L_1, -F_4)'
def test_bug_correction_tensor_indices():
    """Regression test: tensor_indices with a single index name must return
    a bare TensorIndex, not a one-element tuple or list."""
    from sympy.tensor.tensor import tensor_indices, TensorIndexType, TensorIndex
    idx_type = TensorIndexType("A")
    single = tensor_indices('i', idx_type)
    assert isinstance(single, TensorIndex)
    assert not isinstance(single, (tuple, list))
def test_riemann_invariants():
    """canon_bp on fully-contracted scalar invariants built from Riemann
    tensors (monoterm symmetries only)."""
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11 = \
        tensor_indices('d0:12', Lorentz)
    # R^{d0 d1}_{d1 d0}; ord = [d0,-d0,d1,-d1]
    # T_c = -R^{d0 d1}_{d0 d1}
    R = tensorhead('R', [Lorentz]*4, [[2, 2]])
    t = R(d0, d1, -d1, -d0)
    tc = t.canon_bp()
    assert str(tc) == '-R(L_0, L_1, -L_0, -L_1)'
    # R_d11^d1_d0^d5 * R^{d6 d4 d0}_d5 * R_{d7 d2 d8 d9} *
    # R_{d10 d3 d6 d4} * R^{d2 d7 d11}_d1 * R^{d8 d9 d3 d10}
    # can = [0,2,4,6, 1,3,8,10, 5,7,12,14, 9,11,16,18, 13,15,20,22,
    # 17,19,21,23, 24,25]
    # T_c = R^{d0 d1 d2 d3} * R_{d0 d1}^{d4 d5} * R_{d2 d3}^{d6 d7} *
    # R_{d4 d5}^{d8 d9} * R_{d6 d7}^{d10 d11} * R_{d8 d9 d10 d11}
    t = R(-d11,d1,-d0,d5)*R(d6,d4,d0,-d5)*R(-d7,-d2,-d8,-d9)* \
        R(-d10,-d3,-d6,-d4)*R(d2,d7,d11,-d1)*R(d8,d9,d3,d10)
    tc = t.canon_bp()
    assert str(tc) == 'R(L_0, L_1, L_2, L_3)*R(-L_0, -L_1, L_4, L_5)*R(-L_2, -L_3, L_6, L_7)*R(-L_4, -L_5, L_8, L_9)*R(-L_6, -L_7, L_10, L_11)*R(-L_8, -L_9, -L_10, -L_11)'
def test_riemann_products():
    """canon_bp on products of Riemann tensors carrying free indices."""
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    d0, d1, d2, d3, d4, d5, d6 = tensor_indices('d0:7', Lorentz)
    a0, a1, a2, a3, a4, a5 = tensor_indices('a0:6', Lorentz)
    a, b = tensor_indices('a,b', Lorentz)
    R = tensorhead('R', [Lorentz]*4, [[2, 2]])
    # R^{a b d0}_d0 = 0
    t = R(a, b, d0, -d0)
    tc = t.canon_bp()
    assert tc == 0
    # R^{d0 b a}_d0
    # T_c = -R^{a d0 b}_d0
    t = R(d0, b, a, -d0)
    tc = t.canon_bp()
    assert str(tc) == '-R(a, L_0, b, -L_0)'
    # R^d1_d2^b_d0 * R^{d0 a}_d1^d2; ord=[a,b,d0,-d0,d1,-d1,d2,-d2]
    # T_c = -R^{a d0 d1 d2}* R^b_{d0 d1 d2}
    t = R(d1, -d2, b, -d0)*R(d0, a, -d1, d2)
    tc = t.canon_bp()
    assert str(tc) == '-R(a, L_0, L_1, L_2)*R(b, -L_0, -L_1, -L_2)'
    # A symmetric commuting
    # R^{d6 d5}_d2^d1 * R^{d4 d0 d2 d3} * A_{d6 d0} A_{d3 d1} * A_{d4 d5}
    # g = [12,10,5,2, 8,0,4,6, 13,1, 7,3, 9,11,14,15]
    # T_c = -R^{d0 d1 d2 d3} * R_d0^{d4 d5 d6} * A_{d1 d4}*A_{d2 d5}*A_{d3 d6}
    V = tensorhead('V', [Lorentz]*2, [[1]*2])
    t = R(d6, d5, -d2, d1)*R(d4, d0, d2, d3)*V(-d6, -d0)*V(-d3, -d1)*V(-d4, -d5)
    tc = t.canon_bp()
    assert str(tc) == '-R(L_0, L_1, L_2, L_3)*R(-L_0, L_4, L_5, L_6)*V(-L_1, -L_4)*V(-L_2, -L_5)*V(-L_3, -L_6)'
    # R^{d2 a0 a2 d0} * R^d1_d2^{a1 a3} * R^{a4 a5}_{d0 d1}
    # T_c = R^{a0 d0 a2 d1}*R^{a1 a3}_d0^d2*R^{a4 a5}_{d1 d2}
    t = R(d2, a0, a2, d0)*R(d1, -d2, a1, a3)*R(a4, a5, -d0, -d1)
    tc = t.canon_bp()
    assert str(tc) == 'R(a0, L_0, a2, L_1)*R(a1, a3, -L_0, L_2)*R(a4, a5, -L_1, -L_2)'
######################################################################
def test_canonicalize2():
    """Identities for contracted rank-3 antisymmetric tensors
    (Cvitanovic, Group Theory, p. 59): both contractions vanish."""
    D = Symbol('D')
    Eucl = TensorIndexType('Eucl', metric=0, dim=D, dummy_fmt='E')
    i0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14 = \
        tensor_indices('i0:15', Eucl)
    A = tensorhead('A', [Eucl]*3, [[3]])
    # two examples from Cvitanovic, Group Theory page 59
    # of identities for antisymmetric tensors of rank 3
    # contracted according to the Kuratowski graph eq.(6.59)
    t = A(i0,i1,i2)*A(-i1,i3,i4)*A(-i3,i7,i5)*A(-i2,-i5,i6)*A(-i4,-i6,i8)
    t1 = t.canon_bp()
    assert t1 == 0
    # eq.(6.60)
    #t = A(i0,i1,i2)*A(-i1,i3,i4)*A(-i2,i5,i6)*A(-i3,i7,i8)*A(-i6,-i7,i9)*
    #    A(-i8,i10,i13)*A(-i5,-i10,i11)*A(-i4,-i11,i12)*A(-i9,-i12,i14)
    t = A(i0,i1,i2)*A(-i1,i3,i4)*A(-i2,i5,i6)*A(-i3,i7,i8)*A(-i6,-i7,i9)*\
        A(-i8,i10,i13)*A(-i5,-i10,i11)*A(-i4,-i11,i12)*A(-i9,-i12,i14)
    t1 = t.canon_bp()
    assert t1 == 0
def test_canonicalize3():
    """Canonical ordering of products of anticommuting spinor fields."""
    dim = Symbol('D')
    Spinor = TensorIndexType('Spinor', dim=dim, metric=True, dummy_fmt='S')
    a0, a1, a2, a3, a4 = tensor_indices('a0:5', Spinor)
    C = Spinor.metric
    chi, psi = tensorhead('chi,psi', [Spinor], [[1]], 1)
    # chi(a1)*psi(a0) is already in canonical factor order
    expr = chi(a1)*psi(a0)
    assert expr.canon_bp() == expr
    # swapping the two anticommuting factors picks up a minus sign
    expr = psi(a1)*chi(a0)
    assert expr.canon_bp() == -chi(a0)*psi(a1)
class Metric(Basic):
    """Minimal stand-in metric object used by test_TensorIndexType below:
    it just records a name and an ``antisym`` flag, the two attributes a
    TensorIndexType metric argument is read for."""
    def __new__(cls, name, antisym, **kwargs):
        obj = Basic.__new__(cls, name, antisym, **kwargs)
        # stored as plain attributes for convenient access in tests
        obj.name = name
        obj.antisym = antisym
        return obj
def test_TensorIndexType():
    """TensorIndexType with a custom metric object; eps_dim defaulting and
    default dummy-name formatting (TSpace_0)."""
    D = Symbol('D')
    G = Metric('g', False)
    Lorentz = TensorIndexType('Lorentz', metric=G, dim=D, dummy_fmt='L')
    m0, m1, m2, m3, m4 = tensor_indices('m0:5', Lorentz)
    sym2 = tensorsymmetry([1]*2)
    sym2n = tensorsymmetry(*get_symmetric_group_sgs(2))
    assert sym2 == sym2n
    g = Lorentz.metric
    assert str(g) == 'g(Lorentz,Lorentz)'
    assert Lorentz.eps_dim == Lorentz.dim
    TSpace = TensorIndexType('TSpace')
    i0, i1 = tensor_indices('i0 i1', TSpace)
    g = TSpace.metric
    A = tensorhead('A', [TSpace]*2, [[1]*2])
    # without dummy_fmt, dummies are named after the index type
    assert str(A(i0,-i0).canon_bp()) == 'A(TSpace_0, -TSpace_0)'
def test_indices():
    """TensorIndex basics and get_indices on a contracted product."""
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
    assert a.tensortype == Lorentz
    # lowering an index yields a distinct object
    assert a != -a
    A, B = tensorhead('A B', [Lorentz]*2, [[1]*2])
    prod = A(a, b)*B(-b, c)
    dummy = TensorIndex('L_0', Lorentz)
    # the contracted pair b/-b is renamed to the dummy format L_0
    assert prod.get_indices() == [a, dummy, -dummy, c]
    # index names must be given as a string; head arity is enforced
    raises(ValueError, lambda: tensor_indices(3, Lorentz))
    raises(ValueError, lambda: A(a, b, c))
def test_tensorsymmetry():
    """tensorsymmetry shapes map onto the expected strong-generating-set
    data, and mixed Young shapes are rejected."""
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    # symmetric pair <-> S2 strong generating set
    assert tensorsymmetry([1]*2) == TensorSymmetry(get_symmetric_group_sgs(2))
    # antisymmetric pair <-> antisymmetrized S2
    assert tensorsymmetry([2]) == TensorSymmetry(get_symmetric_group_sgs(2, 1))
    # no arguments: empty base, identity generator
    trivial = tensorsymmetry()
    assert trivial.base == Tuple()
    assert trivial.generators == Tuple(Permutation(1))
    # mixed Young shapes are not implemented
    raises(NotImplementedError, lambda: tensorsymmetry([2, 1]))
def test_TensorType():
    """TensorType equality, types listing, str form, and name validation."""
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    pair_sym = tensorsymmetry([1]*2)
    rank2_type = TensorType([Lorentz]*2, pair_sym)
    head = tensorhead('A', [Lorentz]*2, [[1]*2])
    assert head.typ == rank2_type
    assert head.types == [Lorentz]
    assert str(rank2_type) == "TensorType(['Lorentz', 'Lorentz'])"
    # calling a TensorType requires string name(s), not an int
    raises(ValueError, lambda: rank2_type(2))
def test_TensExpr():
    """Unsupported operations on tensor expressions raise the expected
    exceptions (division by tensors, mixed-index addition, powers, abs,
    and TensExpr arithmetic with non-tensor operands)."""
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
    g = Lorentz.metric
    A, B = tensorhead('A B', [Lorentz]*2, [[1]*2])
    raises(ValueError, lambda: g(c, d)/g(a, b))
    raises(ValueError, lambda: S.One/g(a, b))
    raises(ValueError, lambda: (A(c, d) + g(c, d))/g(a, b))
    raises(ValueError, lambda: S.One/(A(c, d) + g(c, d)))
    # addends must carry the same free indices
    raises(ValueError, lambda: A(a, b) + A(a, c))
    t = A(a, b) + B(a, b)
    raises(NotImplementedError, lambda: TensExpr.__mul__(t, 'a'))
    raises(NotImplementedError, lambda: TensExpr.__add__(t, 'a'))
    raises(NotImplementedError, lambda: TensExpr.__radd__(t, 'a'))
    raises(NotImplementedError, lambda: TensExpr.__sub__(t, 'a'))
    raises(NotImplementedError, lambda: TensExpr.__rsub__(t, 'a'))
    raises(NotImplementedError, lambda: TensExpr.__div__(t, 'a'))
    raises(NotImplementedError, lambda: TensExpr.__rdiv__(t, 'a'))
    raises(ValueError, lambda: A(a, b)**2)
    raises(NotImplementedError, lambda: 2**A(a, b))
    raises(NotImplementedError, lambda: abs(A(a, b)))
def test_TensorHead():
    """Basic TensorHead attributes: rank and symmetry."""
    assert TensAdd() == 0
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    a, b = tensor_indices('a,b', Lorentz)
    # a rank-2 fully symmetric head
    sym_head = tensorhead('A', [Lorentz]*2, [[1]*2])
    assert sym_head.symmetry == tensorsymmetry([1, 1])
    assert sym_head.rank == 2
def test_add1():
    """TensAdd construction and arithmetic: scalar multiples, sums,
    products of sums, division by scalars, and calling a sum."""
    assert TensAdd() == 0
    # simple example of algebraic expression
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    a,b,d0,d1,i,j,k = tensor_indices('a,b,d0,d1,i,j,k', Lorentz)
    # A, B symmetric
    A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
    t1 = A(b,-d0)*B(d0,a)
    assert TensAdd(t1).equals(t1)
    t2a = B(d0,a) + A(d0, a)
    t2 = A(b,-d0)*t2a
    assert str(t2) == 'A(a, L_0)*A(b, -L_0) + A(b, L_0)*B(a, -L_0)'
    t2b = t2 + t1
    assert str(t2b) == '2*A(b, L_0)*B(a, -L_0) + A(a, L_0)*A(b, -L_0)'
    p, q, r = tensorhead('p,q,r', [Lorentz], [[1]])
    t = q(d0)*2
    assert str(t) == '2*q(d0)'
    t = 2*q(d0)
    assert str(t) == '2*q(d0)'
    t1 = p(d0) + 2*q(d0)
    assert str(t1) == '2*q(d0) + p(d0)'
    t2 = p(-d0) + 2*q(-d0)
    assert str(t2) == '2*q(-d0) + p(-d0)'
    t1 = p(d0)
    t3 = t1*t2
    assert str(t3) == '2*p(L_0)*q(-L_0) + p(L_0)*p(-L_0)'
    t3 = t2*t1
    assert str(t3) == '2*p(L_0)*q(-L_0) + p(L_0)*p(-L_0)'
    t1 = p(d0) + 2*q(d0)
    t3 = t1*t2
    assert str(t3) == '4*p(L_0)*q(-L_0) + 4*q(L_0)*q(-L_0) + p(L_0)*p(-L_0)'
    t1 = p(d0) - 2*q(d0)
    assert str(t1) == '-2*q(d0) + p(d0)'
    t2 = p(-d0) + 2*q(-d0)
    t3 = t1*t2
    assert t3 == p(d0)*p(-d0) - 4*q(d0)*q(-d0)
    t = p(i)*p(j)*(p(k) + q(k)) + p(i)*(p(j) + q(j))*(p(k) - 3*q(k))
    assert t == 2*p(i)*p(j)*p(k) - 2*p(i)*p(j)*q(k) + p(i)*p(k)*q(j) - 3*p(i)*q(j)*q(k)
    t1 = (p(i) + q(i) + 2*r(i))*(p(j) - q(j))
    t2 = (p(j) + q(j) + 2*r(j))*(p(i) - q(i))
    t = t1 + t2
    assert t == 2*p(i)*p(j) + 2*p(i)*r(j) + 2*p(j)*r(i) - 2*q(i)*q(j) - 2*q(i)*r(j) - 2*q(j)*r(i)
    t = p(i)*q(j)/2
    assert 2*t == p(i)*q(j)
    t = (p(i) + q(i))/2
    assert 2*t == p(i) + q(i)
    t = S.One - p(i)*p(-i)
    assert (t + p(-j)*p(j)).equals(1)
    t = S.One + p(i)*p(-i)
    assert (t - p(-j)*p(j)).equals(1)
    t = A(a, b) + B(a, b)
    assert t.rank == 2
    t1 = t - A(a, b) - B(a, b)
    assert t1 == 0
    t = 1 - (A(a, -a) + B(a, -a))
    t1 = 1 + (A(a, -a) + B(a, -a))
    assert (t + t1).equals(2)
    t2 = 1 + A(a, -a)
    assert t1 != t2
    assert t2 != TensMul.from_data(0, [], [], [])
    t = p(i) + q(i)
    # a sum of rank-1 tensors cannot be re-indexed with two indices
    raises(ValueError, lambda: t(i, j))
def test_special_eq_ne():
    """Equality/inequality edge cases: zero-coefficient products and sums
    compare equal to 0/S.Zero; different heads or index lists do not."""
    # test special equality cases:
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    a,b,d0,d1,i,j,k = tensor_indices('a,b,d0,d1,i,j,k', Lorentz)
    # A, B symmetric
    A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
    p, q, r = tensorhead('p,q,r', [Lorentz], [[1]])
    t = 0*A(a, b)
    assert _is_equal(t, 0)
    assert _is_equal(t, S.Zero)
    assert p(i) != A(a, b)
    assert A(a, -a) != A(a, b)
    assert 0*(A(a, b) + B(a, b)) == 0
    assert 0*(A(a, b) + B(a, b)) == S.Zero
    assert 3*(A(a, b) - A(a, b)) == S.Zero
    assert p(i) + q(i) != A(a, b)
    assert p(i) + q(i) != A(a, b) + B(a, b)
    assert p(i) - p(i) == 0
    assert p(i) - p(i) == S.Zero
    # symmetric head: index order does not matter
    assert _is_equal(A(a, b), A(b, a))
def test_add2():
    """Sums that vanish by Riemann/antisymmetry identities."""
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    m, n, p, q = tensor_indices('m,n,p,q', Lorentz)
    R = tensorhead('R', [Lorentz]*4, [[2, 2]])
    A = tensorhead('A', [Lorentz]*3, [[3]])
    # contracting a Bianchi-type combination with a totally
    # antisymmetric tensor gives zero
    combo = 2*R(m, n, p, q) - R(m, q, n, p) + R(m, p, n, q)
    assert combo*A(-n, -p, -q) == 0
    combo = S(2)/3*R(m, n, p, q) - S(1)/3*R(m, q, n, p) + S(1)/3*R(m, p, n, q)
    assert combo*A(-n, -p, -q) == 0
    # traces of a totally antisymmetric tensor vanish, so the sum is zero
    assert A(m, -m, n) + A(n, p, -p) == 0
def test_mul():
    """TensMul construction and attributes (types, rank, dum, coeff, free,
    components), split/tensor_mul round trips, and error cases."""
    from sympy.abc import x
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
    sym = tensorsymmetry([1]*2)
    t = TensMul.from_data(S.One, [], [], [])
    assert str(t) == '1'
    A, B = tensorhead('A B', [Lorentz]*2, [[1]*2])
    t = (1 + x)*A(a, b)
    assert str(t) == '(x + 1)*A(a, b)'
    assert t.types == [Lorentz]
    assert t.rank == 2
    assert t.dum == []
    assert t.coeff == 1 + x
    assert sorted(t.free) == [(a, 0, 0), (b, 1, 0)]
    assert t.components == [A]
    t = A(-b, a)*B(-a, c)*A(-c, d)
    t1 = tensor_mul(*t.split())
    assert t == t(-b, d)
    assert t == t1
    assert tensor_mul(*[]) == TensMul.from_data(S.One, [], [], [])
    t = TensMul.from_data(1, [], [], [])
    zsym = tensorsymmetry()
    typ = TensorType([], zsym)
    C = typ('C')
    # a rank-0 head prints without parentheses
    assert str(C()) == 'C'
    assert str(t) == '1'
    assert t.split()[0] == t
    # repeated indices in the same position are rejected
    raises(ValueError, lambda: TIDS.free_dum_from_indices(a, a))
    raises(ValueError, lambda: TIDS.free_dum_from_indices(-a, -a))
    raises(ValueError, lambda: A(a, b)*A(a, c))
    t = A(a, b)*A(-a, c)
    raises(ValueError, lambda: t(a, b, c))
def test_substitute_indices():
    """substitute_indices on TensMul/TensAdd, sign handling for raised and
    lowered replacements, and re-indexing via __call__."""
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    i, j, k, l, m, n, p, q = tensor_indices('i,j,k,l,m,n,p,q', Lorentz)
    A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
    t = A(i, k)*B(-k, -j)
    t1 = t.substitute_indices((i, j), (j, k))
    t1a = A(j, l)*B(-l, -k)
    assert t1 == t1a
    p = tensorhead('p', [Lorentz], [[1]])
    t = p(i)
    # substituting an index not present leaves the tensor unchanged
    t1 = t.substitute_indices((j, k))
    assert t1 == t
    t1 = t.substitute_indices((i, j))
    assert t1 == p(j)
    t1 = t.substitute_indices((i, -j))
    assert t1 == p(-j)
    t1 = t.substitute_indices((-i, j))
    assert t1 == p(-j)
    t1 = t.substitute_indices((-i, -j))
    assert t1 == p(j)
    A_tmul = A(m, n)
    A_c = A_tmul(m, -m)
    assert _is_equal(A_c, A(n, -n))
    ABm = A(i, j)*B(m, n)
    ABc1 = ABm(i, j, -i, -j)
    assert _is_equal(ABc1, A(i, -j)*B(-i, j))
    ABc2 = ABm(i, -i, j, -j)
    assert _is_equal(ABc2, A(m, -m)*B(-n, n))
    asum = A(i, j) + B(i, j)
    asc1 = asum(i, -i)
    assert _is_equal(asc1, A(i, -i) + B(i, -i))
    # calling with no arguments is a no-op
    assert A(i, -i) == A(i, -i)()
    assert A(i, -i) + B(-j, j) == ((A(i, -i) + B(i, -i)))()
    assert _is_equal(A(i, j)*B(-j, k), (A(m, -j)*B(j, n))(i, k))
    raises(ValueError, lambda: A(i, -i)(j, k))
def test_riemann_cyclic_replace():
    """riemann_cyclic_replace rewrites R(m0, m2, m1, m3) as the standard
    cyclic combination of Riemann tensors:
    -1/3 R(m0,m3,m2,m1) + 1/3 R(m0,m1,m2,m3) + 2/3 R(m0,m2,m1,m3).

    (The unused local ``symr = tensorsymmetry([2, 2])`` from the original
    version has been removed; the symmetry is passed to tensorhead directly.)
    """
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    m0, m1, m2, m3 = tensor_indices('m:4', Lorentz)
    R = tensorhead('R', [Lorentz]*4, [[2, 2]])
    t = R(m0, m2, m1, m3)
    t1 = riemann_cyclic_replace(t)
    t1a = -S.One/3*R(m0, m3, m2, m1) + S.One/3*R(m0, m1, m2, m3) + Rational(2, 3)*R(m0, m2, m1, m3)
    assert t1 == t1a
def test_riemann_cyclic():
    """riemann_cyclic applied to first-Bianchi-type sums (vanish) and to a
    single Riemann tensor (cyclic decomposition)."""
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    i, j, k, l, m, n, p, q = tensor_indices('i,j,k,l,m,n,p,q', Lorentz)
    R = tensorhead('R', [Lorentz]*4, [[2, 2]])
    t = R(i,j,k,l) + R(i,l,j,k) + R(i,k,l,j) - \
        R(i,j,l,k) - R(i,l,k,j) - R(i,k,j,l)
    t2 = t*R(-i,-j,-k,-l)
    t3 = riemann_cyclic(t2)
    assert t3 == 0
    t = R(i,j,k,l)*(R(-i,-j,-k,-l) - 2*R(-i,-k,-j,-l))
    t1 = riemann_cyclic(t)
    assert t1 == 0
    t = R(i,j,k,l)
    t1 = riemann_cyclic(t)
    assert t1 == -S(1)/3*R(i, l, j, k) + S(1)/3*R(i, k, j, l) + S(2)/3*R(i, j, k, l)
    t = R(i,j,k,l)*R(-k,-l,m,n)*(R(-m,-n,-i,-j) + 2*R(-m,-j,-n,-i))
    t1 = riemann_cyclic(t)
    assert t1 == 0
def test_div():
    """Division of tensor expressions by scalars, and propagation of the
    _is_canon_bp flag through scalar multiplication/division."""
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    m0,m1,m2,m3 = tensor_indices('m0:4', Lorentz)
    R = tensorhead('R', [Lorentz]*4, [[2, 2]])
    t = R(m0,m1,-m1,m3)
    t1 = t/S(4)
    assert str(t1) == '1/4*R(m0, L_0, -L_0, m3)'
    t = t.canon_bp()
    # t1 was built before canonicalization, so its flag is unset
    assert not t1._is_canon_bp
    # scalar factors preserve the canonical-form flag
    t1 = t*4
    assert t1._is_canon_bp
    t1 = t1/4
    assert t1._is_canon_bp
def test_contract_metric1():
    """contract_metric with a symmetric metric g: free, traced, and fully
    contracted metric factors; missing dim raises for the trace."""
    D = Symbol('D')
    Lorentz = TensorIndexType('Lorentz', dim=D, dummy_fmt='L')
    a, b, c, d, e = tensor_indices('a,b,c,d,e', Lorentz)
    g = Lorentz.metric
    p = tensorhead('p', [Lorentz], [[1]])
    t = g(a, b)*p(-b)
    t1 = t.contract_metric(g)
    assert t1 == p(a)
    A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
    # case with g with all free indices
    t1 = A(a,b)*B(-b,c)*g(d, e)
    t2 = t1.contract_metric(g)
    assert t1 == t2
    # case of g(d, -d)
    t1 = A(a,b)*B(-b,c)*g(-d, d)
    t2 = t1.contract_metric(g)
    assert t2 == D*A(a, d)*B(-d, c)
    # g with one free index
    t1 = A(a,b)*B(-b,-c)*g(c, d)
    t2 = t1.contract_metric(g)
    assert t2 == A(a, c)*B(-c, d)
    # g with both indices contracted with another tensor
    t1 = A(a,b)*B(-b,-c)*g(c, -a)
    t2 = t1.contract_metric(g)
    assert _is_equal(t2, A(a, b)*B(-b, -a))
    t1 = A(a,b)*B(-b,-c)*g(c, d)*g(-a, -d)
    t2 = t1.contract_metric(g)
    assert _is_equal(t2, A(a,b)*B(-b,-a))
    t1 = A(a,b)*g(-a,-b)
    t2 = t1.contract_metric(g)
    assert _is_equal(t2, A(a, -a))
    assert not t2.free
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    a, b = tensor_indices('a,b', Lorentz)
    g = Lorentz.metric
    raises(ValueError, lambda: g(a, -a).contract_metric(g)) # no dim
def test_contract_metric2():
    """contract_metric on products and sums of tensors, with traces
    producing dimension-D factors."""
    D = Symbol('D')
    Lorentz = TensorIndexType('Lorentz', dim=D, dummy_fmt='L')
    a, b, c, d, e, L_0 = tensor_indices('a,b,c,d,e,L_0', Lorentz)
    g = Lorentz.metric
    p, q = tensorhead('p,q', [Lorentz], [[1]])
    t1 = g(a,b)*p(c)*p(-c)
    t2 = 3*g(-a,-b)*q(c)*q(-c)
    t = t1*t2
    t = t.contract_metric(g)
    assert t == 3*D*p(a)*p(-a)*q(b)*q(-b)
    t1 = g(a,b)*p(c)*p(-c)
    t2 = 3*q(-a)*q(-b)
    t = t1*t2
    t = t.contract_metric(g)
    t = t.canon_bp()
    assert t == 3*p(a)*p(-a)*q(b)*q(-b)
    t1 = 2*g(a,b)*p(c)*p(-c)
    t2 = - 3*g(-a,-b)*q(c)*q(-c)
    t = t1*t2
    t = t.contract_metric(g)
    # NOTE(review): the two contractions above and below are computed but
    # never asserted -- presumably smoke tests; confirm whether asserts
    # were lost against upstream.
    t = 6*g(a,b)*g(-a,-b)*p(c)*p(-c)*q(d)*q(-d)
    t = t.contract_metric(g)
    t1 = 2*g(a,b)*p(c)*p(-c)
    t2 = q(-a)*q(-b) + 3*g(-a,-b)*q(c)*q(-c)
    t = t1*t2
    t = t.contract_metric(g)
    assert t == (2 + 6*D)*p(a)*p(-a)*q(b)*q(-b)
    t1 = p(a)*p(b) + p(a)*q(b) + 2*g(a,b)*p(c)*p(-c)
    t2 = q(-a)*q(-b) - g(-a,-b)*q(c)*q(-c)
    t = t1*t2
    t = t.contract_metric(g)
    t1 = (1 - 2*D)*p(a)*p(-a)*q(b)*q(-b) + p(a)*q(-a)*p(b)*q(-b)
    assert t == t1
    t = g(a,b)*g(c,d)*g(-b,-c)
    t1 = t.contract_metric(g)
    assert t1 == g(a, d)
    t1 = g(a,b)*g(c,d) + g(a,c)*g(b,d) + g(a,d)*g(b,c)
    t2 = t1.substitute_indices((a,-a),(b,-b),(c,-c),(d,-d))
    t = t1*t2
    t = t.contract_metric(g)
    assert t.equals(3*D**2 + 6*D)
    t = 2*p(a)*g(b,-b)
    t1 = t.contract_metric(g)
    assert t1.equals(2*D*p(a))
    t = 2*p(a)*g(b,-a)
    t1 = t.contract_metric(g)
    assert t1 == 2*p(b)
    M = Symbol('M')
    t = (p(a)*p(b) + g(a, b)*M**2)*g(-a, -b) - D*M**2
    t1 = t.contract_metric(g)
    assert t1 == p(a)*p(-a)
    A = tensorhead('A', [Lorentz]*2, [[1]*2])
    t = A(a, b)*p(L_0)*g(-a, -b)
    t1 = t.contract_metric(g)
    # the pre-existing explicit L_0 index forces the dummy to be L_1
    assert str(t1) == 'A(L_1, -L_1)*p(L_0)' or str(t1) == 'A(-L_1, L_1)*p(L_0)'
def test_metric_contract3():
    """contract_metric with an antisymmetric (spinor) metric C: sign
    conventions for every ordering of the contracted metric indices."""
    D = Symbol('D')
    Spinor = TensorIndexType('Spinor', dim=D, metric=True, dummy_fmt='S')
    a0,a1,a2,a3,a4 = tensor_indices('a0:5', Spinor)
    C = Spinor.metric
    chi, psi = tensorhead('chi,psi', [Spinor], [[1]], 1)
    B = tensorhead('B', [Spinor]*2, [[1],[1]])
    # traces of the antisymmetric metric
    t = C(a0, -a0)
    t1 = t.contract_metric(C)
    assert t1.equals(-D)
    t = C(-a0, a0)
    t1 = t.contract_metric(C)
    assert t1.equals(D)
    t = C(a0,a1)*C(-a0,-a1)
    t1 = t.contract_metric(C)
    assert t1.equals(D)
    t = C(a1,a0)*C(-a0,-a1)
    t1 = t.contract_metric(C)
    assert t1.equals(-D)
    t = C(-a0,a1)*C(a0,-a1)
    t1 = t.contract_metric(C)
    assert t1.equals(-D)
    t = C(a1,-a0)*C(a0,-a1)
    t1 = t.contract_metric(C)
    assert t1.equals(D)
    # contraction with a rank-2 tensor without symmetry
    t = C(a0,a1)*B(-a1,-a0)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, B(a0, -a0))
    t = C(a1,a0)*B(-a1,-a0)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, -B(a0, -a0))
    t = C(a0,-a1)*B(a1,-a0)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, -B(a0, -a0))
    t = C(-a0,a1)*B(-a1,a0)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, -B(a0, -a0))
    t = C(-a0,-a1)*B(a1,a0)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, B(a0, -a0))
    t = C(-a1, a0)*B(a1,-a0)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, B(a0, -a0))
    # contraction with anticommuting rank-1 fields
    t = C(a0,a1)*psi(-a1)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, psi(a0))
    t = C(a1,a0)*psi(-a1)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, -psi(a0))
    t = C(a0,a1)*chi(-a0)*psi(-a1)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, -chi(a1)*psi(-a1))
    t = C(a1,a0)*chi(-a0)*psi(-a1)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, chi(a1)*psi(-a1))
    t = C(-a1,a0)*chi(-a0)*psi(a1)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, chi(-a1)*psi(a1))
    t = C(a0, -a1)*chi(-a0)*psi(a1)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, -chi(-a1)*psi(a1))
    t = C(-a0,-a1)*chi(a0)*psi(a1)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, chi(-a1)*psi(a1))
    t = C(-a1,-a0)*chi(a0)*psi(a1)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, -chi(-a1)*psi(a1))
    t = C(-a1,-a0)*B(a0,a2)*psi(a1)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, -B(-a1,a2)*psi(a1))
    t = C(a1,a0)*B(-a2,-a0)*psi(-a1)
    t1 = t.contract_metric(C)
    assert _is_equal(t1, B(-a2,a1)*psi(-a1))
def test_epsilon():
    """canon_bp on products with the totally antisymmetric Levi-Civita
    epsilon of a 4-dimensional index type."""
    Lorentz = TensorIndexType('Lorentz', dim=4, dummy_fmt='L')
    a, b, c, d, e = tensor_indices('a,b,c,d,e', Lorentz)
    g = Lorentz.metric
    epsilon = Lorentz.epsilon
    p, q, r, s = tensorhead('p,q,r,s', [Lorentz], [[1]])
    # odd permutation of the indices flips the sign
    t = epsilon(b,a,c,d)
    t1 = t.canon_bp()
    assert t1 == -epsilon(a,b,c,d)
    # even permutation keeps the sign
    t = epsilon(c,b,d,a)
    t1 = t.canon_bp()
    assert t1 == epsilon(a,b,c,d)
    t = epsilon(c,a,d,b)
    t1 = t.canon_bp()
    assert t1 == -epsilon(a,b,c,d)
    t = epsilon(a,b,c,d)*p(-a)*q(-b)
    t1 = t.canon_bp()
    assert t1 == epsilon(c, d, a, b)*p(-a)*q(-b)
    t = epsilon(c,b,d,a)*p(-a)*q(-b)
    t1 = t.canon_bp()
    assert t1 == epsilon(c, d, a, b)*p(-a)*q(-b)
    t = epsilon(c,a,d,b)*p(-a)*q(-b)
    t1 = t.canon_bp()
    assert t1 == -epsilon(c, d, a, b)*p(-a)*q(-b)
    # epsilon contracted twice with the same vector vanishes
    t = epsilon(c,a,d,b)*p(-a)*p(-b)
    t1 = t.canon_bp()
    assert t1 == 0
    t = epsilon(c,a,d,b)*p(-a)*q(-b) + epsilon(a,b,c,d)*p(-b)*q(-a)
    t1 = t.canon_bp()
    assert t1 == -2*epsilon(c, d, a, b)*p(-a)*q(-b)
def test_contract_delta1():
    """contract_delta identities for the SU(n) projection operators
    (Cvitanovic, Group Theory, page 9): P1, P2 are idempotent and
    orthogonal; tr P1 = n**2 - 1."""
    # see Group Theory by Cvitanovic page 9
    n = Symbol('n')
    Color = TensorIndexType('Color', metric=None, dim=n, dummy_fmt='C')
    a, b, c, d, e, f = tensor_indices('a,b,c,d,e,f', Color)
    delta = Color.delta
    def idn(a, b, d, c):
        # identity operator on V x V~
        assert a.is_up and d.is_up
        assert not (b.is_up or c.is_up)
        return delta(a, c)*delta(d, b)
    def T(a, b, d, c):
        # trace operator on V x V~
        assert a.is_up and d.is_up
        assert not (b.is_up or c.is_up)
        return delta(a, b)*delta(d, c)
    def P1(a, b, c, d):
        # traceless projector
        return idn(a,b,c,d) - 1/n*T(a,b,c,d)
    def P2(a, b, c, d):
        # trace projector
        return 1/n*T(a,b,c,d)
    t = P1(a, -b, e, -f)*P1(f, -e, d, -c)
    t1 = t.contract_delta(delta)
    assert t1 == P1(a, -b, d, -c)
    t = P2(a, -b, e, -f)*P2(f, -e, d, -c)
    t1 = t.contract_delta(delta)
    assert t1 == P2(a, -b, d, -c)
    t = P1(a, -b, e, -f)*P2(f, -e, d, -c)
    t1 = t.contract_delta(delta)
    assert t1 == 0
    t = P1(a, -b, b, -a)
    t1 = t.contract_delta(delta)
    assert t1.equals(n**2 - 1)
def test_fun():
    """Re-indexing via __call__ and fun_eval; verifies metric
    compatibility g_{a b; c} = 0 with the Christoffel connection
    (example from L. Brewin, "A brief introduction to Cadabra",
    arxiv:0903.2085)."""
    D = Symbol('D')
    Lorentz = TensorIndexType('Lorentz', dim=D, dummy_fmt='L')
    a,b,c,d,e = tensor_indices('a,b,c,d,e', Lorentz)
    g = Lorentz.metric
    p, q = tensorhead('p q', [Lorentz], [[1]])
    t = q(c)*p(a)*q(b) + g(a,b)*g(c,d)*q(-d)
    assert t(a,b,c) == t
    assert t - t(b,a,c) == q(c)*p(a)*q(b) - q(c)*p(b)*q(a)
    assert t(b,c,d) == q(d)*p(b)*q(c) + g(b,c)*g(d,e)*q(-e)
    t1 = t.fun_eval((a,b),(b,a))
    assert t1 == q(c)*p(b)*q(a) + g(a,b)*g(c,d)*q(-d)
    # check that g_{a b; c} = 0
    # example taken from L. Brewin
    # "A brief introduction to Cadabra" arxiv:0903.2085
    # dg_{a b c} = \partial_{a} g_{b c} is symmetric in b, c
    dg = tensorhead('dg', [Lorentz]*3, [[1], [1]*2])
    # gamma^a_{b c} is the Christoffel symbol
    gamma = S.Half*g(a,d)*(dg(-b,-d,-c) + dg(-c,-b,-d) - dg(-d,-b,-c))
    # t = g_{a b; c}
    t = dg(-c,-a,-b) - g(-a,-d)*gamma(d,-b,-c) - g(-b,-d)*gamma(d,-a,-c)
    t = t.contract_metric(g)
    assert t == 0
    t = q(c)*p(a)*q(b)
    assert t(b,c,d) == q(d)*p(b)*q(c)
def test_TensorManager():
    """TensorManager commutation-group bookkeeping: set_comm/set_comms,
    symbol<->integer maps, commutation behavior of products, and clear."""
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    LorentzH = TensorIndexType('LorentzH', dummy_fmt='LH')
    i, j = tensor_indices('i,j', Lorentz)
    ih, jh = tensor_indices('ih,jh', LorentzH)
    p, q = tensorhead('p q', [Lorentz], [[1]])
    ph, qh = tensorhead('ph qh', [LorentzH], [[1]])
    Gsymbol = Symbol('Gsymbol')
    GHsymbol = Symbol('GHsymbol')
    # tensors in the two groups commute with each other
    TensorManager.set_comm(Gsymbol, GHsymbol, 0)
    G = tensorhead('G', [Lorentz], [[1]], Gsymbol)
    assert TensorManager._comm_i2symbol[G.comm] == Gsymbol
    GH = tensorhead('GH', [LorentzH], [[1]], GHsymbol)
    ps = G(i)*p(-i)
    psh = GH(ih)*ph(-ih)
    t = ps + psh
    t1 = t*t
    assert t1 == ps*ps + 2*ps*psh + psh*psh
    qs = G(i)*q(-i)
    qsh = GH(ih)*qh(-ih)
    # cross-group products commute; within the G group nothing is known
    assert _is_equal(ps*qsh, qsh*ps)
    assert not _is_equal(ps*qs, qs*ps)
    n = TensorManager.comm_symbols2i(Gsymbol)
    assert TensorManager.comm_i2symbol(n) == Gsymbol
    assert GHsymbol in TensorManager._comm_symbols2i
    raises(ValueError, lambda: TensorManager.set_comm(GHsymbol, 1, 2))
    TensorManager.set_comms((Gsymbol,GHsymbol,0),(Gsymbol,1,1))
    assert TensorManager.get_comm(n, 1) == TensorManager.get_comm(1, n) == 1
    # clear() restores the default three commutation groups
    TensorManager.clear()
    assert TensorManager.comm == [{0:0, 1:0, 2:0}, {0:0, 1:1, 2:None}, {0:0, 1:None}]
    assert GHsymbol not in TensorManager._comm_symbols2i
    nh = TensorManager.comm_symbols2i(GHsymbol)
    assert GHsymbol in TensorManager._comm_symbols2i
def test_hash():
    """Rebuilding tensor objects from .func/.args preserves equality and
    hash, and all args are Basic instances (sympy invariants)."""
    D = Symbol('D')
    Lorentz = TensorIndexType('Lorentz', dim=D, dummy_fmt='L')
    a,b,c,d,e = tensor_indices('a,b,c,d,e', Lorentz)
    g = Lorentz.metric
    p, q = tensorhead('p q', [Lorentz], [[1]])
    p_type = p.args[1]
    t1 = p(a)*q(b)
    t2 = p(a)*p(b)
    assert hash(t1) != hash(t2)
    t3 = p(a)*p(b) + g(a,b)
    t4 = p(a)*p(b) - g(a,b)
    assert hash(t3) != hash(t4)
    # func(*args) round trips
    assert a.func(*a.args) == a
    assert Lorentz.func(*Lorentz.args) == Lorentz
    assert g.func(*g.args) == g
    assert p.func(*p.args) == p
    assert p_type.func(*p_type.args) == p_type
    assert p(a).func(*(p(a)).args) == p(a)
    assert t1.func(*t1.args) == t1
    assert t2.func(*t2.args) == t2
    assert t3.func(*t3.args) == t3
    assert t4.func(*t4.args) == t4
    # hashes of the rebuilt objects agree
    assert hash(a.func(*a.args)) == hash(a)
    assert hash(Lorentz.func(*Lorentz.args)) == hash(Lorentz)
    assert hash(g.func(*g.args)) == hash(g)
    assert hash(p.func(*p.args)) == hash(p)
    assert hash(p_type.func(*p_type.args)) == hash(p_type)
    assert hash(p(a).func(*(p(a)).args)) == hash(p(a))
    assert hash(t1.func(*t1.args)) == hash(t1)
    assert hash(t2.func(*t2.args)) == hash(t2)
    assert hash(t3.func(*t3.args)) == hash(t3)
    assert hash(t4.func(*t4.args)) == hash(t4)
    def check_all(obj):
        # every arg of a Basic subclass instance must itself be Basic
        return all([isinstance(_, Basic) for _ in obj.args])
    assert check_all(a)
    assert check_all(Lorentz)
    assert check_all(g)
    assert check_all(p)
    assert check_all(p_type)
    assert check_all(p(a))
    assert check_all(t1)
    assert check_all(t2)
    assert check_all(t3)
    assert check_all(t4)
    tsymmetry = tensorsymmetry([2], [1], [1, 1, 1])
    assert tsymmetry.func(*tsymmetry.args) == tsymmetry
    assert hash(tsymmetry.func(*tsymmetry.args)) == hash(tsymmetry)
    assert check_all(tsymmetry)
def test_hidden_indices_for_matrix_multiplication():
    """Tensor heads created with matrix_behavior=True get implicit
    auto_left/auto_right matrix indices; check construction, contraction,
    arithmetic, and the automatic delta insertion in TensAdd."""
    L = TensorIndexType('Lorentz')
    S = TensorIndexType('Matind')

    m0, m1, m2, m3, m4, m5 = tensor_indices('m0:6', L)
    s0, s1, s2 = tensor_indices('s0:3', S)

    A = tensorhead('A', [L, S, S], [[1], [1], [1]], matrix_behavior=True)
    B = tensorhead('B', [L, S], [[1], [1]], matrix_behavior=True)
    D = tensorhead('D', [L, L, S, S], [[1, 1], [1, 1]], matrix_behavior=True)
    E = tensorhead('E', [L, L, L, L], [[1], [1], [1], [1]], matrix_behavior=True)
    F = tensorhead('F', [L], [[1]], matrix_behavior=True)

    # Omitted matrix indices are filled with auto_left / -auto_right.
    assert (A(m0)) == A(m0, S.auto_left, -S.auto_right)
    assert (B(-m1)) == B(-m1, S.auto_left)

    A0 = A(m0)
    B0 = B(-m0)
    B1 = B(m1)

    # Matrix-style products contract the hidden indices pairwise.
    assert _is_equal((B1*A0*B0), B(m1, s0)*A(m0, -s0, s1)*B(-m0, -s1))
    assert _is_equal((B0*A0), B(-m0, s0)*A(m0, -s0, -S.auto_right))
    assert _is_equal((A0*B0), A(m0, S.auto_left, s0)*B(-m0, -s0))

    C = tensorhead('C', [L, L], [[1]*2])
    # True stands for an auto-matrix index slot.
    assert _is_equal((C(True, True)), C(L.auto_left, -L.auto_right))
    assert _is_equal((A(m0)*C(m1, -m0)), A(m2, S.auto_left, -S.auto_right)*C(m1, -m2))
    assert _is_equal((C(True, True)*C(True, True)), C(L.auto_left, m0)*C(-m0, -L.auto_right))

    assert _is_equal(A(m0), A(m0))
    assert _is_equal(B(-m1), B(-m1))
    assert _is_equal(A(m0) - A(m0), 0)
    ts1 = A(m0)*A(m1) + A(m1)*A(m0)
    ts2 = A(m1)*A(m0) + A(m0)*A(m1)
    assert _is_equal(ts1, ts2)
    assert _is_equal(A(m0)*A(m1) + A(m1)*A(m0), A(m1)*A(m0) + A(m0)*A(m1))
    assert _is_equal(A(m0), (2*A(m0))/2)
    assert _is_equal(A(m0), -(-A(m0)))
    assert _is_equal(2*A(m0) - 3*A(m0), -A(m0))
    assert _is_equal(2*D(m0, m1) - 5*D(m1, m0), -3*D(m0, m1))

    D0 = D(True, True, True, True)
    Aa = A(True, True, True)
    assert _is_equal(D0 * Aa, D(L.auto_left, m0, S.auto_left, s0)*A(-m0, -s0, -S.auto_right))
    assert D(m0, m1) == D(m0, m1, S.auto_left, -S.auto_right)

    # Wrong numbers of explicit/auto indices must be rejected.
    raises(ValueError, lambda: C(True))
    raises(ValueError, lambda: C())
    raises(ValueError, lambda: E(True, True, True, True))

    # test that a delta is automatically added on missing auto-matrix indices in TensAdd
    assert F(m2)*F(m3)*F(m4)*A(m1) + E(m1, m2, m3, m4) == \
        E(m1, m2, m3, m4)*S.delta(S.auto_left, -S.auto_right) +\
        F(m2)*F(m3)*F(m4)*A(m1, S.auto_left, -S.auto_right)
    assert E(m1, m2) + F(m1)*F(m2) == E(m1, m2) + F(m1)*F(m2)*L.delta(L.auto_left, -L.auto_right)
    assert E(m1, m2)*A(m3) + F(m1)*F(m2)*F(m3) == \
        E(m1, m2, L.auto_left, -L.auto_right)*A(m3, S.auto_left, -S.auto_right) +\
        F(m1)*F(m2)*F(m3)*L.delta(L.auto_left, -L.auto_right)*S.delta(S.auto_left, -S.auto_right)

    # delta/metric called without indices default to the auto-matrix pair.
    assert L.delta() == L.delta(L.auto_left, -L.auto_right)
    assert S.delta() == S.delta(S.auto_left, -S.auto_right)
    assert L.metric() == L.metric(L.auto_left, -L.auto_right)
    assert S.metric() == S.metric(S.auto_left, -S.auto_right)
### TEST VALUED TENSORS ###
numpy = import_module('numpy')
def _get_valued_base_test_variables():
    """Build the shared fixture for the valued-tensor tests.

    Returns a tuple of tensor heads, index types, indices and raw matrices
    (Minkowski metric, an inverted-signature metric, and a non-diagonal
    metric), or None when numpy is not available.
    """
    if numpy is None:
        return

    # Standard (+,-,-,-) Minkowski metric.
    minkowski = Matrix((
        (1, 0, 0, 0),
        (0, -1, 0, 0),
        (0, 0, -1, 0),
        (0, 0, 0, -1),
    ))
    Lorentz = TensorIndexType('Lorentz', dim=4)
    Lorentz.data = minkowski

    i0, i1, i2, i3, i4 = tensor_indices('i0:5', Lorentz)

    E, px, py, pz = symbols('E px py pz')
    A = tensorhead('A', [Lorentz], [[1]])
    A.data = [E, px, py, pz]
    B = tensorhead('B', [Lorentz], [[1]], 'Gcomm')
    B.data = range(4)
    AB = tensorhead("AB", [Lorentz] * 2, [[1]]*2)
    AB.data = minkowski

    ba_matrix = Matrix((
        (1, 2, 3, 4),
        (5, 6, 7, 8),
        (9, 0, -1, -2),
        (-3, -4, -5, -6),
    ))
    BA = tensorhead("BA", [Lorentz] * 2, [[1]]*2)
    BA.data = ba_matrix

    # Exercise a contraction on construction (result unused on purpose).
    BA(i0, i1)*A(-i0)*B(-i1)

    # Let's test the diagonal metric, with inverted Minkowski metric:
    LorentzD = TensorIndexType('LorentzD')
    LorentzD.data = [-1, 1, 1, 1]
    mu0, mu1, mu2 = tensor_indices('mu0:3', LorentzD)
    C = tensorhead('C', [LorentzD], [[1]])
    C.data = [E, px, py, pz]

    ### non-diagonal metric ###
    ndm_matrix = (
        (1, 1, 0,),
        (1, 0, 1),
        (0, 1, 0,),
    )
    ndm = TensorIndexType("ndm")
    ndm.data = ndm_matrix
    n0, n1, n2 = tensor_indices('n0:3', ndm)
    NA = tensorhead('NA', [ndm], [[1]])
    NA.data = range(10, 13)
    NB = tensorhead('NB', [ndm]*2, [[1]]*2)
    NB.data = [[i+j for j in range(10, 13)] for i in range(10, 13)]
    NC = tensorhead('NC', [ndm]*3, [[1]]*3)
    NC.data = [[[i+j+k for k in range(4, 7)] for j in range(1, 4)] for i in range(2, 5)]

    return (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
            n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4)
def test_valued_tensor_iter():
    """Iterating valued tensor heads/expressions yields their components."""
    numpy = import_module("numpy")
    if numpy is None:
        skip("numpy not installed.")
    (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
     n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()

    # iteration on VTensorHead
    assert list(A) == [E, px, py, pz]
    assert list(ba_matrix) == list(BA)

    # iteration on VTensMul
    assert list(A(i1)) == [E, px, py, pz]
    assert list(BA(i1, i2)) == list(ba_matrix)
    assert list(3 * BA(i1, i2)) == [3 * i for i in list(ba_matrix)]
    assert list(-5 * BA(i1, i2)) == [-5 * i for i in list(ba_matrix)]

    # iteration on VTensAdd
    # A(i1) + A(i1)
    assert list(A(i1) + A(i1)) == [2*E, 2*px, 2*py, 2*pz]
    assert BA(i1, i2) - BA(i1, i2) == 0
    assert list(BA(i1, i2) - 2 * BA(i1, i2)) == [-i for i in list(ba_matrix)]
def test_valued_tensor_covariant_contravariant_elements():
    """Raising/lowering indices with the Minkowski metric flips the sign of
    spatial components and leaves the time component unchanged."""
    numpy = import_module("numpy")
    if numpy is None:
        skip("numpy not installed.")
    (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
     n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()

    assert A(-i0)[0] == A(i0)[0]
    assert A(-i0)[1] == -A(i0)[1]

    assert AB(i0, i1)[1, 1] == -1
    assert AB(i0, -i1)[1, 1] == 1
    assert AB(-i0, -i1)[1, 1] == -1
    assert AB(-i0, i1)[1, 1] == 1
def test_valued_tensor_get_matrix():
    """get_matrix() returns the components as a sympy Matrix, honoring the
    covariant/contravariant position of the indices."""
    numpy = import_module("numpy")
    if numpy is None:
        skip("numpy not installed.")
    (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
     n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()

    matab = AB(i0, i1).get_matrix()
    assert matab == Matrix([
        [1, 0, 0, 0],
        [0, -1, 0, 0],
        [0, 0, -1, 0],
        [0, 0, 0, -1],
    ])
    # when alternating contravariant/covariant with [1, -1, -1, -1] metric
    # it becomes the identity matrix:
    assert AB(i0, -i1).get_matrix() == eye(4)

    # covariant and contravariant forms:
    assert A(i0).get_matrix() == Matrix([E, px, py, pz])
    assert A(-i0).get_matrix() == Matrix([E, -px, -py, -pz])
def test_valued_tensor_contraction():
    """Contractions over valued tensors compute component sums with the
    metric; rank drops by two per contracted index pair."""
    numpy = import_module("numpy")
    if numpy is None:
        skip("numpy not installed.")
    (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
     n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()

    # A^mu A_mu is the Minkowski square of the four-vector.
    assert (A(i0) * A(-i0)).data == E ** 2 - px ** 2 - py ** 2 - pz ** 2
    assert (A(i0) * A(-i0)).data == A ** 2
    assert (A(i0) * A(-i0)).data == A(i0) ** 2
    assert (A(i0) * B(-i0)).data == -px - 2 * py - 3 * pz

    for i in range(4):
        for j in range(4):
            # Outer (non-contracted) product is componentwise.
            assert (A(i0) * B(-i1))[i, j] == [E, px, py, pz][i] * [0, -1, -2, -3][j]

    # test contraction on the alternative Minkowski metric: [-1, 1, 1, 1]
    assert (C(mu0) * C(-mu0)).data == -E ** 2 + px ** 2 + py ** 2 + pz ** 2

    contrexp = A(i0) * AB(i1, -i0)
    assert A(i0).rank == 1
    assert AB(i1, -i0).rank == 2
    assert contrexp.rank == 1
    for i in range(4):
        assert contrexp[i] == [E, px, py, pz][i]
def test_valued_tensor_self_contraction():
    """Self-contraction (trace with the metric) of rank-2 valued tensors."""
    numpy = import_module("numpy")
    if numpy is None:
        skip("numpy not installed.")
    (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
     n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()

    assert AB(i0, -i0).data == 4
    assert BA(i0, -i0).data == 2
def test_valued_tensor_pow():
    """Powers of a valued tensor head: C**2 is the metric square, C**1 its
    square root; the same works on an indexed application C(mu0)."""
    numpy = import_module("numpy")
    if numpy is None:
        skip("numpy not installed.")
    (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
     n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()

    assert C**2 == -E**2 + px**2 + py**2 + pz**2
    assert C**1 == sqrt(-E**2 + px**2 + py**2 + pz**2)
    assert C(mu0)**2 == C**2
    assert C(mu0)**1 == C**1
def test_valued_tensor_expressions():
    """Mixed arithmetic on valued tensors: scalar coefficients, addition,
    subtraction, division, and contraction of compound expressions."""
    numpy = import_module("numpy")
    if numpy is None:
        skip("numpy not installed.")
    (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
     n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()

    x1, x2, x3 = symbols('x1:4')

    # test coefficient in contraction:
    rank2coeff = x1 * A(i3) * B(i2)
    assert rank2coeff[1, 1] == x1 * px
    assert rank2coeff[3, 3] == 3 * pz * x1
    coeff_expr = ((x1 * A(i4)) * (B(-i4) / x2)).data

    assert coeff_expr.expand() == -px*x1/x2 - 2*py*x1/x2 - 3*pz*x1/x2

    add_expr = A(i0) + B(i0)

    assert add_expr[0] == E
    assert add_expr[1] == px + 1
    assert add_expr[2] == py + 2
    assert add_expr[3] == pz + 3

    sub_expr = A(i0) - B(i0)

    assert sub_expr[0] == E
    assert sub_expr[1] == px - 1
    assert sub_expr[2] == py - 2
    assert sub_expr[3] == pz - 3

    assert (add_expr * B(-i0)).data == -px - 2*py - 3*pz - 14

    expr1 = x1*A(i0) + x2*B(i0)
    expr2 = expr1 * B(i1) * (-4)
    expr3 = expr2 + 3*x3*AB(i0, i1)
    expr4 = expr3 / 2
    assert expr4 * 2 == expr3
    expr5 = (expr4 * BA(-i1, -i0))

    assert expr5.data.expand() == 28*E*x1 + 12*px*x1 + 20*py*x1 + 28*pz*x1 + 136*x2 + 3*x3
def test_noncommuting_components():
    """Contractions must preserve the order of non-commutative component
    symbols (a*b != b*a)."""
    numpy = import_module("numpy")
    if numpy is None:
        skip("numpy not installed.")
    (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
     n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()

    euclid = TensorIndexType('Euclidean')
    euclid.data = [1, 1]
    i1, i2, i3 = tensor_indices('i1:4', euclid)

    a, b, c, d = symbols('a b c d', commutative=False)
    V1 = tensorhead('V1', [euclid] * 2, [[1]]*2)
    V1.data = [[a, b], (c, d)]
    V2 = tensorhead('V2', [euclid] * 2, [[1]]*2)
    V2.data = [[a, c], [b, d]]
    vtp = V1(i1, i2) * V2(-i2, -i1)

    assert vtp.data == a**2 + b**2 + c**2 + d**2
    # b*c != c*b, so the "symmetrized" form must NOT appear.
    assert vtp.data != a**2 + 2*b*c + d**2

    Vc = (b * V1(i1, -i1)).data
    assert Vc.expand() == b * a + b * d
def test_valued_non_diagonal_metric():
    """Contraction with a non-diagonal metric equals the explicit quadratic
    form v^T M v."""
    numpy = import_module("numpy")
    if numpy is None:
        skip("numpy not installed.")
    (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
     n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()

    mmatrix = Matrix(ndm_matrix)
    assert (NA(n0)*NA(-n0)).data == (NA(n0).get_matrix().T * mmatrix * NA(n0).get_matrix())[0, 0]
def test_valued_assign_numpy_ndarray():
    """Assigning a numpy.ndarray to .data works, both on the tensor head and
    on indexed forms; assignment through a covariant/mixed index pattern is
    interpreted in that index configuration."""
    numpy = import_module("numpy")
    if numpy is None:
        skip("numpy not installed.")
    (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
     n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()

    # this is needed to make sure that a numpy.ndarray can be assigned to a
    # tensor.
    arr = [E+1, px-1, py, pz]
    A.data = numpy.array(arr)
    for i in range(4):
        assert A(i0).data[i] == arr[i]

    qx, qy, qz = symbols('qx qy qz')
    A(-i0).data = numpy.array([E, qx, qy, qz])
    for i in range(4):
        # Data was given in the covariant form, so the contravariant
        # components pick up the metric signs.
        assert A(i0).data[i] == [E, -qx, -qy, -qz][i]
        assert A.data[i] == [E, -qx, -qy, -qz][i]

    # test on multi-indexed tensors.
    random_4x4_data = [[(i**3-3*i**2)%(j+7) for i in range(4)] for j in range(4)]

    AB(-i0, -i1).data = random_4x4_data
    for i in range(4):
        for j in range(4):
            # Raising either index flips the sign of its spatial components.
            assert AB(i0, i1).data[i, j] == random_4x4_data[i][j]*(-1 if i else 1)*(-1 if j else 1)
            assert AB(-i0, i1).data[i, j] == random_4x4_data[i][j]*(-1 if j else 1)
            assert AB(i0, -i1).data[i, j] == random_4x4_data[i][j]*(-1 if i else 1)
            assert AB(-i0, -i1).data[i, j] == random_4x4_data[i][j]

    AB(-i0, i1).data = random_4x4_data
    for i in range(4):
        for j in range(4):
            assert AB(i0, i1).data[i, j] == random_4x4_data[i][j]*(-1 if i else 1)
            assert AB(-i0, i1).data[i, j] == random_4x4_data[i][j]
            assert AB(i0, -i1).data[i, j] == random_4x4_data[i][j]*(-1 if i else 1)*(-1 if j else 1)
            assert AB(-i0, -i1).data[i, j] == random_4x4_data[i][j]*(-1 if j else 1)
def test_valued_metric_inverse():
    """For a metric with assigned data: the fully covariant form is the
    matrix inverse, the mixed forms and the Kronecker delta are the identity.

    The test matrix is arbitrary but symmetric (no physical meaning).
    """
    numpy = import_module("numpy")
    if numpy is None:
        # Report a skip like the other valued-tensor tests instead of the
        # previous bare `return`, which made the test look like a pass.
        skip("numpy not installed.")
    (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
     n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()

    # let's assign some fancy matrix, just to verify it:
    # (this has no physical sense, it's just testing sympy);
    # it is symmetrical:
    md = [[2, 2, 2, 1], [2, 3, 1, 0], [2, 1, 2, 3], [1, 0, 3, 2]]
    Lorentz.data = md
    m = Matrix(md)
    metric = Lorentz.metric
    minv = m.inv()

    meye = eye(4)

    # the Kronecker Delta:
    KD = Lorentz.get_kronecker_delta()

    for i in range(4):
        for j in range(4):
            assert metric(i0, i1).data[i, j] == m[i, j]
            assert metric(-i0, -i1).data[i, j] == minv[i, j]
            assert metric(i0, -i1).data[i, j] == meye[i, j]
            assert metric(-i0, i1).data[i, j] == meye[i, j]
            assert metric(i0, i1)[i, j] == m[i, j]
            assert metric(-i0, -i1)[i, j] == minv[i, j]
            assert metric(i0, -i1)[i, j] == meye[i, j]
            assert metric(-i0, i1)[i, j] == meye[i, j]

            assert KD(i0, -i1)[i, j] == meye[i, j]
def test_valued_canon_bp_swapaxes():
    """canon_bp() reorders free indices; components must be transposed
    (axes swapped) accordingly."""
    numpy = import_module("numpy")
    if numpy is None:
        # Report a skip like the other valued-tensor tests instead of the
        # previous bare `return`, which made the test look like a pass.
        skip("numpy not installed.")
    (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
     n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()

    e1 = A(i1)*A(i0)
    # Poke one component so the transposition is observable.
    e1.data[0, 1] = 44
    e2 = e1.canon_bp()
    assert e2 == A(i0)*A(i1)
    for i in range(4):
        for j in range(4):
            assert e1[i, j] == e2[j, i]
    o1 = B(i2)*A(i1)*B(i0)
    o2 = o1.canon_bp()
    for i in range(4):
        for j in range(4):
            for k in range(4):
                assert o1[i, j, k] == o2[j, i, k]
def test_pprint():
    """Pretty-printing of tensor heads and indexed tensors."""
    Lorentz = TensorIndexType('Lorentz')
    i0, i1, i2, i3, i4 = tensor_indices('i0:5', Lorentz)
    A = tensorhead('A', [Lorentz], [[1]])
    assert pretty(A) == "A(Lorentz)"
    assert pretty(A(i0)) == "A(i0)"
def test_contract_automatrix_and_data():
    """Interaction of matrix_behavior auto-indices with component data:
    assignment ordering constraints, sign handling when lowering indices,
    and deletion of .data."""
    numpy = import_module('numpy')
    if numpy is None:
        # Report a skip like the other valued-tensor tests instead of the
        # previous bare `return`, which made the test look like a pass.
        skip("numpy not installed.")

    L = TensorIndexType('L')
    S = TensorIndexType('S')
    G = tensorhead('G', [L, S, S], [[1] * 3], matrix_behavior=True)

    def G_data():
        G.data = [[[1]]]

    # Assigning tensor data before all index types have data must fail.
    raises(ValueError, G_data)
    L.data = [1, -1]
    raises(ValueError, G_data)
    S.data = [[1, 0], [0, 2]]
    G.data = [
        [[1, 2],
         [3, 4]],
        [[5, 6],
         [7, 8]]
    ]
    m0, m1, m2 = tensor_indices('m0:3', L)
    s0, s1, s2 = tensor_indices('s0:3', S)

    # Lowering m0 applies the metrics of L and S to the components.
    assert (G(-m0).data == numpy.array([
        [[1, 4],
         [3, 8]],
        [[-5, -12],
         [-7, -16]]
    ])).all()

    # Accessing .data on contracted/indexed forms must not raise.
    (G(m0) * G(-m0)).data
    G(m0, s0, -s1).data

    # Explicit matrix indices and the auto-matrix form agree.
    c1 = G(m0, s0, -s1)*G(-m0, s1, -s2)
    c2 = G(m0) * G(-m0)
    assert (c1.data == c2.data).all()

    # Deleting data resets everything to None.
    del L.data
    del S.data
    del G.data
    assert L.data is None
    assert S.data is None
    assert G.data is None
|
nkhuyu/commons | refs/heads/master | src/python/twitter/checkstyle/plugins/indentation.py | 14 | # ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import tokenize
from ..common import CheckstylePlugin
# TODO(wickman) Update this to sanitize line continuation styling as we have
# disabled it from pep8.py due to mismatched indentation styles.
class Indentation(CheckstylePlugin):
    """Enforce proper indentation."""
    INDENT_LEVEL = 2  # the one true way

    def nits(self):
        """Yield a T100 error for every INDENT whose depth does not grow by
        exactly INDENT_LEVEL columns relative to the enclosing block.

        Walks the file's token stream keeping a stack of the indentation
        strings currently in effect; DEDENT pops the stack.
        """
        indents = []  # stack of indent strings, one per open block
        for token in self.python_file.tokens:
            token_type, token_text, token_start = token[0:3]
            if token_type is tokenize.INDENT:
                last_indent = len(indents[-1]) if indents else 0
                current_indent = len(token_text)
                if current_indent - last_indent != self.INDENT_LEVEL:
                    yield self.error('T100',
                        'Indentation of %d instead of %d' % (current_indent - last_indent, self.INDENT_LEVEL),
                        token_start[0])
                indents.append(token_text)
            elif token_type is tokenize.DEDENT:
                indents.pop()
|
zouyapeng/horizon | refs/heads/stable/juno | openstack_dashboard/dashboards/project/images/tests.py | 9 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from socket import timeout as socket_timeout # noqa
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images import utils
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:images:index')
CREATE_URL = reverse('horizon:project:images:images:create')
class ImagesAndSnapshotsTests(test.TestCase):
    """Tests for the project images index view and its table row actions."""

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_index(self):
        images = self.images.list()
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None).AndReturn([images,
                                                               False, False])
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/images/index.html')
        self.assertIn('images_table', res.context)
        images_table = res.context['images_table']
        images = images_table.data
        # NOTE: these were `assertTrue(len(x), n)`, which always passes
        # because the second argument of assertTrue is the failure message.
        # assertEqual actually verifies the intended counts.
        self.assertEqual(3, len(images))

        row_actions = images_table.get_row_actions(images[0])
        self.assertEqual(3, len(row_actions))
        row_actions = images_table.get_row_actions(images[1])
        self.assertEqual(2, len(row_actions))
        # Non-owned image must not offer deletion.
        self.assertNotIn('delete_image', [a.name for a in row_actions])
        row_actions = images_table.get_row_actions(images[2])
        self.assertEqual(3, len(row_actions))

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_index_no_images(self):
        """The index renders cleanly when glance returns no images."""
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None).AndReturn([(),
                                                               False, False])
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/images/index.html')

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_index_error(self):
        """A glance error still renders the index template (handled error)."""
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None) \
            .AndRaise(self.exceptions.glance)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/images/index.html')

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_snapshot_actions(self):
        """Row actions vary with snapshot status and ownership."""
        snapshots = self.snapshots.list()
        api.glance.image_list_detailed(IsA(http.HttpRequest), marker=None) \
            .AndReturn([snapshots, False, False])
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/images/index.html')
        self.assertIn('images_table', res.context)
        snaps = res.context['images_table']
        self.assertEqual(len(snaps.get_rows()), 3)

        row_actions = snaps.get_row_actions(snaps.data[0])

        # first instance - status active, owned
        self.assertEqual(len(row_actions), 4)
        self.assertEqual(row_actions[0].verbose_name, u"Launch")
        self.assertEqual(row_actions[1].verbose_name, u"Create Volume")
        self.assertEqual(row_actions[2].verbose_name, u"Edit")
        self.assertEqual(row_actions[3].verbose_name, u"Delete Image")

        row_actions = snaps.get_row_actions(snaps.data[1])

        # second instance - status active, not owned
        self.assertEqual(len(row_actions), 2)
        self.assertEqual(row_actions[0].verbose_name, u"Launch")
        self.assertEqual(row_actions[1].verbose_name, u"Create Volume")

        row_actions = snaps.get_row_actions(snaps.data[2])
        # third instance - status queued, only delete is available
        self.assertEqual(len(row_actions), 1)
        self.assertEqual(unicode(row_actions[0].verbose_name),
                         u"Delete Image")
        self.assertEqual(str(row_actions[0]), "<DeleteImage: delete>")
class ImagesAndSnapshotsUtilsTests(test.TestCase):
    """Tests for utils.get_available_images: merging public and per-project
    image lists, the images_cache behavior, and error fallbacks.

    All glance calls are mox-stubbed; the record/replay order of the
    expectations below is significant.
    """

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_list_image(self):
        """Public + project-private active images, minus ami/aki formats."""
        public_images = [image for image in self.images.list()
                         if image.status == 'active' and image.is_public]
        private_images = [image for image in self.images.list()
                          if (image.status == 'active' and
                              not image.is_public)]

        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([public_images, False, False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([private_images, False, False])
        self.mox.ReplayAll()

        ret = utils.get_available_images(self.request, self.tenant.id)

        expected_images = [image for image in self.images.list()
                           if (image.status == 'active' and
                               image.container_format not in ('ami', 'aki'))]
        self.assertEqual(len(expected_images), len(ret))

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_list_image_using_cache(self):
        """A populated images_cache avoids repeat glance calls; a different
        project only triggers the private-image fetch."""
        public_images = [image for image in self.images.list()
                         if image.status == 'active' and image.is_public]
        private_images = [image for image in self.images.list()
                          if (image.status == 'active' and
                              not image.is_public)]

        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([public_images, False, False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([private_images, False, False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': 'other-tenant',
                                                'status': 'active'}) \
            .AndReturn([private_images, False, False])
        self.mox.ReplayAll()

        # NOTE(review): this method excludes ('ari', 'aki') while the sibling
        # tests exclude ('ami', 'aki') — confirm which set matches
        # utils.get_available_images.
        expected_images = [image for image in self.images.list()
                           if (image.status == 'active' and
                               image.container_format not in ('ari', 'aki'))]

        images_cache = {}
        ret = utils.get_available_images(self.request, self.tenant.id,
                                         images_cache)
        self.assertEqual(len(expected_images), len(ret))
        self.assertEqual(
            len(public_images),
            len(images_cache['public_images']))
        self.assertEqual(1, len(images_cache['images_by_project']))
        self.assertEqual(
            len(private_images),
            len(images_cache['images_by_project'][self.tenant.id]))

        # Second call for the same project must be served from the cache.
        ret = utils.get_available_images(self.request, self.tenant.id,
                                         images_cache)
        self.assertEqual(len(expected_images), len(ret))

        # image list for other-tenant
        ret = utils.get_available_images(self.request, 'other-tenant',
                                         images_cache)
        self.assertEqual(len(expected_images), len(ret))
        self.assertEqual(
            len(public_images),
            len(images_cache['public_images']))
        self.assertEqual(2, len(images_cache['images_by_project']))
        self.assertEqual(
            len(private_images),
            len(images_cache['images_by_project']['other-tenant']))

    @test.create_stubs({api.glance: ('image_list_detailed',),
                        exceptions: ('handle',)})
    def test_list_image_error_public_image_list(self):
        """A failed public-image fetch is handled; only private images are
        returned and the public cache entry is not created. A retry that
        succeeds then fills the cache."""
        public_images = [image for image in self.images.list()
                         if image.status == 'active' and image.is_public]
        private_images = [image for image in self.images.list()
                          if (image.status == 'active' and
                              not image.is_public)]

        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndRaise(self.exceptions.glance)
        exceptions.handle(IsA(http.HttpRequest),
                          "Unable to retrieve public images.")
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([private_images, False, False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([public_images, False, False])
        self.mox.ReplayAll()

        images_cache = {}
        ret = utils.get_available_images(self.request, self.tenant.id,
                                         images_cache)

        expected_images = [image for image in private_images
                           if image.container_format not in ('ami', 'aki')]
        self.assertEqual(len(expected_images), len(ret))
        self.assertNotIn('public_images', images_cache)
        self.assertEqual(1, len(images_cache['images_by_project']))
        self.assertEqual(
            len(private_images),
            len(images_cache['images_by_project'][self.tenant.id]))

        ret = utils.get_available_images(self.request, self.tenant.id,
                                         images_cache)

        expected_images = [image for image in self.images.list()
                           if image.container_format not in ('ami', 'aki')]
        self.assertEqual(len(expected_images), len(ret))
        self.assertEqual(
            len(public_images),
            len(images_cache['public_images']))
        self.assertEqual(1, len(images_cache['images_by_project']))
        self.assertEqual(
            len(private_images),
            len(images_cache['images_by_project'][self.tenant.id]))

    @test.create_stubs({api.glance: ('image_list_detailed',),
                        exceptions: ('handle',)})
    def test_list_image_error_private_image_list(self):
        """A failed private-image fetch is handled; only public images are
        returned and images_by_project stays empty. A successful retry then
        fills the cache."""
        public_images = [image for image in self.images.list()
                         if image.status == 'active' and image.is_public]
        private_images = [image for image in self.images.list()
                          if (image.status == 'active' and
                              not image.is_public)]

        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([public_images, False, False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndRaise(self.exceptions.glance)
        exceptions.handle(IsA(http.HttpRequest),
                          "Unable to retrieve images for the current project.")
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([private_images, False, False])
        self.mox.ReplayAll()

        images_cache = {}
        ret = utils.get_available_images(self.request, self.tenant.id,
                                         images_cache)

        expected_images = [image for image in public_images
                           if image.container_format not in ('ami', 'aki')]
        self.assertEqual(len(expected_images), len(ret))
        self.assertEqual(
            len(public_images),
            len(images_cache['public_images']))
        self.assertFalse(len(images_cache['images_by_project']))

        ret = utils.get_available_images(self.request, self.tenant.id,
                                         images_cache)

        expected_images = [image for image in self.images.list()
                           if image.container_format not in ('ami', 'aki')]
        self.assertEqual(len(expected_images), len(ret))
        self.assertEqual(
            len(public_images),
            len(images_cache['public_images']))
        self.assertEqual(1, len(images_cache['images_by_project']))
        self.assertEqual(
            len(private_images),
            len(images_cache['images_by_project'][self.tenant.id]))
class SeleniumTests(test.SeleniumTestCase):
    """Browser tests for the image-creation form: selecting a *.iso source
    (URL or file) must auto-select the ISO disk format."""

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_modal_create_image_from_url(self):
        driver = self.selenium
        images = self.images.list()
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None).AndReturn([images,
                                                               False, False])
        self.mox.ReplayAll()

        driver.get("%s%s" % (self.live_server_url, INDEX_URL))
        # Open the modal menu
        driver.find_element_by_id("images__action_create").send_keys("\n")
        wait = self.ui.WebDriverWait(driver, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: driver.find_element_by_id("id_disk_format"))

        srctypes = self.ui.Select(driver.find_element_by_id("id_source_type"))
        srctypes.select_by_value("url")
        copyfrom = driver.find_element_by_id("id_copy_from")
        copyfrom.send_keys("http://www.test.com/test.iso")
        formats = self.ui.Select(driver.find_element_by_id("id_disk_format"))
        body = formats.first_selected_option
        # Failure message typo fixed ("extention" -> "extension").
        self.assertIn("ISO", body.text,
                      "ISO should be selected when the extension is *.iso")

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_modal_create_image_from_file(self):
        driver = self.selenium
        images = self.images.list()
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None).AndReturn([images,
                                                               False, False])
        self.mox.ReplayAll()

        driver.get("%s%s" % (self.live_server_url, INDEX_URL))
        # Open the modal menu
        driver.find_element_by_id("images__action_create").send_keys("\n")
        wait = self.ui.WebDriverWait(driver, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: driver.find_element_by_id("id_disk_format"))

        srctypes = self.ui.Select(driver.find_element_by_id("id_source_type"))
        srctypes.select_by_value("file")
        driver.find_element_by_id("id_image_file").send_keys("/tmp/test.iso")
        formats = self.ui.Select(driver.find_element_by_id("id_disk_format"))
        body = formats.first_selected_option
        self.assertIn("ISO", body.text,
                      "ISO should be selected when the extension is *.iso")

    def test_create_image_from_url(self):
        driver = self.selenium
        driver.get("%s%s" % (self.live_server_url, CREATE_URL))
        wait = self.ui.WebDriverWait(driver, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: driver.find_element_by_id("id_disk_format"))

        srctypes = self.ui.Select(driver.find_element_by_id("id_source_type"))
        srctypes.select_by_value("url")
        copyfrom = driver.find_element_by_id("id_copy_from")
        copyfrom.send_keys("http://www.test.com/test.iso")
        formats = self.ui.Select(driver.find_element_by_id("id_disk_format"))
        body = formats.first_selected_option
        self.assertIn("ISO", body.text,
                      "ISO should be selected when the extension is *.iso")

    def test_create_image_from_file(self):
        driver = self.selenium
        driver.get("%s%s" % (self.live_server_url, CREATE_URL))
        wait = self.ui.WebDriverWait(driver, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: driver.find_element_by_id("id_disk_format"))

        srctypes = self.ui.Select(driver.find_element_by_id("id_source_type"))
        srctypes.select_by_value("file")
        driver.find_element_by_id("id_image_file").send_keys("/tmp/test.iso")
        formats = self.ui.Select(driver.find_element_by_id("id_disk_format"))
        body = formats.first_selected_option
        self.assertIn("ISO", body.text,
                      "ISO should be selected when the extension is *.iso")
|
jameslittle/dpkt | refs/heads/master | dpkt/ntp.py | 6 | # $Id: ntp.py 48 2008-05-27 17:31:15Z yardley $
# -*- coding: utf-8 -*-
"""Network Time Protocol."""
import dpkt
from decorators import deprecated
# NTP v4
# Leap Indicator (LI) Codes
NO_WARNING = 0
LAST_MINUTE_61_SECONDS = 1
LAST_MINUTE_59_SECONDS = 2
ALARM_CONDITION = 3
# Mode Codes
RESERVED = 0
SYMMETRIC_ACTIVE = 1
SYMMETRIC_PASSIVE = 2
CLIENT = 3
SERVER = 4
BROADCAST = 5
CONTROL_MESSAGE = 6
PRIVATE = 7
class NTP(dpkt.Packet):
    """NTP packet header.

    The one-byte ``flags`` field packs three subfields, exposed as
    properties below:
      bits 6-7: li   (leap indicator)
      bits 3-5: v    (version number)
      bits 0-2: mode
    """
    __hdr__ = (
        ('flags', 'B', 0),           # LI | VN | Mode (see properties)
        ('stratum', 'B', 0),
        ('interval', 'B', 0),
        ('precision', 'B', 0),
        ('delay', 'I', 0),
        ('dispersion', 'I', 0),
        ('id', '4s', 0),
        ('update_time', '8s', 0),
        ('originate_time', '8s', 0),
        ('receive_time', '8s', 0),
        ('transmit_time', '8s', 0)
    )

    @property
    def v(self):
        # Version number: bits 3-5 of flags.
        return (self.flags >> 3) & 0x7

    @v.setter
    def v(self, v):
        self.flags = (self.flags & ~0x38) | ((v & 0x7) << 3)

    @property
    def li(self):
        # Leap indicator: bits 6-7 of flags.
        return (self.flags >> 6) & 0x3

    @li.setter
    def li(self, li):
        self.flags = (self.flags & ~0xc0) | ((li & 0x3) << 6)

    @property
    def mode(self):
        # Mode: bits 0-2 of flags.
        return self.flags & 0x7

    @mode.setter
    def mode(self, mode):
        self.flags = (self.flags & ~0x7) | (mode & 0x7)

    # Deprecated methods, will be removed in the future
    # =================================================
    @deprecated('v')
    def _get_v(self): return self.v

    @deprecated('v')
    def _set_v(self, v): self.v = v

    @deprecated('li')
    def _get_li(self): return self.li

    @deprecated('li')
    def _set_li(self, li): self.li = li

    @deprecated('mode')
    def _get_mode(self): return self.mode

    @deprecated('mode')
    def _set_mode(self, mode): self.mode = mode
    # =================================================
def test_ntp_pack():
    """Round-trip: parsing the sample bytes and re-packing reproduces them."""
    n = NTP(__s)
    assert (__s == str(n))
def test_ntp_unpack():
n = NTP(__s)
assert (n.li == NO_WARNING)
assert (n.v == 4)
assert (n.mode == SERVER)
assert (n.stratum == 2)
assert (n.id == '\xc1\x02\x04\x02')
# test get/set functions
n.li = ALARM_CONDITION
n.v = 3
n.mode = CLIENT
assert (n.li == ALARM_CONDITION)
assert (n.v == 3)
assert (n.mode == CLIENT)
if __name__ == '__main__':
test_ntp_pack()
test_ntp_unpack()
print 'Tests Successful...' |
isandlaTech/cohorte-demos | refs/heads/dev | led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-1.0.0-20141201.234602-19-python-distribution/repo/sleekxmpp/plugins/xep_0297/__init__.py | 13 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.plugins.base import register_plugin
from sleekxmpp.plugins.xep_0297 import stanza
from sleekxmpp.plugins.xep_0297.stanza import Forwarded
from sleekxmpp.plugins.xep_0297.forwarded import XEP_0297
# Make the XEP-0297 (Stanza Forwarding) plugin discoverable by name.
register_plugin(XEP_0297)
|
craigem/MyAdventures | refs/heads/master | mcpi/minecraft.py | 2 | from connection import Connection
from vec3 import Vec3
from event import BlockEvent
from block import Block
import math
from util import flatten
""" Minecraft PI low level api v0.1_1
Note: many methods have the parameter *arg. This solution makes it
simple to allow different types, and variable number of arguments.
The actual magic is a mix of flatten_parameters() and __iter__. Example:
A Cube class could implement __iter__ to work in Minecraft.setBlocks(c, id)
(Because of this, it's possible to "erase" arguments. CmdPlayer removes
entityId, by injecting [] that flattens to nothing)
@author: Aron Nieminen, Mojang AB"""
def intFloor(*args):
    """Flatten the arguments and floor every value to an int."""
    floored = []
    for value in flatten(args):
        floored.append(int(math.floor(value)))
    return floored
class CmdPositioner:
    """Position getters/setters shared by entity and player commands."""
    def __init__(self, connection, packagePrefix):
        self.conn = connection
        self.pkg = packagePrefix
    def getPos(self, id):
        """Get entity position (entityId:int) => Vec3"""
        reply = self.conn.sendReceive(self.pkg + ".getPos", id)
        coords = [float(part) for part in reply.split(",")]
        return Vec3(*coords)
    def setPos(self, id, *args):
        """Set entity position (entityId:int, x,y,z)"""
        self.conn.send(self.pkg + ".setPos", id, args)
    def getTilePos(self, id):
        """Get entity tile position (entityId:int) => Vec3"""
        reply = self.conn.sendReceive(self.pkg + ".getTile", id)
        coords = [int(part) for part in reply.split(",")]
        return Vec3(*coords)
    def setTilePos(self, id, *args):
        """Set entity tile position (entityId:int) => Vec3"""
        self.conn.send(self.pkg + ".setTile", id, intFloor(*args))
    def setting(self, setting, status):
        """Set a player setting (setting, status). keys: autojump"""
        flag = 1 if status else 0
        self.conn.send(self.pkg + ".setting", setting, flag)
class CmdEntity(CmdPositioner):
    """Methods for entities"""
    def __init__(self, connection):
        # Entity commands use the "entity.*" package prefix on the wire.
        CmdPositioner.__init__(self, connection, "entity")
class CmdPlayer(CmdPositioner):
    """Methods for the host (Raspberry Pi) player.

    Player commands are entity commands without an entity id: passing []
    as the id flattens to nothing in the wire protocol.
    """
    def __init__(self, connection):
        # The base initializer already stores the connection; the original
        # re-assigned self.conn redundantly afterwards.
        CmdPositioner.__init__(self, connection, "player")
    def getPos(self):
        """Get the player's position => Vec3"""
        return CmdPositioner.getPos(self, [])
    def setPos(self, *args):
        """Set the player's position (x,y,z)"""
        return CmdPositioner.setPos(self, [], args)
    def getTilePos(self):
        """Get the player's tile position => Vec3"""
        return CmdPositioner.getTilePos(self, [])
    def setTilePos(self, *args):
        """Set the player's tile position (x,y,z)"""
        return CmdPositioner.setTilePos(self, [], args)
class CmdCamera:
    """Camera control commands (mode and position)."""
    def __init__(self, connection):
        self.conn = connection
    def setNormal(self, *args):
        """Set camera mode to normal Minecraft view ([entityId])"""
        self.conn.send("camera.mode.setNormal", args)
    def setFixed(self):
        """Set camera mode to fixed view"""
        self.conn.send("camera.mode.setFixed")
    def setFollow(self, *args):
        """Set camera mode to follow an entity ([entityId])"""
        self.conn.send("camera.mode.setFollow", args)
    def setPos(self, *args):
        """Set camera entity position (x,y,z)"""
        self.conn.send("camera.setPos", args)
class CmdEvents:
    """Polling access to queued game events."""
    def __init__(self, connection):
        self.conn = connection
    def clearAll(self):
        """Clear all old events"""
        self.conn.send("events.clear")
    def pollBlockHits(self):
        """Only triggered by sword => [BlockEvent]"""
        raw = self.conn.sendReceive("events.block.hits")
        hits = []
        # Entries are "|"-separated; each entry is a comma-separated int tuple.
        for entry in raw.split("|"):
            if entry:
                hits.append(BlockEvent.Hit(*map(int, entry.split(","))))
        return hits
class Minecraft:
    """The main class to interact with a running instance of Minecraft Pi."""
    def __init__(self, connection):
        self.conn = connection
        self.camera = CmdCamera(connection)
        self.entity = CmdEntity(connection)
        self.player = CmdPlayer(connection)
        self.events = CmdEvents(connection)
    def getBlock(self, *args):
        """Get block (x,y,z) => id:int"""
        reply = self.conn.sendReceive("world.getBlock", intFloor(args))
        return int(reply)
    def getBlockWithData(self, *args):
        """Get block with data (x,y,z) => Block"""
        reply = self.conn.sendReceive("world.getBlockWithData", intFloor(args))
        return Block(*map(int, reply.split(",")))
    # NOTE(review): marked @TODO upstream; parsing a single int for a whole
    # cuboid looks incomplete -- verify against the server protocol.
    def getBlocks(self, *args):
        """Get a cuboid of blocks (x0,y0,z0,x1,y1,z1) => [id:int]"""
        reply = self.conn.sendReceive("world.getBlocks", intFloor(args))
        return int(reply)
    def setBlock(self, *args):
        """Set block (x,y,z,id,[data])"""
        self.conn.send("world.setBlock", intFloor(args))
    def setBlocks(self, *args):
        """Set a cuboid of blocks (x0,y0,z0,x1,y1,z1,id,[data])"""
        self.conn.send("world.setBlocks", intFloor(args))
    def getHeight(self, *args):
        """Get the height of the world (x,z) => int"""
        reply = self.conn.sendReceive("world.getHeight", intFloor(args))
        return int(reply)
    def getPlayerEntityIds(self):
        """Get the entity ids of the connected players => [id:int]"""
        reply = self.conn.sendReceive("world.getPlayerIds")
        return map(int, reply.split("|"))
    def saveCheckpoint(self):
        """Save a checkpoint that can be used for restoring the world"""
        self.conn.send("world.checkpoint.save")
    def restoreCheckpoint(self):
        """Restore the world state to the checkpoint"""
        self.conn.send("world.checkpoint.restore")
    def postToChat(self, msg):
        """Post a message to the game chat"""
        self.conn.send("chat.post", msg)
    def setting(self, setting, status):
        """Set a world setting (setting, status). keys: world_immutable,
        nametags_visible"""
        flag = 1 if status else 0
        self.conn.send("world.setting", setting, flag)
    @staticmethod
    def create(address="localhost", port=4711):
        """Open a connection to a Minecraft Pi server and wrap it."""
        return Minecraft(Connection(address, port))
if __name__ == "__main__":
    # Smoke test: requires a running Minecraft Pi server on localhost:4711.
    mc = Minecraft.create()
    mc.postToChat("Hello, Minecraft!")
|
openhatch/new-mini-tasks | refs/heads/master | vendor/packages/Django/django/conf/locale/sv/__init__.py | 12133432 | |
jakubroztocil/cloudtunes | refs/heads/master | cloudtunes-server/cloudtunes/services/musicbrainz/__init__.py | 12133432 | |
sbuss/voteswap | refs/heads/master | lib/django/conf/locale/es_CO/__init__.py | 12133432 | |
firstjob/python-social-auth | refs/heads/master | social/apps/django_app/default/admin.py | 72 | """Admin settings"""
from django.conf import settings
from django.contrib import admin
from social.utils import setting_name
from social.apps.django_app.default.models import UserSocialAuth, Nonce, \
Association
class UserSocialAuthOption(admin.ModelAdmin):
    """Social Auth user options"""
    list_display = ('user', 'id', 'provider', 'uid')
    list_filter = ('provider',)
    raw_id_fields = ('user',)
    list_select_related = True
    def get_search_fields(self, request=None):
        # Honour an explicit SOCIAL_AUTH_ADMIN_USER_SEARCH_FIELDS setting;
        # otherwise derive searchable name/email fields from the user model.
        search_fields = getattr(
            settings, setting_name('ADMIN_USER_SEARCH_FIELDS'), None
        )
        if search_fields is None:
            _User = UserSocialAuth.user_model()
            # Prefer USERNAME_FIELD (custom user models); fall back to a
            # plain `username` attribute when present.
            username = getattr(_User, 'USERNAME_FIELD', None) or \
                       hasattr(_User, 'username') and 'username' or \
                       None
            fieldnames = ('first_name', 'last_name', 'email', username)
            # NOTE(review): _meta.get_all_field_names() was removed in
            # Django 1.10 -- verify the supported Django versions.
            all_names = _User._meta.get_all_field_names()
            search_fields = [name for name in fieldnames
                             if name and name in all_names]
        return ['user__' + name for name in search_fields]
class NonceOption(admin.ModelAdmin):
    """Nonce options"""
    # Changelist columns/search for stored one-time nonce records.
    list_display = ('id', 'server_url', 'timestamp', 'salt')
    search_fields = ('server_url',)
class AssociationOption(admin.ModelAdmin):
    """Association options"""
    # Changelist columns/filters for stored OpenID association records.
    list_display = ('id', 'server_url', 'assoc_type')
    list_filter = ('assoc_type',)
    search_fields = ('server_url',)
# Attach the option classes above to the default admin site.
admin.site.register(UserSocialAuth, UserSocialAuthOption)
admin.site.register(Nonce, NonceOption)
admin.site.register(Association, AssociationOption)
|
wangxuan007/flasky | refs/heads/master | venv/lib/python2.7/site-packages/flask/testsuite/ext.py | 563 | # -*- coding: utf-8 -*-
"""
flask.testsuite.ext
~~~~~~~~~~~~~~~~~~~
Tests the extension import thing.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import unittest
try:
from imp import reload as reload_module
except ImportError:
reload_module = reload
from flask.testsuite import FlaskTestCase
from flask._compat import PY2
class ExtImportHookTestCase(FlaskTestCase):
    """Exercises the ``flask.ext`` import hook: ``flask.ext.foo`` must
    resolve to ``flask_foo`` or the legacy ``flaskext.foo`` package,
    without caching broken modules or swallowing real import errors.
    """
    def setup(self):
        # we clear this out for various reasons.  The most important one is
        # that a real flaskext could be in there which would disable our
        # fake package.  Secondly we want to make sure that the flaskext
        # import hook does not break on reloading.
        for entry, value in list(sys.modules.items()):
            if (entry.startswith('flask.ext.') or
                entry.startswith('flask_') or
                entry.startswith('flaskext.') or
                entry == 'flaskext') and value is not None:
                sys.modules.pop(entry, None)
        from flask import ext
        reload_module(ext)
        # reloading must not add more hooks
        import_hooks = 0
        for item in sys.meta_path:
            cls = type(item)
            if cls.__module__ == 'flask.exthook' and \
               cls.__name__ == 'ExtensionImporter':
                import_hooks += 1
        self.assert_equal(import_hooks, 1)
    def teardown(self):
        # The redirect module must never leak dotted attribute names.
        from flask import ext
        for key in ext.__dict__:
            self.assert_not_in('.', key)
    def test_flaskext_new_simple_import_normal(self):
        from flask.ext.newext_simple import ext_id
        self.assert_equal(ext_id, 'newext_simple')
    def test_flaskext_new_simple_import_module(self):
        from flask.ext import newext_simple
        self.assert_equal(newext_simple.ext_id, 'newext_simple')
        self.assert_equal(newext_simple.__name__, 'flask_newext_simple')
    def test_flaskext_new_package_import_normal(self):
        from flask.ext.newext_package import ext_id
        self.assert_equal(ext_id, 'newext_package')
    def test_flaskext_new_package_import_module(self):
        from flask.ext import newext_package
        self.assert_equal(newext_package.ext_id, 'newext_package')
        self.assert_equal(newext_package.__name__, 'flask_newext_package')
    def test_flaskext_new_package_import_submodule_function(self):
        from flask.ext.newext_package.submodule import test_function
        self.assert_equal(test_function(), 42)
    def test_flaskext_new_package_import_submodule(self):
        from flask.ext.newext_package import submodule
        self.assert_equal(submodule.__name__, 'flask_newext_package.submodule')
        self.assert_equal(submodule.test_function(), 42)
    def test_flaskext_old_simple_import_normal(self):
        from flask.ext.oldext_simple import ext_id
        self.assert_equal(ext_id, 'oldext_simple')
    def test_flaskext_old_simple_import_module(self):
        from flask.ext import oldext_simple
        self.assert_equal(oldext_simple.ext_id, 'oldext_simple')
        self.assert_equal(oldext_simple.__name__, 'flaskext.oldext_simple')
    def test_flaskext_old_package_import_normal(self):
        from flask.ext.oldext_package import ext_id
        self.assert_equal(ext_id, 'oldext_package')
    def test_flaskext_old_package_import_module(self):
        from flask.ext import oldext_package
        self.assert_equal(oldext_package.ext_id, 'oldext_package')
        self.assert_equal(oldext_package.__name__, 'flaskext.oldext_package')
    def test_flaskext_old_package_import_submodule(self):
        from flask.ext.oldext_package import submodule
        self.assert_equal(submodule.__name__, 'flaskext.oldext_package.submodule')
        self.assert_equal(submodule.test_function(), 42)
    def test_flaskext_old_package_import_submodule_function(self):
        from flask.ext.oldext_package.submodule import test_function
        self.assert_equal(test_function(), 42)
    def test_flaskext_broken_package_no_module_caching(self):
        # A failed extension import must not leave a cached module behind;
        # every attempt has to fail afresh.
        for x in range(2):
            with self.assert_raises(ImportError):
                import flask.ext.broken
    def test_no_error_swallowing(self):
        # The hook must re-raise the *original* ImportError (missing
        # transitive dependency), not a generic "no such extension" error.
        try:
            import flask.ext.broken
        except ImportError:
            exc_type, exc_value, tb = sys.exc_info()
            self.assert_true(exc_type is ImportError)
            if PY2:
                message = 'No module named missing_module'
            else:
                message = 'No module named \'missing_module\''
            self.assert_equal(str(exc_value), message)
            self.assert_true(tb.tb_frame.f_globals is globals())
            # reraise() adds a second frame so we need to skip that one too.
            # On PY3 we even have another one :(
            next = tb.tb_next.tb_next
            if not PY2:
                next = next.tb_next
            self.assert_in('flask_broken/__init__.py', next.tb_frame.f_code.co_filename)
def suite():
    """Build the unittest suite for the extension import hook tests."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(ExtImportHookTestCase))
    return tests
|
youdonghai/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/db/backends/mysql/introspection.py | 313 | from django.db.backends import BaseDatabaseIntrospection
from MySQLdb import ProgrammingError, OperationalError
from MySQLdb.constants import FIELD_TYPE
import re
foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """MySQL implementation of Django's database introspection.

    Maps MySQLdb column-type codes to Django field class names and
    inspects tables, foreign-key relations and indexes via
    information_schema / SHOW statements.
    """
    # MySQLdb FIELD_TYPE code -> Django model field class name.
    data_types_reverse = {
        FIELD_TYPE.BLOB: 'TextField',
        FIELD_TYPE.CHAR: 'CharField',
        FIELD_TYPE.DECIMAL: 'DecimalField',
        FIELD_TYPE.NEWDECIMAL: 'DecimalField',
        FIELD_TYPE.DATE: 'DateField',
        FIELD_TYPE.DATETIME: 'DateTimeField',
        FIELD_TYPE.DOUBLE: 'FloatField',
        FIELD_TYPE.FLOAT: 'FloatField',
        FIELD_TYPE.INT24: 'IntegerField',
        FIELD_TYPE.LONG: 'IntegerField',
        FIELD_TYPE.LONGLONG: 'BigIntegerField',
        FIELD_TYPE.SHORT: 'IntegerField',
        FIELD_TYPE.STRING: 'CharField',
        FIELD_TYPE.TIMESTAMP: 'DateTimeField',
        FIELD_TYPE.TINY: 'IntegerField',
        FIELD_TYPE.TINY_BLOB: 'TextField',
        FIELD_TYPE.MEDIUM_BLOB: 'TextField',
        FIELD_TYPE.LONG_BLOB: 'TextField',
        FIELD_TYPE.VAR_STRING: 'CharField',
    }
    def get_table_list(self, cursor):
        "Returns a list of table names in the current database."
        cursor.execute("SHOW TABLES")
        return [row[0] for row in cursor.fetchall()]
    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        # LIMIT 1 keeps the probe cheap: only cursor.description (column
        # metadata) is consumed, never the row itself.
        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
        return cursor.description
    def _name_to_index(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: field_index} for the given table.
        Indexes are 0-based.
        """
        return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name))])
    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        my_field_dict = self._name_to_index(cursor, table_name)
        constraints = []
        relations = {}
        try:
            # This should work for MySQL 5.0.
            cursor.execute("""
                SELECT column_name, referenced_table_name, referenced_column_name
                FROM information_schema.key_column_usage
                WHERE table_name = %s
                    AND table_schema = DATABASE()
                    AND referenced_table_name IS NOT NULL
                    AND referenced_column_name IS NOT NULL""", [table_name])
            constraints.extend(cursor.fetchall())
        except (ProgrammingError, OperationalError):
            # Fall back to "SHOW CREATE TABLE", for previous MySQL versions.
            # Go through all constraints and save the equal matches.
            cursor.execute("SHOW CREATE TABLE %s" % self.connection.ops.quote_name(table_name))
            for row in cursor.fetchall():
                pos = 0
                while True:
                    match = foreign_key_re.search(row[1], pos)
                    # Fixed: identity check (`is None`) instead of `== None`.
                    if match is None:
                        break
                    pos = match.end()
                    constraints.append(match.groups())
        for my_fieldname, other_table, other_field in constraints:
            other_field_index = self._name_to_index(cursor, other_table)[other_field]
            my_field_index = my_field_dict[my_fieldname]
            relations[my_field_index] = (other_field_index, other_table)
        return relations
    def get_indexes(self, cursor, table_name):
        """
        Returns a dictionary of fieldname -> infodict for the given table,
        where each infodict is in the format:
            {'primary_key': boolean representing whether it's the primary key,
             'unique': boolean representing whether it's a unique index}
        """
        cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
        indexes = {}
        for row in cursor.fetchall():
            # SHOW INDEX columns: row[1] is Non_unique, row[2] is Key_name,
            # row[4] is Column_name.
            indexes[row[4]] = {'primary_key': (row[2] == 'PRIMARY'), 'unique': not bool(row[1])}
        return indexes
|
nzavagli/UnrealPy | refs/heads/master | UnrealPyEmbed/Source/Python/Lib/python27/test/test_frozen.py | 134 | # Test the frozen module defined in frozen.c.
from test.test_support import captured_stdout, run_unittest
import unittest
import sys
class FrozenTests(unittest.TestCase):
    """Exercise the frozen-module importer (modules baked into frozen.c).

    Python 2 module (uses ``except ImportError, x`` syntax).
    """
    def test_frozen(self):
        # Importing the frozen __hello__ family prints greetings; capture
        # stdout so the combined output can be asserted afterwards.
        with captured_stdout() as stdout:
            try:
                import __hello__
            except ImportError, x:
                self.fail("import __hello__ failed:" + str(x))
            try:
                import __phello__
            except ImportError, x:
                self.fail("import __phello__ failed:" + str(x))
            try:
                import __phello__.spam
            except ImportError, x:
                self.fail("import __phello__.spam failed:" + str(x))
            # __phello__.foo is intentionally absent from the frozen table.
            try:
                import __phello__.foo
            except ImportError:
                pass
            else:
                self.fail("import __phello__.foo should have failed")
        self.assertEqual(stdout.getvalue(),
                         'Hello world...\nHello world...\nHello world...\n')
        # Drop the frozen modules so a re-run re-imports (and re-prints).
        del sys.modules['__hello__']
        del sys.modules['__phello__']
        del sys.modules['__phello__.spam']
def test_main():
    # Run through the regrtest helper for consistent suite output.
    run_unittest(FrozenTests)
if __name__ == '__main__':
    test_main()
|
RiccardoPecora/MP | refs/heads/master | Lib/encodings/latin_1.py | 103 | """ Python 'latin-1' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless Latin-1 codec backed directly by the C primitives."""
    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.latin_1_encode
    decode = codecs.latin_1_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode *input* to Latin-1 bytes; the codec is stateless, so
        *final* is irrelevant."""
        encoded, _consumed = codecs.latin_1_encode(input, self.errors)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode Latin-1 *input*; the codec is stateless, so *final* is
        irrelevant."""
        decoded, _consumed = codecs.latin_1_decode(input, self.errors)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode from Codec; codecs.StreamWriter supplies the stream
    # plumbing -- nothing codec-specific to add.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode from Codec; codecs.StreamReader supplies the stream
    # plumbing -- nothing codec-specific to add.
    pass
class StreamConverter(StreamWriter,StreamReader):
    # The apparent swap below is intentional: a converter translates in the
    # opposite direction, so its encode step uses the decode primitive and
    # vice versa (standard pattern in the encodings package).
    encode = codecs.latin_1_decode
    decode = codecs.latin_1_encode
### encodings module API
def getregentry():
    """Return the CodecInfo entry registered for this encoding."""
    entry = codecs.CodecInfo(
        name='iso8859-1',
        encode=Codec.encode,
        decode=Codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
    return entry
|
shimpe/frescobaldi | refs/heads/master | frescobaldi_app/popplerdummy.py | 1 | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
A dummy surface only showing a "could not load popplerqt4 module" message.
"""
from __future__ import unicode_literals
from PyQt4.QtGui import QLabel, QLayout, QVBoxLayout, QWidget
import qpopplerview
import app
class Surface(qpopplerview.Surface):
    """Fallback surface that only displays a "could not load popplerqt4"
    message instead of rendering PDF pages."""
    def __init__(self, view):
        super(Surface, self).__init__(view)
        # openExternalLinks lets the user click through to the project page.
        self._msg = QLabel(openExternalLinks = True)
        layout = QVBoxLayout(sizeConstraint = QLayout.SetFixedSize)
        self.setLayout(layout)
        layout.addWidget(self._msg)
        # Re-translate the message whenever the UI language changes.
        app.translateUI(self)
    def translateUI(self):
        # NOTE(review): _() is presumably the application's translation
        # function installed globally -- confirm against the project setup.
        self._msg.setText(_("Could not load the {name} module.").format(
            name = '<a href="https://github.com/wbsoft/python-poppler-qt4">popplerqt4</a>'))
    def paintEvent(self, ev):
        # Bypass qpopplerview.Surface's painting (no poppler backend is
        # available); paint as a plain widget so only the label shows.
        QWidget.paintEvent(self, ev)
|
AOSPU/external_chromium_org | refs/heads/android-5.0/py3 | third_party/bintrees/bintrees/avltree.py | 156 | #!/usr/bin/env python
#coding:utf-8
# Author: mozman (python version)
# Purpose: avl tree module (Julienne Walker's unbounded none recursive algorithm)
# source: http://eternallyconfuzzled.com/tuts/datastructures/jsw_tut_avl.aspx
# Created: 01.05.2010
# Copyright (c) 2010-2013 by Manfred Moitzi
# License: MIT License
# Conclusion of Julienne Walker
# AVL trees are about as close to optimal as balanced binary search trees can
# get without eating up resources. You can rest assured that the O(log N)
# performance of binary search trees is guaranteed with AVL trees, but the extra
# bookkeeping required to maintain an AVL tree can be prohibitive, especially
# if deletions are common. Insertion into an AVL tree only requires one single
# or double rotation, but deletion could perform up to O(log N) rotations, as
# in the example of a worst case AVL (ie. Fibonacci) tree. However, those cases
# are rare, and still very fast.
# AVL trees are best used when degenerate sequences are common, and there is
# little or no locality of reference in nodes. That basically means that
# searches are fairly random. If degenerate sequences are not common, but still
# possible, and searches are random then a less rigid balanced tree such as red
# black trees or Andersson trees are a better solution. If there is a significant
# amount of locality to searches, such as a small cluster of commonly searched
# items, a splay tree is theoretically better than all of the balanced trees
# because of its move-to-front design.
from __future__ import absolute_import
from .treemixin import TreeMixin
from array import array
__all__ = ['AVLTree']
MAXSTACK = 32
class Node(object):
    """Internal tree node: key/value plus left/right children and a cached
    subtree-height value (stored in ``balance``)."""
    __slots__ = ['left', 'right', 'balance', 'key', 'value']
    def __init__(self, key=None, value=None):
        self.key = key
        self.value = value
        self.left = None
        self.right = None
        self.balance = 0
    def __getitem__(self, key):
        """node[0] is the left child, node[1] the right child."""
        if key == 0:
            return self.left
        return self.right
    def __setitem__(self, key, value):
        """node[0] = c sets the left child, node[1] = c the right child."""
        if key == 0:
            self.left = value
        else:
            self.right = value
    def free(self):
        """Drop all references so the node can be collected promptly."""
        self.key = None
        self.value = None
        self.left = None
        self.right = None
def height(node):
    """Cached height of a subtree: ``node.balance`` for a real node, or -1
    for an empty (None) subtree."""
    if node is None:
        return -1
    return node.balance
def jsw_single(root, direction):
    # Single rotation (Julienne Walker's formulation): rotate *root* toward
    # *direction* (0 = left, 1 = right) and return the new subtree root.
    other_side = 1 - direction
    save = root[other_side]
    root[other_side] = save[direction]
    save[direction] = root
    rlh = height(root.left)
    rrh = height(root.right)
    slh = height(save[other_side])
    # Recompute cached heights bottom-up: demoted root first, then the
    # promoted child which now sits above it.
    root.balance = max(rlh, rrh) + 1
    save.balance = max(slh, root.balance) + 1
    return save
def jsw_double(root, direction):
    # Double rotation: first rotate the child away from *direction*, then
    # rotate *root* toward it; returns the new subtree root.
    other_side = 1 - direction
    root[other_side] = jsw_single(root[other_side], other_side)
    return jsw_single(root, direction)
class AVLTree(TreeMixin):
    """
    AVLTree implements a balanced binary tree with a dict-like interface.
    see: http://en.wikipedia.org/wiki/AVL_tree
    In computer science, an AVL tree is a self-balancing binary search tree, and
    it is the first such data structure to be invented. In an AVL tree, the
    heights of the two child subtrees of any node differ by at most one;
    therefore, it is also said to be height-balanced. Lookup, insertion, and
    deletion all take O(log n) time in both the average and worst cases, where n
    is the number of nodes in the tree prior to the operation. Insertions and
    deletions may require the tree to be rebalanced by one or more tree rotations.
    The AVL tree is named after its two inventors, G.M. Adelson-Velskii and E.M.
    Landis, who published it in their 1962 paper "An algorithm for the
    organization of information."
    AVLTree() -> new empty tree.
    AVLTree(mapping) -> new tree initialized from a mapping
    AVLTree(seq) -> new tree initialized from seq [(k1, v1), (k2, v2), ... (kn, vn)]
    see also TreeMixin() class.
    """
    def __init__(self, items=None):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        self._root = None
        self._count = 0
        if items is not None:
            # update() is provided by TreeMixin and funnels into insert().
            self.update(items)
    def clear(self):
        """ T.clear() -> None. Remove all items from T. """
        def _clear(node):
            # Post-order walk so children are freed before their parent.
            if node is not None:
                _clear(node.left)
                _clear(node.right)
                node.free()
        _clear(self._root)
        self._count = 0
        self._root = None
    @property
    def count(self):
        """ count of items """
        return self._count
    @property
    def root(self):
        """ root node of T """
        return self._root
    def _new_node(self, key, value):
        """ Create a new treenode """
        self._count += 1
        return Node(key, value)
    def insert(self, key, value):
        """ T.insert(key, value) <==> T[key] = value, insert key, value into Tree """
        if self._root is None:
            self._root = self._new_node(key, value)
        else:
            # Iterative insert (no parent pointers): remember the search
            # path in parallel node/direction stacks, then walk back up
            # to update heights and rebalance.
            node_stack = [] # node stack
            dir_stack = array('I') # direction stack
            done = False
            top = 0
            node = self._root
            # search for an empty link, save path
            while True:
                if key == node.key: # update existing item
                    node.value = value
                    return
                direction = 1 if key > node.key else 0
                dir_stack.append(direction)
                node_stack.append(node)
                if node[direction] is None:
                    break
                node = node[direction]
            # Insert a new node at the bottom of the tree
            node[direction] = self._new_node(key, value)
            # Walk back up the search path
            top = len(node_stack) - 1
            while (top >= 0) and not done:
                direction = dir_stack[top]
                other_side = 1 - direction
                topnode = node_stack[top]
                left_height = height(topnode[direction])
                right_height = height(topnode[other_side])
                # Terminate or rebalance as necessary */
                if left_height - right_height == 0:
                    done = True
                if left_height - right_height >= 2:
                    # Subtree grew too tall on the insertion side: one
                    # single or double rotation restores the AVL invariant.
                    a = topnode[direction][direction]
                    b = topnode[direction][other_side]
                    if height(a) >= height(b):
                        node_stack[top] = jsw_single(topnode, other_side)
                    else:
                        node_stack[top] = jsw_double(topnode, other_side)
                    # Fix parent
                    if top != 0:
                        node_stack[top - 1][dir_stack[top - 1]] = node_stack[top]
                    else:
                        self._root = node_stack[0]
                    done = True
                # Update balance factors
                topnode = node_stack[top]
                left_height = height(topnode[direction])
                right_height = height(topnode[other_side])
                topnode.balance = max(left_height, right_height) + 1
                top -= 1
    def remove(self, key):
        """ T.remove(key) <==> del T[key], remove item <key> from tree """
        if self._root is None:
            raise KeyError(str(key))
        else:
            # NOTE(review): MAXSTACK (32) bounds the recorded path depth;
            # an AVL tree stays within this for any realistic size, but
            # verify for extremely large trees.
            node_stack = [None] * MAXSTACK # node stack
            dir_stack = array('I', [0] * MAXSTACK) # direction stack
            top = 0
            node = self._root
            while True:
                # Terminate if not found
                if node is None:
                    raise KeyError(str(key))
                elif node.key == key:
                    break
                # Push direction and node onto stack
                direction = 1 if key > node.key else 0
                dir_stack[top] = direction
                node_stack[top] = node
                node = node[direction]
                top += 1
            # Remove the node
            if (node.left is None) or (node.right is None):
                # Which child is not null?
                direction = 1 if node.left is None else 0
                # Fix parent
                if top != 0:
                    node_stack[top - 1][dir_stack[top - 1]] = node[direction]
                else:
                    self._root = node[direction]
                node.free()
                self._count -= 1
            else:
                # Two children: replace this node's payload with its
                # inorder successor's, then unlink the successor.
                # Find the inorder successor
                heir = node.right
                # Save the path
                dir_stack[top] = 1
                node_stack[top] = node
                top += 1
                while heir.left is not None:
                    dir_stack[top] = 0
                    node_stack[top] = heir
                    top += 1
                    heir = heir.left
                # Swap data
                node.key = heir.key
                node.value = heir.value
                # Unlink successor and fix parent
                xdir = 1 if node_stack[top - 1].key == node.key else 0
                node_stack[top - 1][xdir] = heir.right
                heir.free()
                self._count -= 1
            # Walk back up the search path
            top -= 1
            while top >= 0:
                direction = dir_stack[top]
                other_side = 1 - direction
                topnode = node_stack[top]
                left_height = height(topnode[direction])
                right_height = height(topnode[other_side])
                b_max = max(left_height, right_height)
                # Update balance factors
                topnode.balance = b_max + 1
                # Terminate or rebalance as necessary
                if (left_height - right_height) == -1:
                    break
                if (left_height - right_height) <= -2:
                    # Deletion shortened one side by two: rotate to restore
                    # the AVL invariant (may cascade up to the root).
                    a = topnode[other_side][direction]
                    b = topnode[other_side][other_side]
                    if height(a) <= height(b):
                        node_stack[top] = jsw_single(topnode, direction)
                    else:
                        node_stack[top] = jsw_double(topnode, direction)
                    # Fix parent
                    if top != 0:
                        node_stack[top - 1][dir_stack[top - 1]] = node_stack[top]
                    else:
                        self._root = node_stack[0]
                top -= 1
|
luosch/vinda | refs/heads/master | vinda/vinda.py | 1 | # -*- coding:utf-8 -*-
import os
import codecs
from jinja2 import Environment, FileSystemLoader
def look(path='.', deep_limit=3, ignore=[]):
env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__))))
template = env.get_template('template.html')
link = []
for name in os.listdir(path):
full_path = os.path.join(path, name)
print 'vinda ->', full_path
if name not in ignore and os.path.isdir(full_path):
link.append({'href': name.decode('utf-8'), 'name': name.decode('utf-8')})
if deep_limit > 0:
look(path=full_path, deep_limit=deep_limit - 1, ignore=ignore)
if name not in ignore and os.path.isfile(full_path):
link.append({'href': name.decode('utf-8'), 'name': name.decode('utf-8')})
if link:
with open(path + '/index.html', mode='w') as f:
f.write(template.render(link=link).encode('utf-8'))
|
pavelkuchin/tracktrains | refs/heads/master | utils/__init__.py | 12133432 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.